1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/core/ethtool.c - Ethtool ioctl handler 4 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> 5 * 6 * This file is where we call all the ethtool_ops commands to get 7 * the information ethtool needs. 8 */ 9 10 #include <linux/compat.h> 11 #include <linux/etherdevice.h> 12 #include <linux/module.h> 13 #include <linux/types.h> 14 #include <linux/capability.h> 15 #include <linux/errno.h> 16 #include <linux/ethtool.h> 17 #include <linux/netdevice.h> 18 #include <linux/net_tstamp.h> 19 #include <linux/phy.h> 20 #include <linux/bitops.h> 21 #include <linux/uaccess.h> 22 #include <linux/vmalloc.h> 23 #include <linux/sfp.h> 24 #include <linux/slab.h> 25 #include <linux/rtnetlink.h> 26 #include <linux/sched/signal.h> 27 #include <linux/net.h> 28 #include <linux/pm_runtime.h> 29 #include <linux/utsname.h> 30 #include <net/devlink.h> 31 #include <net/ipv6.h> 32 #include <net/xdp_sock_drv.h> 33 #include <net/flow_offload.h> 34 #include <net/netdev_lock.h> 35 #include <linux/ethtool_netlink.h> 36 #include "common.h" 37 38 /* State held across locks and calls for commands which have devlink fallback */ 39 struct ethtool_devlink_compat { 40 struct devlink *devlink; 41 union { 42 struct ethtool_flash efl; 43 struct ethtool_drvinfo info; 44 }; 45 }; 46 47 static struct devlink *netdev_to_devlink_get(struct net_device *dev) 48 { 49 if (!dev->devlink_port) 50 return NULL; 51 return devlink_try_get(dev->devlink_port->devlink); 52 } 53 54 /* 55 * Some useful ethtool_ops methods that're device independent. 56 * If we find that all drivers want to do the same thing here, 57 * we can turn these into dev_() function calls. 58 */ 59 60 u32 ethtool_op_get_link(struct net_device *dev) 61 { 62 /* Synchronize carrier state with link watch, see also rtnl_getlink() */ 63 __linkwatch_sync_dev(dev); 64 65 return netif_carrier_ok(dev) ? 
1 : 0; 66 } 67 EXPORT_SYMBOL(ethtool_op_get_link); 68 69 int ethtool_op_get_ts_info(struct net_device *dev, 70 struct kernel_ethtool_ts_info *info) 71 { 72 info->so_timestamping = 73 SOF_TIMESTAMPING_TX_SOFTWARE | 74 SOF_TIMESTAMPING_RX_SOFTWARE | 75 SOF_TIMESTAMPING_SOFTWARE; 76 info->phc_index = -1; 77 return 0; 78 } 79 EXPORT_SYMBOL(ethtool_op_get_ts_info); 80 81 /* Handlers for each ethtool command */ 82 83 static int ethtool_get_features(struct net_device *dev, void __user *useraddr) 84 { 85 struct ethtool_gfeatures cmd = { 86 .cmd = ETHTOOL_GFEATURES, 87 .size = ETHTOOL_DEV_FEATURE_WORDS, 88 }; 89 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; 90 u32 __user *sizeaddr; 91 u32 copy_size; 92 int i; 93 94 /* in case feature bits run out again */ 95 BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); 96 97 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { 98 features[i].available = (u32)(dev->hw_features >> (32 * i)); 99 features[i].requested = (u32)(dev->wanted_features >> (32 * i)); 100 features[i].active = (u32)(dev->features >> (32 * i)); 101 features[i].never_changed = 102 (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); 103 } 104 105 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); 106 if (get_user(copy_size, sizeaddr)) 107 return -EFAULT; 108 109 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) 110 copy_size = ETHTOOL_DEV_FEATURE_WORDS; 111 112 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 113 return -EFAULT; 114 useraddr += sizeof(cmd); 115 if (copy_to_user(useraddr, features, 116 array_size(copy_size, sizeof(*features)))) 117 return -EFAULT; 118 119 return 0; 120 } 121 122 static int ethtool_set_features(struct net_device *dev, void __user *useraddr) 123 { 124 struct ethtool_sfeatures cmd; 125 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; 126 netdev_features_t wanted = 0, valid = 0; 127 int i, ret = 0; 128 129 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 130 return -EFAULT; 131 useraddr += sizeof(cmd); 132 133 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) 134 return -EINVAL; 135 136 if (copy_from_user(features, useraddr, sizeof(features))) 137 return -EFAULT; 138 139 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { 140 valid |= (netdev_features_t)features[i].valid << (32 * i); 141 wanted |= (netdev_features_t)features[i].requested << (32 * i); 142 } 143 144 if (valid & ~NETIF_F_ETHTOOL_BITS) 145 return -EINVAL; 146 147 if (valid & ~dev->hw_features) { 148 valid &= dev->hw_features; 149 ret |= ETHTOOL_F_UNSUPPORTED; 150 } 151 152 dev->wanted_features &= ~valid; 153 dev->wanted_features |= wanted & valid; 154 __netdev_update_features(dev); 155 156 if ((dev->wanted_features ^ dev->features) & valid) 157 ret |= ETHTOOL_F_WISH; 158 159 return ret; 160 } 161 162 static int __ethtool_get_sset_count(struct net_device *dev, int sset) 163 { 164 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 165 const struct ethtool_ops *ops = dev->ethtool_ops; 166 167 if (sset == ETH_SS_FEATURES) 168 return ARRAY_SIZE(netdev_features_strings); 169 170 if (sset == ETH_SS_RSS_HASH_FUNCS) 171 return ARRAY_SIZE(rss_hash_func_strings); 172 173 if (sset == ETH_SS_TUNABLES) 174 return ARRAY_SIZE(tunable_strings); 175 176 if (sset == ETH_SS_PHY_TUNABLES) 177 return ARRAY_SIZE(phy_tunable_strings); 178 179 if (sset == ETH_SS_PHY_STATS && dev->phydev && 180 !ops->get_ethtool_phy_stats && 181 phy_ops && phy_ops->get_sset_count) 182 return phy_ops->get_sset_count(dev->phydev); 183 184 if (sset == ETH_SS_LINK_MODES) 185 return 
__ETHTOOL_LINK_MODE_MASK_NBITS; 186 187 if (ops->get_sset_count && ops->get_strings) 188 return ops->get_sset_count(dev, sset); 189 else 190 return -EOPNOTSUPP; 191 } 192 193 static void __ethtool_get_strings(struct net_device *dev, 194 u32 stringset, u8 *data) 195 { 196 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 197 const struct ethtool_ops *ops = dev->ethtool_ops; 198 199 if (stringset == ETH_SS_FEATURES) 200 memcpy(data, netdev_features_strings, 201 sizeof(netdev_features_strings)); 202 else if (stringset == ETH_SS_RSS_HASH_FUNCS) 203 memcpy(data, rss_hash_func_strings, 204 sizeof(rss_hash_func_strings)); 205 else if (stringset == ETH_SS_TUNABLES) 206 memcpy(data, tunable_strings, sizeof(tunable_strings)); 207 else if (stringset == ETH_SS_PHY_TUNABLES) 208 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); 209 else if (stringset == ETH_SS_PHY_STATS && dev->phydev && 210 !ops->get_ethtool_phy_stats && phy_ops && 211 phy_ops->get_strings) 212 phy_ops->get_strings(dev->phydev, data); 213 else if (stringset == ETH_SS_LINK_MODES) 214 memcpy(data, link_mode_names, 215 __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN); 216 else 217 /* ops->get_strings is valid because checked earlier */ 218 ops->get_strings(dev, stringset, data); 219 } 220 221 static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) 222 { 223 /* feature masks of legacy discrete ethtool ops */ 224 225 switch (eth_cmd) { 226 case ETHTOOL_GTXCSUM: 227 case ETHTOOL_STXCSUM: 228 return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC | 229 NETIF_F_SCTP_CRC; 230 case ETHTOOL_GRXCSUM: 231 case ETHTOOL_SRXCSUM: 232 return NETIF_F_RXCSUM; 233 case ETHTOOL_GSG: 234 case ETHTOOL_SSG: 235 return NETIF_F_SG | NETIF_F_FRAGLIST; 236 case ETHTOOL_GTSO: 237 case ETHTOOL_STSO: 238 return NETIF_F_ALL_TSO; 239 case ETHTOOL_GGSO: 240 case ETHTOOL_SGSO: 241 return NETIF_F_GSO; 242 case ETHTOOL_GGRO: 243 case ETHTOOL_SGRO: 244 return NETIF_F_GRO; 245 default: 246 BUG(); 247 } 248 } 249 250 static int ethtool_get_one_feature(struct net_device *dev, 251 char __user *useraddr, u32 ethcmd) 252 { 253 netdev_features_t mask = ethtool_get_feature_mask(ethcmd); 254 struct ethtool_value edata = { 255 .cmd = ethcmd, 256 .data = !!(dev->features & mask), 257 }; 258 259 if (copy_to_user(useraddr, &edata, sizeof(edata))) 260 return -EFAULT; 261 return 0; 262 } 263 264 static int ethtool_set_one_feature(struct net_device *dev, 265 void __user *useraddr, u32 ethcmd) 266 { 267 struct ethtool_value edata; 268 netdev_features_t mask; 269 270 if (copy_from_user(&edata, useraddr, sizeof(edata))) 271 return -EFAULT; 272 273 mask = ethtool_get_feature_mask(ethcmd); 274 mask &= dev->hw_features; 275 if (!mask) 276 return -EOPNOTSUPP; 277 278 if (edata.data) 279 dev->wanted_features |= mask; 280 else 281 dev->wanted_features &= ~mask; 282 283 __netdev_update_features(dev); 284 285 return 0; 286 } 287 288 #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ 289 ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) 290 #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ 291 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ 292 NETIF_F_RXHASH) 293 294 static u32 __ethtool_get_flags(struct net_device *dev) 295 { 296 u32 flags = 0; 297 298 if (dev->features & NETIF_F_LRO) 299 flags |= ETH_FLAG_LRO; 300 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 301 flags |= ETH_FLAG_RXVLAN; 302 if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) 303 flags |= ETH_FLAG_TXVLAN; 304 if (dev->features & NETIF_F_NTUPLE) 305 flags |= ETH_FLAG_NTUPLE; 306 if 
(dev->features & NETIF_F_RXHASH) 307 flags |= ETH_FLAG_RXHASH; 308 309 return flags; 310 } 311 312 static int __ethtool_set_flags(struct net_device *dev, u32 data) 313 { 314 netdev_features_t features = 0, changed; 315 316 if (data & ~ETH_ALL_FLAGS) 317 return -EINVAL; 318 319 if (data & ETH_FLAG_LRO) 320 features |= NETIF_F_LRO; 321 if (data & ETH_FLAG_RXVLAN) 322 features |= NETIF_F_HW_VLAN_CTAG_RX; 323 if (data & ETH_FLAG_TXVLAN) 324 features |= NETIF_F_HW_VLAN_CTAG_TX; 325 if (data & ETH_FLAG_NTUPLE) 326 features |= NETIF_F_NTUPLE; 327 if (data & ETH_FLAG_RXHASH) 328 features |= NETIF_F_RXHASH; 329 330 /* allow changing only bits set in hw_features */ 331 changed = (features ^ dev->features) & ETH_ALL_FEATURES; 332 if (changed & ~dev->hw_features) 333 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 334 335 dev->wanted_features = 336 (dev->wanted_features & ~changed) | (features & changed); 337 338 __netdev_update_features(dev); 339 340 return 0; 341 } 342 343 /* Given two link masks, AND them together and save the result in dst. */ 344 void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, 345 struct ethtool_link_ksettings *src) 346 { 347 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 348 unsigned int idx = 0; 349 350 for (; idx < size; idx++) { 351 dst->link_modes.supported[idx] &= 352 src->link_modes.supported[idx]; 353 dst->link_modes.advertising[idx] &= 354 src->link_modes.advertising[idx]; 355 } 356 } 357 EXPORT_SYMBOL(ethtool_intersect_link_masks); 358 359 void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, 360 u32 legacy_u32) 361 { 362 linkmode_zero(dst); 363 dst[0] = legacy_u32; 364 } 365 EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); 366 367 /* return false if src had higher bits set. lower bits always updated. */ 368 bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, 369 const unsigned long *src) 370 { 371 *legacy_u32 = src[0]; 372 return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS, 32) == 373 __ETHTOOL_LINK_MODE_MASK_NBITS; 374 } 375 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); 376 377 /* return false if ksettings link modes had higher bits 378 * set. 
legacy_settings always updated (best effort) 379 */ 380 static bool 381 convert_link_ksettings_to_legacy_settings( 382 struct ethtool_cmd *legacy_settings, 383 const struct ethtool_link_ksettings *link_ksettings) 384 { 385 bool retval = true; 386 387 memset(legacy_settings, 0, sizeof(*legacy_settings)); 388 /* this also clears the deprecated fields in legacy structure: 389 * __u8 transceiver; 390 * __u32 maxtxpkt; 391 * __u32 maxrxpkt; 392 */ 393 394 retval &= ethtool_convert_link_mode_to_legacy_u32( 395 &legacy_settings->supported, 396 link_ksettings->link_modes.supported); 397 retval &= ethtool_convert_link_mode_to_legacy_u32( 398 &legacy_settings->advertising, 399 link_ksettings->link_modes.advertising); 400 retval &= ethtool_convert_link_mode_to_legacy_u32( 401 &legacy_settings->lp_advertising, 402 link_ksettings->link_modes.lp_advertising); 403 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); 404 legacy_settings->duplex 405 = link_ksettings->base.duplex; 406 legacy_settings->port 407 = link_ksettings->base.port; 408 legacy_settings->phy_address 409 = link_ksettings->base.phy_address; 410 legacy_settings->autoneg 411 = link_ksettings->base.autoneg; 412 legacy_settings->mdio_support 413 = link_ksettings->base.mdio_support; 414 legacy_settings->eth_tp_mdix 415 = link_ksettings->base.eth_tp_mdix; 416 legacy_settings->eth_tp_mdix_ctrl 417 = link_ksettings->base.eth_tp_mdix_ctrl; 418 legacy_settings->transceiver 419 = link_ksettings->base.transceiver; 420 return retval; 421 } 422 423 /* number of 32-bit words to store the user's link mode bitmaps */ 424 #define __ETHTOOL_LINK_MODE_MASK_NU32 \ 425 DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32) 426 427 /* layout of the struct passed from/to userland */ 428 struct ethtool_link_usettings { 429 struct ethtool_link_settings base; 430 struct { 431 __u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32]; 432 __u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; 433 __u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; 434 } link_modes; 435 }; 436 437 /* Internal kernel helper to query a device ethtool_link_settings. */ 438 int __ethtool_get_link_ksettings(struct net_device *dev, 439 struct ethtool_link_ksettings *link_ksettings) 440 { 441 ASSERT_RTNL(); 442 443 if (!dev->ethtool_ops->get_link_ksettings) 444 return -EOPNOTSUPP; 445 446 if (!netif_device_present(dev)) 447 return -ENODEV; 448 449 memset(link_ksettings, 0, sizeof(*link_ksettings)); 450 return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); 451 } 452 EXPORT_SYMBOL(__ethtool_get_link_ksettings); 453 454 /* convert ethtool_link_usettings in user space to a kernel internal 455 * ethtool_link_ksettings. return 0 on success, errno on error. 
456 */ 457 static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to, 458 const void __user *from) 459 { 460 struct ethtool_link_usettings link_usettings; 461 462 if (copy_from_user(&link_usettings, from, sizeof(link_usettings))) 463 return -EFAULT; 464 465 memcpy(&to->base, &link_usettings.base, sizeof(to->base)); 466 bitmap_from_arr32(to->link_modes.supported, 467 link_usettings.link_modes.supported, 468 __ETHTOOL_LINK_MODE_MASK_NBITS); 469 bitmap_from_arr32(to->link_modes.advertising, 470 link_usettings.link_modes.advertising, 471 __ETHTOOL_LINK_MODE_MASK_NBITS); 472 bitmap_from_arr32(to->link_modes.lp_advertising, 473 link_usettings.link_modes.lp_advertising, 474 __ETHTOOL_LINK_MODE_MASK_NBITS); 475 476 return 0; 477 } 478 479 /* Check if the user is trying to change anything besides speed/duplex */ 480 bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd) 481 { 482 struct ethtool_link_settings base2 = {}; 483 484 base2.speed = cmd->base.speed; 485 base2.port = PORT_OTHER; 486 base2.duplex = cmd->base.duplex; 487 base2.cmd = cmd->base.cmd; 488 base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords; 489 490 return !memcmp(&base2, &cmd->base, sizeof(base2)) && 491 bitmap_empty(cmd->link_modes.supported, 492 __ETHTOOL_LINK_MODE_MASK_NBITS) && 493 bitmap_empty(cmd->link_modes.lp_advertising, 494 __ETHTOOL_LINK_MODE_MASK_NBITS); 495 } 496 497 /* convert a kernel internal ethtool_link_ksettings to 498 * ethtool_link_usettings in user space. return 0 on success, errno on 499 * error. 500 */ 501 static int 502 store_link_ksettings_for_user(void __user *to, 503 const struct ethtool_link_ksettings *from) 504 { 505 struct ethtool_link_usettings link_usettings; 506 507 memcpy(&link_usettings, from, sizeof(link_usettings)); 508 bitmap_to_arr32(link_usettings.link_modes.supported, 509 from->link_modes.supported, 510 __ETHTOOL_LINK_MODE_MASK_NBITS); 511 bitmap_to_arr32(link_usettings.link_modes.advertising, 512 from->link_modes.advertising, 513 __ETHTOOL_LINK_MODE_MASK_NBITS); 514 bitmap_to_arr32(link_usettings.link_modes.lp_advertising, 515 from->link_modes.lp_advertising, 516 __ETHTOOL_LINK_MODE_MASK_NBITS); 517 518 if (copy_to_user(to, &link_usettings, sizeof(link_usettings))) 519 return -EFAULT; 520 521 return 0; 522 } 523 524 /* Query device for its ethtool_link_settings. 
*/ 525 static int ethtool_get_link_ksettings(struct net_device *dev, 526 void __user *useraddr) 527 { 528 int err = 0; 529 struct ethtool_link_ksettings link_ksettings; 530 531 ASSERT_RTNL(); 532 if (!dev->ethtool_ops->get_link_ksettings) 533 return -EOPNOTSUPP; 534 535 /* handle bitmap nbits handshake */ 536 if (copy_from_user(&link_ksettings.base, useraddr, 537 sizeof(link_ksettings.base))) 538 return -EFAULT; 539 540 if (__ETHTOOL_LINK_MODE_MASK_NU32 541 != link_ksettings.base.link_mode_masks_nwords) { 542 /* wrong link mode nbits requested */ 543 memset(&link_ksettings, 0, sizeof(link_ksettings)); 544 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS; 545 /* send back number of words required as negative val */ 546 compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX, 547 "need too many bits for link modes!"); 548 link_ksettings.base.link_mode_masks_nwords 549 = -((s8)__ETHTOOL_LINK_MODE_MASK_NU32); 550 551 /* copy the base fields back to user, not the link 552 * mode bitmaps 553 */ 554 if (copy_to_user(useraddr, &link_ksettings.base, 555 sizeof(link_ksettings.base))) 556 return -EFAULT; 557 558 return 0; 559 } 560 561 /* handshake successful: user/kernel agree on 562 * link_mode_masks_nwords 563 */ 564 565 memset(&link_ksettings, 0, sizeof(link_ksettings)); 566 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); 567 if (err < 0) 568 return err; 569 570 /* make sure we tell the right values to user */ 571 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS; 572 link_ksettings.base.link_mode_masks_nwords 573 = __ETHTOOL_LINK_MODE_MASK_NU32; 574 link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED; 575 link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; 576 link_ksettings.base.rate_matching = RATE_MATCH_NONE; 577 578 return store_link_ksettings_for_user(useraddr, &link_ksettings); 579 } 580 581 /* Update device ethtool_link_settings. 
*/ 582 static int ethtool_set_link_ksettings(struct net_device *dev, 583 void __user *useraddr) 584 { 585 struct ethtool_link_ksettings link_ksettings = {}; 586 int err; 587 588 ASSERT_RTNL(); 589 590 if (!dev->ethtool_ops->set_link_ksettings) 591 return -EOPNOTSUPP; 592 593 /* make sure nbits field has expected value */ 594 if (copy_from_user(&link_ksettings.base, useraddr, 595 sizeof(link_ksettings.base))) 596 return -EFAULT; 597 598 if (__ETHTOOL_LINK_MODE_MASK_NU32 599 != link_ksettings.base.link_mode_masks_nwords) 600 return -EINVAL; 601 602 /* copy the whole structure, now that we know it has expected 603 * format 604 */ 605 err = load_link_ksettings_from_user(&link_ksettings, useraddr); 606 if (err) 607 return err; 608 609 /* re-check nwords field, just in case */ 610 if (__ETHTOOL_LINK_MODE_MASK_NU32 611 != link_ksettings.base.link_mode_masks_nwords) 612 return -EINVAL; 613 614 if (link_ksettings.base.master_slave_cfg || 615 link_ksettings.base.master_slave_state) 616 return -EINVAL; 617 618 err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 619 if (err >= 0) { 620 ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL); 621 ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL); 622 } 623 return err; 624 } 625 626 int ethtool_virtdev_set_link_ksettings(struct net_device *dev, 627 const struct ethtool_link_ksettings *cmd, 628 u32 *dev_speed, u8 *dev_duplex) 629 { 630 u32 speed; 631 u8 duplex; 632 633 speed = cmd->base.speed; 634 duplex = cmd->base.duplex; 635 /* don't allow custom speed and duplex */ 636 if (!ethtool_validate_speed(speed) || 637 !ethtool_validate_duplex(duplex) || 638 !ethtool_virtdev_validate_cmd(cmd)) 639 return -EINVAL; 640 *dev_speed = speed; 641 *dev_duplex = duplex; 642 643 return 0; 644 } 645 EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings); 646 647 /* Query device for its ethtool_cmd settings. 648 * 649 * Backward compatibility note: for compatibility with legacy ethtool, this is 650 * now implemented via get_link_ksettings. When driver reports higher link mode 651 * bits, a kernel warning is logged once (with name of 1st driver/device) to 652 * recommend user to upgrade ethtool, but the command is successful (only the 653 * lower link mode bits reported back to user). Deprecated fields from 654 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. 655 */ 656 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 657 { 658 struct ethtool_link_ksettings link_ksettings; 659 struct ethtool_cmd cmd; 660 int err; 661 662 ASSERT_RTNL(); 663 if (!dev->ethtool_ops->get_link_ksettings) 664 return -EOPNOTSUPP; 665 666 if (dev->ethtool->module_fw_flash_in_progress) 667 return -EBUSY; 668 669 memset(&link_ksettings, 0, sizeof(link_ksettings)); 670 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); 671 if (err < 0) 672 return err; 673 convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings); 674 675 /* send a sensible cmd tag back to user */ 676 cmd.cmd = ETHTOOL_GSET; 677 678 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 679 return -EFAULT; 680 681 return 0; 682 } 683 684 /* Update device link settings with given ethtool_cmd. 685 * 686 * Backward compatibility note: for compatibility with legacy ethtool, this is 687 * now always implemented via set_link_settings. 
When user's request updates 688 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel 689 * warning is logged once (with name of 1st driver/device) to recommend user to 690 * upgrade ethtool, and the request is rejected. 691 */ 692 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) 693 { 694 struct ethtool_link_ksettings link_ksettings; 695 struct ethtool_cmd cmd; 696 int ret; 697 698 ASSERT_RTNL(); 699 700 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 701 return -EFAULT; 702 if (!dev->ethtool_ops->set_link_ksettings) 703 return -EOPNOTSUPP; 704 705 if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) 706 return -EINVAL; 707 link_ksettings.base.link_mode_masks_nwords = 708 __ETHTOOL_LINK_MODE_MASK_NU32; 709 ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 710 if (ret >= 0) { 711 ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL); 712 ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL); 713 } 714 return ret; 715 } 716 717 static int 718 ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) 719 { 720 const struct ethtool_ops *ops = dev->ethtool_ops; 721 struct device *parent = dev->dev.parent; 722 723 rsp->info.cmd = ETHTOOL_GDRVINFO; 724 strscpy(rsp->info.version, init_uts_ns.name.release, 725 sizeof(rsp->info.version)); 726 if (ops->get_drvinfo) { 727 ops->get_drvinfo(dev, &rsp->info); 728 if (!rsp->info.bus_info[0] && parent) 729 strscpy(rsp->info.bus_info, dev_name(parent), 730 sizeof(rsp->info.bus_info)); 731 if (!rsp->info.driver[0] && parent && parent->driver) 732 strscpy(rsp->info.driver, parent->driver->name, 733 sizeof(rsp->info.driver)); 734 } else if (parent && parent->driver) { 735 strscpy(rsp->info.bus_info, dev_name(parent), 736 sizeof(rsp->info.bus_info)); 737 strscpy(rsp->info.driver, parent->driver->name, 738 sizeof(rsp->info.driver)); 739 } else if (dev->rtnl_link_ops) { 740 strscpy(rsp->info.driver, dev->rtnl_link_ops->kind, 741 sizeof(rsp->info.driver)); 742 } else { 743 return -EOPNOTSUPP; 744 } 745 746 /* 747 * this method of obtaining string set info is deprecated; 748 * Use ETHTOOL_GSSET_INFO instead. 
749 */ 750 if (ops->get_sset_count) { 751 int rc; 752 753 rc = ops->get_sset_count(dev, ETH_SS_TEST); 754 if (rc >= 0) 755 rsp->info.testinfo_len = rc; 756 rc = ops->get_sset_count(dev, ETH_SS_STATS); 757 if (rc >= 0) 758 rsp->info.n_stats = rc; 759 rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); 760 if (rc >= 0) 761 rsp->info.n_priv_flags = rc; 762 } 763 if (ops->get_regs_len) { 764 int ret = ops->get_regs_len(dev); 765 766 if (ret > 0) 767 rsp->info.regdump_len = ret; 768 } 769 770 if (ops->get_eeprom_len) 771 rsp->info.eedump_len = ops->get_eeprom_len(dev); 772 773 if (!rsp->info.fw_version[0]) 774 rsp->devlink = netdev_to_devlink_get(dev); 775 776 return 0; 777 } 778 779 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, 780 void __user *useraddr) 781 { 782 struct ethtool_sset_info info; 783 u64 sset_mask; 784 int i, idx = 0, n_bits = 0, ret, rc; 785 u32 *info_buf = NULL; 786 787 if (copy_from_user(&info, useraddr, sizeof(info))) 788 return -EFAULT; 789 790 /* store copy of mask, because we zero struct later on */ 791 sset_mask = info.sset_mask; 792 if (!sset_mask) 793 return 0; 794 795 /* calculate size of return buffer */ 796 n_bits = hweight64(sset_mask); 797 798 memset(&info, 0, sizeof(info)); 799 info.cmd = ETHTOOL_GSSET_INFO; 800 801 info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER); 802 if (!info_buf) 803 return -ENOMEM; 804 805 /* 806 * fill return buffer based on input bitmask and successful 807 * get_sset_count return 808 */ 809 for (i = 0; i < 64; i++) { 810 if (!(sset_mask & (1ULL << i))) 811 continue; 812 813 rc = __ethtool_get_sset_count(dev, i); 814 if (rc >= 0) { 815 info.sset_mask |= (1ULL << i); 816 info_buf[idx++] = rc; 817 } 818 } 819 820 ret = -EFAULT; 821 if (copy_to_user(useraddr, &info, sizeof(info))) 822 goto out; 823 824 useraddr += offsetof(struct ethtool_sset_info, data); 825 if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32)))) 826 goto out; 827 828 ret = 0; 829 830 out: 831 kfree(info_buf); 832 return ret; 833 } 834 835 static noinline_for_stack int 836 ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc, 837 const struct compat_ethtool_rxnfc __user *useraddr, 838 size_t size) 839 { 840 struct compat_ethtool_rxnfc crxnfc = {}; 841 842 /* We expect there to be holes between fs.m_ext and 843 * fs.ring_cookie and at the end of fs, but nowhere else. 844 * On non-x86, no conversion should be needed. 
845 */ 846 BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) && 847 sizeof(struct compat_ethtool_rxnfc) != 848 sizeof(struct ethtool_rxnfc)); 849 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + 850 sizeof(useraddr->fs.m_ext) != 851 offsetof(struct ethtool_rxnfc, fs.m_ext) + 852 sizeof(rxnfc->fs.m_ext)); 853 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) - 854 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 855 offsetof(struct ethtool_rxnfc, fs.location) - 856 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 857 858 if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc)))) 859 return -EFAULT; 860 861 *rxnfc = (struct ethtool_rxnfc) { 862 .cmd = crxnfc.cmd, 863 .flow_type = crxnfc.flow_type, 864 .data = crxnfc.data, 865 .fs = { 866 .flow_type = crxnfc.fs.flow_type, 867 .h_u = crxnfc.fs.h_u, 868 .h_ext = crxnfc.fs.h_ext, 869 .m_u = crxnfc.fs.m_u, 870 .m_ext = crxnfc.fs.m_ext, 871 .ring_cookie = crxnfc.fs.ring_cookie, 872 .location = crxnfc.fs.location, 873 }, 874 .rule_cnt = crxnfc.rule_cnt, 875 }; 876 877 return 0; 878 } 879 880 static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc, 881 const void __user *useraddr, 882 size_t size) 883 { 884 if (compat_need_64bit_alignment_fixup()) 885 return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size); 886 887 if (copy_from_user(rxnfc, useraddr, size)) 888 return -EFAULT; 889 890 return 0; 891 } 892 893 static int ethtool_rxnfc_copy_to_compat(void __user *useraddr, 894 const struct ethtool_rxnfc *rxnfc, 895 size_t size, const u32 *rule_buf) 896 { 897 struct compat_ethtool_rxnfc crxnfc; 898 899 memset(&crxnfc, 0, sizeof(crxnfc)); 900 crxnfc = (struct compat_ethtool_rxnfc) { 901 .cmd = rxnfc->cmd, 902 .flow_type = rxnfc->flow_type, 903 .data = rxnfc->data, 904 .fs = { 905 .flow_type = rxnfc->fs.flow_type, 906 .h_u = rxnfc->fs.h_u, 907 .h_ext = rxnfc->fs.h_ext, 908 .m_u = rxnfc->fs.m_u, 909 .m_ext = rxnfc->fs.m_ext, 910 .ring_cookie = rxnfc->fs.ring_cookie, 911 .location = rxnfc->fs.location, 912 }, 913 .rule_cnt = rxnfc->rule_cnt, 914 }; 915 916 if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc)))) 917 return -EFAULT; 918 919 return 0; 920 } 921 922 static int ethtool_rxnfc_copy_struct(u32 cmd, struct ethtool_rxnfc *info, 923 size_t *info_size, void __user *useraddr) 924 { 925 /* struct ethtool_rxnfc was originally defined for 926 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data 927 * members. User-space might still be using that 928 * definition. 929 */ 930 if (cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) 931 *info_size = (offsetof(struct ethtool_rxnfc, data) + 932 sizeof(info->data)); 933 934 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 935 return -EFAULT; 936 937 if ((cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) && info->flow_type & FLOW_RSS) { 938 *info_size = sizeof(*info); 939 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 940 return -EFAULT; 941 /* Since malicious users may modify the original data, 942 * we need to check whether FLOW_RSS is still requested. 
943 */ 944 if (!(info->flow_type & FLOW_RSS)) 945 return -EINVAL; 946 } 947 948 if (info->cmd != cmd) 949 return -EINVAL; 950 951 return 0; 952 } 953 954 static int ethtool_rxnfc_copy_to_user(void __user *useraddr, 955 const struct ethtool_rxnfc *rxnfc, 956 size_t size, const u32 *rule_buf) 957 { 958 int ret; 959 960 if (compat_need_64bit_alignment_fixup()) { 961 ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size, 962 rule_buf); 963 useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs); 964 } else { 965 ret = copy_to_user(useraddr, rxnfc, size); 966 useraddr += offsetof(struct ethtool_rxnfc, rule_locs); 967 } 968 969 if (ret) 970 return -EFAULT; 971 972 if (rule_buf) { 973 if (copy_to_user(useraddr, rule_buf, 974 rxnfc->rule_cnt * sizeof(u32))) 975 return -EFAULT; 976 } 977 978 return 0; 979 } 980 981 static bool flow_type_hashable(u32 flow_type) 982 { 983 switch (flow_type) { 984 case TCP_V4_FLOW: 985 case UDP_V4_FLOW: 986 case SCTP_V4_FLOW: 987 case AH_ESP_V4_FLOW: 988 case TCP_V6_FLOW: 989 case UDP_V6_FLOW: 990 case SCTP_V6_FLOW: 991 case AH_ESP_V6_FLOW: 992 case AH_V4_FLOW: 993 case ESP_V4_FLOW: 994 case AH_V6_FLOW: 995 case ESP_V6_FLOW: 996 case IPV4_FLOW: 997 case IPV6_FLOW: 998 case GTPU_V4_FLOW: 999 case GTPU_V6_FLOW: 1000 case GTPC_V4_FLOW: 1001 case GTPC_V6_FLOW: 1002 case GTPC_TEID_V4_FLOW: 1003 case GTPC_TEID_V6_FLOW: 1004 case GTPU_EH_V4_FLOW: 1005 case GTPU_EH_V6_FLOW: 1006 case GTPU_UL_V4_FLOW: 1007 case GTPU_UL_V6_FLOW: 1008 case GTPU_DL_V4_FLOW: 1009 case GTPU_DL_V6_FLOW: 1010 return true; 1011 } 1012 1013 return false; 1014 } 1015 1016 /* When adding a new type, update the assert and, if it's hashable, add it to 1017 * the flow_type_hashable switch case. 1018 */ 1019 static_assert(GTPU_DL_V6_FLOW + 1 == __FLOW_TYPE_COUNT); 1020 1021 static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh) 1022 { 1023 /* Sanity check: if symmetric-xor/symmetric-or-xor is set, then: 1024 * 1 - no other fields besides IP src/dst and/or L4 src/dst are set 1025 * 2 - If src is set, dst must also be set 1026 */ 1027 if ((input_xfrm != RXH_XFRM_NO_CHANGE && 1028 input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) && 1029 ((rxfh & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) || 1030 (!!(rxfh & RXH_IP_SRC) ^ !!(rxfh & RXH_IP_DST)) || 1031 (!!(rxfh & RXH_L4_B_0_1) ^ !!(rxfh & RXH_L4_B_2_3)))) 1032 return -EINVAL; 1033 1034 return 0; 1035 } 1036 1037 static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) 1038 { 1039 const struct ethtool_ops *ops = dev->ethtool_ops; 1040 struct ethtool_rxnfc info = { 1041 .cmd = ETHTOOL_GRXFH, 1042 }; 1043 int err; 1044 u32 i; 1045 1046 for (i = 0; i < __FLOW_TYPE_COUNT; i++) { 1047 if (!flow_type_hashable(i)) 1048 continue; 1049 1050 info.flow_type = i; 1051 err = ops->get_rxnfc(dev, &info, NULL); 1052 if (err) 1053 continue; 1054 1055 err = ethtool_check_xfrm_rxfh(input_xfrm, info.data); 1056 if (err) 1057 return err; 1058 } 1059 1060 return 0; 1061 } 1062 1063 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 1064 u32 cmd, void __user *useraddr) 1065 { 1066 const struct ethtool_ops *ops = dev->ethtool_ops; 1067 struct ethtool_rxnfc info; 1068 size_t info_size = sizeof(info); 1069 int rc; 1070 1071 if (!ops->set_rxnfc) 1072 return -EOPNOTSUPP; 1073 1074 rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1075 if (rc) 1076 return rc; 1077 1078 if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { 1079 /* Nonzero ring with RSS only makes sense 1080 * if NIC adds them 
together 1081 */ 1082 if (!ops->cap_rss_rxnfc_adds && 1083 ethtool_get_flow_spec_ring(info.fs.ring_cookie)) 1084 return -EINVAL; 1085 1086 if (!xa_load(&dev->ethtool->rss_ctx, info.rss_context)) 1087 return -EINVAL; 1088 } 1089 1090 if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) { 1091 struct ethtool_rxfh_param rxfh = {}; 1092 1093 rc = ops->get_rxfh(dev, &rxfh); 1094 if (rc) 1095 return rc; 1096 1097 rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); 1098 if (rc) 1099 return rc; 1100 } 1101 1102 rc = ops->set_rxnfc(dev, &info); 1103 if (rc) 1104 return rc; 1105 1106 if (cmd == ETHTOOL_SRXCLSRLINS && 1107 ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL)) 1108 return -EFAULT; 1109 1110 return 0; 1111 } 1112 1113 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, 1114 u32 cmd, void __user *useraddr) 1115 { 1116 struct ethtool_rxnfc info; 1117 size_t info_size = sizeof(info); 1118 const struct ethtool_ops *ops = dev->ethtool_ops; 1119 int ret; 1120 void *rule_buf = NULL; 1121 1122 if (!ops->get_rxnfc) 1123 return -EOPNOTSUPP; 1124 1125 ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1126 if (ret) 1127 return ret; 1128 1129 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 1130 if (info.rule_cnt > 0) { 1131 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) 1132 rule_buf = kcalloc(info.rule_cnt, sizeof(u32), 1133 GFP_USER); 1134 if (!rule_buf) 1135 return -ENOMEM; 1136 } 1137 } 1138 1139 ret = ops->get_rxnfc(dev, &info, rule_buf); 1140 if (ret < 0) 1141 goto err_out; 1142 1143 ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf); 1144 err_out: 1145 kfree(rule_buf); 1146 1147 return ret; 1148 } 1149 1150 static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, 1151 struct ethtool_rxnfc *rx_rings, 1152 u32 size) 1153 { 1154 int i; 1155 1156 if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0])))) 1157 return -EFAULT; 1158 1159 /* Validate ring indices */ 1160 for (i = 0; i < size; i++) 1161 if (indir[i] >= rx_rings->data) 1162 return -EINVAL; 1163 1164 return 0; 1165 } 1166 1167 u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 1168 1169 void netdev_rss_key_fill(void *buffer, size_t len) 1170 { 1171 BUG_ON(len > sizeof(netdev_rss_key)); 1172 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); 1173 memcpy(buffer, netdev_rss_key, len); 1174 } 1175 EXPORT_SYMBOL(netdev_rss_key_fill); 1176 1177 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, 1178 void __user *useraddr) 1179 { 1180 struct ethtool_rxfh_param rxfh = {}; 1181 u32 user_size; 1182 int ret; 1183 1184 if (!dev->ethtool_ops->get_rxfh_indir_size || 1185 !dev->ethtool_ops->get_rxfh) 1186 return -EOPNOTSUPP; 1187 rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 1188 if (rxfh.indir_size == 0) 1189 return -EOPNOTSUPP; 1190 1191 if (copy_from_user(&user_size, 1192 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1193 sizeof(user_size))) 1194 return -EFAULT; 1195 1196 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), 1197 &rxfh.indir_size, sizeof(rxfh.indir_size))) 1198 return -EFAULT; 1199 1200 /* If the user buffer size is 0, this is just a query for the 1201 * device table size. Otherwise, if it's smaller than the 1202 * device table size it's an error. 1203 */ 1204 if (user_size < rxfh.indir_size) 1205 return user_size == 0 ? 
0 : -EINVAL; 1206 1207 rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER); 1208 if (!rxfh.indir) 1209 return -ENOMEM; 1210 1211 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); 1212 if (ret) 1213 goto out; 1214 if (copy_to_user(useraddr + 1215 offsetof(struct ethtool_rxfh_indir, ring_index[0]), 1216 rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir))) 1217 ret = -EFAULT; 1218 1219 out: 1220 kfree(rxfh.indir); 1221 return ret; 1222 } 1223 1224 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, 1225 void __user *useraddr) 1226 { 1227 const struct ethtool_ops *ops = dev->ethtool_ops; 1228 struct ethtool_rxfh_param rxfh_dev = {}; 1229 struct netlink_ext_ack *extack = NULL; 1230 struct ethtool_rxnfc rx_rings; 1231 u32 user_size, i; 1232 int ret; 1233 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); 1234 1235 if (!ops->get_rxfh_indir_size || !ops->set_rxfh || 1236 !ops->get_rxnfc) 1237 return -EOPNOTSUPP; 1238 1239 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1240 if (rxfh_dev.indir_size == 0) 1241 return -EOPNOTSUPP; 1242 1243 if (copy_from_user(&user_size, 1244 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1245 sizeof(user_size))) 1246 return -EFAULT; 1247 1248 if (user_size != 0 && user_size != rxfh_dev.indir_size) 1249 return -EINVAL; 1250 1251 rxfh_dev.indir = kcalloc(rxfh_dev.indir_size, 1252 sizeof(rxfh_dev.indir[0]), GFP_USER); 1253 if (!rxfh_dev.indir) 1254 return -ENOMEM; 1255 1256 rx_rings.cmd = ETHTOOL_GRXRINGS; 1257 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1258 if (ret) 1259 goto out; 1260 1261 if (user_size == 0) { 1262 u32 *indir = rxfh_dev.indir; 1263 1264 for (i = 0; i < rxfh_dev.indir_size; i++) 1265 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1266 } else { 1267 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1268 useraddr + ringidx_offset, 1269 &rx_rings, 1270 rxfh_dev.indir_size); 1271 if (ret) 1272 goto out; 1273 } 1274 1275 rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE; 1276 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1277 if (ret) 1278 goto out; 1279 1280 /* indicate whether rxfh was set to default */ 1281 if (user_size == 0) 1282 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1283 else 1284 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1285 1286 out: 1287 kfree(rxfh_dev.indir); 1288 return ret; 1289 } 1290 1291 static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, 1292 void __user *useraddr) 1293 { 1294 const struct ethtool_ops *ops = dev->ethtool_ops; 1295 struct ethtool_rxfh_param rxfh_dev = {}; 1296 u32 user_indir_size, user_key_size; 1297 struct ethtool_rxfh_context *ctx; 1298 struct ethtool_rxfh rxfh; 1299 u32 indir_bytes; 1300 u8 *rss_config; 1301 u32 total_size; 1302 int ret; 1303 1304 if (!ops->get_rxfh) 1305 return -EOPNOTSUPP; 1306 1307 if (ops->get_rxfh_indir_size) 1308 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1309 if (ops->get_rxfh_key_size) 1310 rxfh_dev.key_size = ops->get_rxfh_key_size(dev); 1311 1312 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1313 return -EFAULT; 1314 user_indir_size = rxfh.indir_size; 1315 user_key_size = rxfh.key_size; 1316 1317 /* Check that reserved fields are 0 for now */ 1318 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1319 return -EINVAL; 1320 /* Most drivers don't handle rss_context, check it's 0 as well */ 1321 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1322 ops->create_rxfh_context)) 1323 return -EOPNOTSUPP; 1324 1325 rxfh.indir_size = rxfh_dev.indir_size; 1326 rxfh.key_size = 
rxfh_dev.key_size; 1327 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) 1328 return -EFAULT; 1329 1330 if ((user_indir_size && user_indir_size != rxfh_dev.indir_size) || 1331 (user_key_size && user_key_size != rxfh_dev.key_size)) 1332 return -EINVAL; 1333 1334 indir_bytes = user_indir_size * sizeof(rxfh_dev.indir[0]); 1335 total_size = indir_bytes + user_key_size; 1336 rss_config = kzalloc(total_size, GFP_USER); 1337 if (!rss_config) 1338 return -ENOMEM; 1339 1340 if (user_indir_size) 1341 rxfh_dev.indir = (u32 *)rss_config; 1342 1343 if (user_key_size) 1344 rxfh_dev.key = rss_config + indir_bytes; 1345 1346 if (rxfh.rss_context) { 1347 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1348 if (!ctx) { 1349 ret = -ENOENT; 1350 goto out; 1351 } 1352 if (rxfh_dev.indir) 1353 memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx), 1354 indir_bytes); 1355 if (!ops->rxfh_per_ctx_key) { 1356 rxfh_dev.key_size = 0; 1357 } else { 1358 if (rxfh_dev.key) 1359 memcpy(rxfh_dev.key, 1360 ethtool_rxfh_context_key(ctx), 1361 user_key_size); 1362 rxfh_dev.hfunc = ctx->hfunc; 1363 } 1364 rxfh_dev.input_xfrm = ctx->input_xfrm; 1365 ret = 0; 1366 } else { 1367 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); 1368 if (ret) 1369 goto out; 1370 } 1371 1372 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), 1373 &rxfh_dev.hfunc, sizeof(rxfh.hfunc))) { 1374 ret = -EFAULT; 1375 } else if (copy_to_user(useraddr + 1376 offsetof(struct ethtool_rxfh, input_xfrm), 1377 &rxfh_dev.input_xfrm, 1378 sizeof(rxfh.input_xfrm))) { 1379 ret = -EFAULT; 1380 } else if (copy_to_user(useraddr + 1381 offsetof(struct ethtool_rxfh, key_size), 1382 &rxfh_dev.key_size, 1383 sizeof(rxfh.key_size))) { 1384 ret = -EFAULT; 1385 } else if (copy_to_user(useraddr + 1386 offsetof(struct ethtool_rxfh, rss_config[0]), 1387 rss_config, total_size)) { 1388 ret = -EFAULT; 1389 } 1390 out: 1391 kfree(rss_config); 1392 1393 return ret; 1394 } 1395 1396 static struct ethtool_rxfh_context * 1397 ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, 1398 u32 indir_size, u32 key_size) 1399 { 1400 size_t indir_bytes, flex_len, key_off, size; 1401 struct ethtool_rxfh_context *ctx; 1402 u32 priv_bytes, indir_max; 1403 u16 key_max; 1404 1405 key_max = max(key_size, ops->rxfh_key_space); 1406 indir_max = max(indir_size, ops->rxfh_indir_space); 1407 1408 priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); 1409 indir_bytes = array_size(indir_max, sizeof(u32)); 1410 1411 key_off = size_add(priv_bytes, indir_bytes); 1412 flex_len = size_add(key_off, key_max); 1413 size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); 1414 1415 ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); 1416 if (!ctx) 1417 return NULL; 1418 1419 ctx->indir_size = indir_size; 1420 ctx->key_size = key_size; 1421 ctx->key_off = key_off; 1422 ctx->priv_size = ops->rxfh_priv_size; 1423 1424 ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; 1425 ctx->input_xfrm = RXH_XFRM_NO_CHANGE; 1426 1427 return ctx; 1428 } 1429 1430 static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, 1431 void __user *useraddr) 1432 { 1433 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); 1434 const struct ethtool_ops *ops = dev->ethtool_ops; 1435 u32 dev_indir_size = 0, dev_key_size = 0, i; 1436 u32 user_indir_len = 0, indir_bytes = 0; 1437 struct ethtool_rxfh_param rxfh_dev = {}; 1438 struct ethtool_rxfh_context *ctx = NULL; 1439 struct netlink_ext_ack *extack = NULL; 1440 struct ethtool_rxnfc rx_rings; 1441 struct ethtool_rxfh rxfh; 1442 bool locked = false; /* 
dev->ethtool->rss_lock taken */ 1443 bool create = false; 1444 u8 *rss_config; 1445 int ret; 1446 1447 if (!ops->get_rxnfc || !ops->set_rxfh) 1448 return -EOPNOTSUPP; 1449 1450 if (ops->get_rxfh_indir_size) 1451 dev_indir_size = ops->get_rxfh_indir_size(dev); 1452 if (ops->get_rxfh_key_size) 1453 dev_key_size = ops->get_rxfh_key_size(dev); 1454 1455 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1456 return -EFAULT; 1457 1458 /* Check that reserved fields are 0 for now */ 1459 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1460 return -EINVAL; 1461 /* Most drivers don't handle rss_context, check it's 0 as well */ 1462 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1463 ops->create_rxfh_context)) 1464 return -EOPNOTSUPP; 1465 /* Check input data transformation capabilities */ 1466 if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && 1467 rxfh.input_xfrm != RXH_XFRM_SYM_OR_XOR && 1468 rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) 1469 return -EINVAL; 1470 if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE && 1471 rxfh.input_xfrm & ~ops->supported_input_xfrm) 1472 return -EOPNOTSUPP; 1473 create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; 1474 1475 if ((rxfh.indir_size && 1476 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && 1477 rxfh.indir_size != dev_indir_size) || 1478 (rxfh.key_size && rxfh.key_size != dev_key_size)) 1479 return -EINVAL; 1480 1481 /* Must request at least one change: indir size, hash key, function 1482 * or input transformation. 1483 * There's no need for any of it in case of context creation. 1484 */ 1485 if (!create && 1486 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && 1487 rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE && 1488 rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) 1489 return -EINVAL; 1490 1491 ret = ethtool_check_flow_types(dev, rxfh.input_xfrm); 1492 if (ret) 1493 return ret; 1494 1495 indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); 1496 1497 /* Check settings which may be global rather than per RSS-context */ 1498 if (rxfh.rss_context && !ops->rxfh_per_ctx_key) 1499 if (rxfh.key_size || 1500 (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) || 1501 (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)) 1502 return -EOPNOTSUPP; 1503 1504 rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); 1505 if (!rss_config) 1506 return -ENOMEM; 1507 1508 rx_rings.cmd = ETHTOOL_GRXRINGS; 1509 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1510 if (ret) 1511 goto out; 1512 1513 /* rxfh.indir_size == 0 means reset the indir table to default (master 1514 * context) or delete the context (other RSS contexts). 1515 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. 
1516 */ 1517 if (rxfh.indir_size && 1518 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { 1519 user_indir_len = indir_bytes; 1520 rxfh_dev.indir = (u32 *)rss_config; 1521 rxfh_dev.indir_size = dev_indir_size; 1522 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1523 useraddr + rss_cfg_offset, 1524 &rx_rings, 1525 rxfh.indir_size); 1526 if (ret) 1527 goto out; 1528 } else if (rxfh.indir_size == 0) { 1529 if (rxfh.rss_context == 0) { 1530 u32 *indir; 1531 1532 rxfh_dev.indir = (u32 *)rss_config; 1533 rxfh_dev.indir_size = dev_indir_size; 1534 indir = rxfh_dev.indir; 1535 for (i = 0; i < dev_indir_size; i++) 1536 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1537 } else { 1538 rxfh_dev.rss_delete = true; 1539 } 1540 } 1541 1542 if (rxfh.key_size) { 1543 rxfh_dev.key_size = dev_key_size; 1544 rxfh_dev.key = rss_config + indir_bytes; 1545 if (copy_from_user(rxfh_dev.key, 1546 useraddr + rss_cfg_offset + user_indir_len, 1547 rxfh.key_size)) { 1548 ret = -EFAULT; 1549 goto out; 1550 } 1551 } 1552 1553 if (rxfh.rss_context) { 1554 mutex_lock(&dev->ethtool->rss_lock); 1555 locked = true; 1556 } 1557 1558 if (rxfh.rss_context && rxfh_dev.rss_delete) { 1559 ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); 1560 if (ret) 1561 goto out; 1562 } 1563 1564 if (create) { 1565 if (rxfh_dev.rss_delete) { 1566 ret = -EINVAL; 1567 goto out; 1568 } 1569 ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); 1570 if (!ctx) { 1571 ret = -ENOMEM; 1572 goto out; 1573 } 1574 1575 if (ops->create_rxfh_context) { 1576 u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX; 1577 u32 ctx_id; 1578 1579 /* driver uses new API, core allocates ID */ 1580 ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, 1581 XA_LIMIT(1, limit - 1), 1582 GFP_KERNEL_ACCOUNT); 1583 if (ret < 0) { 1584 kfree(ctx); 1585 goto out; 1586 } 1587 WARN_ON(!ctx_id); /* can't happen */ 1588 rxfh.rss_context = ctx_id; 1589 } 1590 } else if (rxfh.rss_context) { 1591 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1592 if (!ctx) { 1593 ret = -ENOENT; 1594 goto out; 1595 } 1596 } 1597 rxfh_dev.hfunc = rxfh.hfunc; 1598 rxfh_dev.rss_context = rxfh.rss_context; 1599 rxfh_dev.input_xfrm = rxfh.input_xfrm; 1600 1601 if (rxfh.rss_context && ops->create_rxfh_context) { 1602 if (create) { 1603 ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, 1604 extack); 1605 /* Make sure driver populates defaults */ 1606 WARN_ON_ONCE(!ret && !rxfh_dev.key && 1607 ops->rxfh_per_ctx_key && 1608 !memchr_inv(ethtool_rxfh_context_key(ctx), 1609 0, ctx->key_size)); 1610 } else if (rxfh_dev.rss_delete) { 1611 ret = ops->remove_rxfh_context(dev, ctx, 1612 rxfh.rss_context, 1613 extack); 1614 } else { 1615 ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, 1616 extack); 1617 } 1618 } else { 1619 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1620 } 1621 if (ret) { 1622 if (create) { 1623 /* failed to create, free our new tracking entry */ 1624 if (ops->create_rxfh_context) 1625 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1626 kfree(ctx); 1627 } 1628 goto out; 1629 } 1630 1631 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context), 1632 &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context))) 1633 ret = -EFAULT; 1634 1635 if (!rxfh_dev.rss_context) { 1636 /* indicate whether rxfh was set to default */ 1637 if (rxfh.indir_size == 0) 1638 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1639 else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) 1640 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1641 } 1642 /* Update rss_ctx tracking */ 1643 if 
(create && !ops->create_rxfh_context) { 1644 /* driver uses old API, it chose context ID */ 1645 if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) { 1646 /* context ID reused, our tracking is screwed */ 1647 kfree(ctx); 1648 goto out; 1649 } 1650 /* Allocate the exact ID the driver gave us */ 1651 if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context, 1652 ctx, GFP_KERNEL))) { 1653 kfree(ctx); 1654 goto out; 1655 } 1656 1657 /* Fetch the defaults for the old API, in the new API drivers 1658 * should write defaults into ctx themselves. 1659 */ 1660 rxfh_dev.indir = (u32 *)rss_config; 1661 rxfh_dev.indir_size = dev_indir_size; 1662 1663 rxfh_dev.key = rss_config + indir_bytes; 1664 rxfh_dev.key_size = dev_key_size; 1665 1666 ret = ops->get_rxfh(dev, &rxfh_dev); 1667 if (WARN_ON(ret)) { 1668 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1669 kfree(ctx); 1670 goto out; 1671 } 1672 } 1673 if (rxfh_dev.rss_delete) { 1674 WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); 1675 kfree(ctx); 1676 } else if (ctx) { 1677 if (rxfh_dev.indir) { 1678 for (i = 0; i < dev_indir_size; i++) 1679 ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; 1680 ctx->indir_configured = 1681 rxfh.indir_size && 1682 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE; 1683 } 1684 if (rxfh_dev.key) { 1685 memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, 1686 dev_key_size); 1687 ctx->key_configured = !!rxfh.key_size; 1688 } 1689 if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) 1690 ctx->hfunc = rxfh_dev.hfunc; 1691 if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE) 1692 ctx->input_xfrm = rxfh_dev.input_xfrm; 1693 } 1694 1695 out: 1696 if (locked) 1697 mutex_unlock(&dev->ethtool->rss_lock); 1698 kfree(rss_config); 1699 return ret; 1700 } 1701 1702 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 1703 { 1704 struct ethtool_regs regs; 1705 const struct ethtool_ops *ops = dev->ethtool_ops; 1706 void *regbuf; 1707 int reglen, ret; 1708 1709 if (!ops->get_regs || !ops->get_regs_len) 1710 return -EOPNOTSUPP; 1711 1712 if (copy_from_user(®s, useraddr, sizeof(regs))) 1713 return -EFAULT; 1714 1715 reglen = ops->get_regs_len(dev); 1716 if (reglen <= 0) 1717 return reglen; 1718 1719 if (regs.len > reglen) 1720 regs.len = reglen; 1721 1722 regbuf = vzalloc(reglen); 1723 if (!regbuf) 1724 return -ENOMEM; 1725 1726 if (regs.len < reglen) 1727 reglen = regs.len; 1728 1729 ops->get_regs(dev, ®s, regbuf); 1730 1731 ret = -EFAULT; 1732 if (copy_to_user(useraddr, ®s, sizeof(regs))) 1733 goto out; 1734 useraddr += offsetof(struct ethtool_regs, data); 1735 if (copy_to_user(useraddr, regbuf, reglen)) 1736 goto out; 1737 ret = 0; 1738 1739 out: 1740 vfree(regbuf); 1741 return ret; 1742 } 1743 1744 static int ethtool_reset(struct net_device *dev, char __user *useraddr) 1745 { 1746 struct ethtool_value reset; 1747 int ret; 1748 1749 if (!dev->ethtool_ops->reset) 1750 return -EOPNOTSUPP; 1751 1752 if (dev->ethtool->module_fw_flash_in_progress) 1753 return -EBUSY; 1754 1755 if (copy_from_user(&reset, useraddr, sizeof(reset))) 1756 return -EFAULT; 1757 1758 ret = dev->ethtool_ops->reset(dev, &reset.data); 1759 if (ret) 1760 return ret; 1761 1762 if (copy_to_user(useraddr, &reset, sizeof(reset))) 1763 return -EFAULT; 1764 return 0; 1765 } 1766 1767 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 1768 { 1769 struct ethtool_wolinfo wol; 1770 1771 if (!dev->ethtool_ops->get_wol) 1772 return -EOPNOTSUPP; 1773 1774 memset(&wol, 0, sizeof(struct 
ethtool_wolinfo)); 1775 wol.cmd = ETHTOOL_GWOL; 1776 dev->ethtool_ops->get_wol(dev, &wol); 1777 1778 if (copy_to_user(useraddr, &wol, sizeof(wol))) 1779 return -EFAULT; 1780 return 0; 1781 } 1782 1783 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1784 { 1785 struct ethtool_wolinfo wol, cur_wol; 1786 int ret; 1787 1788 if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) 1789 return -EOPNOTSUPP; 1790 1791 memset(&cur_wol, 0, sizeof(struct ethtool_wolinfo)); 1792 cur_wol.cmd = ETHTOOL_GWOL; 1793 dev->ethtool_ops->get_wol(dev, &cur_wol); 1794 1795 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1796 return -EFAULT; 1797 1798 if (wol.wolopts & ~cur_wol.supported) 1799 return -EINVAL; 1800 1801 if (wol.wolopts == cur_wol.wolopts && 1802 !memcmp(wol.sopass, cur_wol.sopass, sizeof(wol.sopass))) 1803 return 0; 1804 1805 ret = dev->ethtool_ops->set_wol(dev, &wol); 1806 if (ret) 1807 return ret; 1808 1809 dev->ethtool->wol_enabled = !!wol.wolopts; 1810 ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL); 1811 1812 return 0; 1813 } 1814 1815 static void eee_to_keee(struct ethtool_keee *keee, 1816 const struct ethtool_eee *eee) 1817 { 1818 memset(keee, 0, sizeof(*keee)); 1819 1820 keee->eee_enabled = eee->eee_enabled; 1821 keee->tx_lpi_enabled = eee->tx_lpi_enabled; 1822 keee->tx_lpi_timer = eee->tx_lpi_timer; 1823 1824 ethtool_convert_legacy_u32_to_link_mode(keee->advertised, 1825 eee->advertised); 1826 } 1827 1828 static void keee_to_eee(struct ethtool_eee *eee, 1829 const struct ethtool_keee *keee) 1830 { 1831 bool overflow; 1832 1833 memset(eee, 0, sizeof(*eee)); 1834 1835 eee->eee_active = keee->eee_active; 1836 eee->eee_enabled = keee->eee_enabled; 1837 eee->tx_lpi_enabled = keee->tx_lpi_enabled; 1838 eee->tx_lpi_timer = keee->tx_lpi_timer; 1839 1840 overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported, 1841 keee->supported); 1842 ethtool_convert_link_mode_to_legacy_u32(&eee->advertised, 1843 keee->advertised); 1844 ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised, 1845 keee->lp_advertised); 1846 if (overflow) 1847 pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n"); 1848 } 1849 1850 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) 1851 { 1852 struct ethtool_keee keee; 1853 struct ethtool_eee eee; 1854 int rc; 1855 1856 if (!dev->ethtool_ops->get_eee) 1857 return -EOPNOTSUPP; 1858 1859 memset(&keee, 0, sizeof(keee)); 1860 rc = dev->ethtool_ops->get_eee(dev, &keee); 1861 if (rc) 1862 return rc; 1863 1864 keee_to_eee(&eee, &keee); 1865 if (copy_to_user(useraddr, &eee, sizeof(eee))) 1866 return -EFAULT; 1867 1868 return 0; 1869 } 1870 1871 static int ethtool_set_eee(struct net_device *dev, char __user *useraddr) 1872 { 1873 struct ethtool_keee keee; 1874 struct ethtool_eee eee; 1875 int ret; 1876 1877 if (!dev->ethtool_ops->set_eee) 1878 return -EOPNOTSUPP; 1879 1880 if (copy_from_user(&eee, useraddr, sizeof(eee))) 1881 return -EFAULT; 1882 1883 eee_to_keee(&keee, &eee); 1884 ret = dev->ethtool_ops->set_eee(dev, &keee); 1885 if (!ret) 1886 ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL); 1887 return ret; 1888 } 1889 1890 static int ethtool_nway_reset(struct net_device *dev) 1891 { 1892 if (!dev->ethtool_ops->nway_reset) 1893 return -EOPNOTSUPP; 1894 1895 return dev->ethtool_ops->nway_reset(dev); 1896 } 1897 1898 static int ethtool_get_link(struct net_device *dev, char __user *useraddr) 1899 { 1900 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; 1901 int link = 
__ethtool_get_link(dev); 1902 1903 if (link < 0) 1904 return link; 1905 1906 edata.data = link; 1907 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1908 return -EFAULT; 1909 return 0; 1910 } 1911 1912 static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, 1913 int (*getter)(struct net_device *, 1914 struct ethtool_eeprom *, u8 *), 1915 u32 total_len) 1916 { 1917 struct ethtool_eeprom eeprom; 1918 void __user *userbuf = useraddr + sizeof(eeprom); 1919 u32 bytes_remaining; 1920 u8 *data; 1921 int ret = 0; 1922 1923 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 1924 return -EFAULT; 1925 1926 /* Check for wrap and zero */ 1927 if (eeprom.offset + eeprom.len <= eeprom.offset) 1928 return -EINVAL; 1929 1930 /* Check for exceeding total eeprom len */ 1931 if (eeprom.offset + eeprom.len > total_len) 1932 return -EINVAL; 1933 1934 data = kzalloc(PAGE_SIZE, GFP_USER); 1935 if (!data) 1936 return -ENOMEM; 1937 1938 bytes_remaining = eeprom.len; 1939 while (bytes_remaining > 0) { 1940 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 1941 1942 ret = getter(dev, &eeprom, data); 1943 if (ret) 1944 break; 1945 if (!eeprom.len) { 1946 ret = -EIO; 1947 break; 1948 } 1949 if (copy_to_user(userbuf, data, eeprom.len)) { 1950 ret = -EFAULT; 1951 break; 1952 } 1953 userbuf += eeprom.len; 1954 eeprom.offset += eeprom.len; 1955 bytes_remaining -= eeprom.len; 1956 } 1957 1958 eeprom.len = userbuf - (useraddr + sizeof(eeprom)); 1959 eeprom.offset -= eeprom.len; 1960 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) 1961 ret = -EFAULT; 1962 1963 kfree(data); 1964 return ret; 1965 } 1966 1967 static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) 1968 { 1969 const struct ethtool_ops *ops = dev->ethtool_ops; 1970 1971 if (!ops->get_eeprom || !ops->get_eeprom_len || 1972 !ops->get_eeprom_len(dev)) 1973 return -EOPNOTSUPP; 1974 1975 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, 1976 ops->get_eeprom_len(dev)); 1977 } 1978 1979 static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) 1980 { 1981 struct ethtool_eeprom eeprom; 1982 const struct ethtool_ops *ops = dev->ethtool_ops; 1983 void __user *userbuf = useraddr + sizeof(eeprom); 1984 u32 bytes_remaining; 1985 u8 *data; 1986 int ret = 0; 1987 1988 if (!ops->set_eeprom || !ops->get_eeprom_len || 1989 !ops->get_eeprom_len(dev)) 1990 return -EOPNOTSUPP; 1991 1992 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 1993 return -EFAULT; 1994 1995 /* Check for wrap and zero */ 1996 if (eeprom.offset + eeprom.len <= eeprom.offset) 1997 return -EINVAL; 1998 1999 /* Check for exceeding total eeprom len */ 2000 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 2001 return -EINVAL; 2002 2003 data = kzalloc(PAGE_SIZE, GFP_USER); 2004 if (!data) 2005 return -ENOMEM; 2006 2007 bytes_remaining = eeprom.len; 2008 while (bytes_remaining > 0) { 2009 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 2010 2011 if (copy_from_user(data, userbuf, eeprom.len)) { 2012 ret = -EFAULT; 2013 break; 2014 } 2015 ret = ops->set_eeprom(dev, &eeprom, data); 2016 if (ret) 2017 break; 2018 userbuf += eeprom.len; 2019 eeprom.offset += eeprom.len; 2020 bytes_remaining -= eeprom.len; 2021 } 2022 2023 kfree(data); 2024 return ret; 2025 } 2026 2027 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, 2028 void __user *useraddr) 2029 { 2030 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 2031 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2032 int ret; 
2033 2034 if (!dev->ethtool_ops->get_coalesce) 2035 return -EOPNOTSUPP; 2036 2037 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2038 NULL); 2039 if (ret) 2040 return ret; 2041 2042 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 2043 return -EFAULT; 2044 return 0; 2045 } 2046 2047 static bool 2048 ethtool_set_coalesce_supported(struct net_device *dev, 2049 struct ethtool_coalesce *coalesce) 2050 { 2051 u32 supported_params = dev->ethtool_ops->supported_coalesce_params; 2052 u32 nonzero_params = 0; 2053 2054 if (coalesce->rx_coalesce_usecs) 2055 nonzero_params |= ETHTOOL_COALESCE_RX_USECS; 2056 if (coalesce->rx_max_coalesced_frames) 2057 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES; 2058 if (coalesce->rx_coalesce_usecs_irq) 2059 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ; 2060 if (coalesce->rx_max_coalesced_frames_irq) 2061 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ; 2062 if (coalesce->tx_coalesce_usecs) 2063 nonzero_params |= ETHTOOL_COALESCE_TX_USECS; 2064 if (coalesce->tx_max_coalesced_frames) 2065 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES; 2066 if (coalesce->tx_coalesce_usecs_irq) 2067 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ; 2068 if (coalesce->tx_max_coalesced_frames_irq) 2069 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ; 2070 if (coalesce->stats_block_coalesce_usecs) 2071 nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS; 2072 if (coalesce->use_adaptive_rx_coalesce) 2073 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX; 2074 if (coalesce->use_adaptive_tx_coalesce) 2075 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX; 2076 if (coalesce->pkt_rate_low) 2077 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW; 2078 if (coalesce->rx_coalesce_usecs_low) 2079 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW; 2080 if (coalesce->rx_max_coalesced_frames_low) 2081 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW; 2082 if (coalesce->tx_coalesce_usecs_low) 2083 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW; 2084 if (coalesce->tx_max_coalesced_frames_low) 2085 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW; 2086 if (coalesce->pkt_rate_high) 2087 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH; 2088 if (coalesce->rx_coalesce_usecs_high) 2089 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH; 2090 if (coalesce->rx_max_coalesced_frames_high) 2091 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH; 2092 if (coalesce->tx_coalesce_usecs_high) 2093 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH; 2094 if (coalesce->tx_max_coalesced_frames_high) 2095 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH; 2096 if (coalesce->rate_sample_interval) 2097 nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL; 2098 2099 return (supported_params & nonzero_params) == nonzero_params; 2100 } 2101 2102 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, 2103 void __user *useraddr) 2104 { 2105 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2106 struct ethtool_coalesce coalesce; 2107 int ret; 2108 2109 if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) 2110 return -EOPNOTSUPP; 2111 2112 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2113 NULL); 2114 if (ret) 2115 return ret; 2116 2117 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) 2118 return -EFAULT; 2119 2120 if (!ethtool_set_coalesce_supported(dev, &coalesce)) 2121 return -EOPNOTSUPP; 2122 2123 ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, 2124 NULL); 2125 if 
(!ret) 2126 ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL); 2127 return ret; 2128 } 2129 2130 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 2131 { 2132 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; 2133 struct kernel_ethtool_ringparam kernel_ringparam = {}; 2134 2135 if (!dev->ethtool_ops->get_ringparam) 2136 return -EOPNOTSUPP; 2137 2138 dev->ethtool_ops->get_ringparam(dev, &ringparam, 2139 &kernel_ringparam, NULL); 2140 2141 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) 2142 return -EFAULT; 2143 return 0; 2144 } 2145 2146 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) 2147 { 2148 struct kernel_ethtool_ringparam kernel_ringparam; 2149 struct ethtool_ringparam ringparam, max; 2150 int ret; 2151 2152 if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) 2153 return -EOPNOTSUPP; 2154 2155 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) 2156 return -EFAULT; 2157 2158 ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL); 2159 2160 /* ensure new ring parameters are within the maximums */ 2161 if (ringparam.rx_pending > max.rx_max_pending || 2162 ringparam.rx_mini_pending > max.rx_mini_max_pending || 2163 ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending || 2164 ringparam.tx_pending > max.tx_max_pending) 2165 return -EINVAL; 2166 2167 ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, 2168 &kernel_ringparam, NULL); 2169 if (!ret) 2170 ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); 2171 return ret; 2172 } 2173 2174 static noinline_for_stack int ethtool_get_channels(struct net_device *dev, 2175 void __user *useraddr) 2176 { 2177 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; 2178 2179 if (!dev->ethtool_ops->get_channels) 2180 return -EOPNOTSUPP; 2181 2182 dev->ethtool_ops->get_channels(dev, &channels); 2183 2184 if (copy_to_user(useraddr, &channels, sizeof(channels))) 2185 return -EFAULT; 2186 return 0; 2187 } 2188 2189 static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 2190 void __user *useraddr) 2191 { 2192 struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; 2193 u16 from_channel, to_channel; 2194 unsigned int i; 2195 int ret; 2196 2197 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) 2198 return -EOPNOTSUPP; 2199 2200 if (copy_from_user(&channels, useraddr, sizeof(channels))) 2201 return -EFAULT; 2202 2203 dev->ethtool_ops->get_channels(dev, &curr); 2204 2205 if (channels.rx_count == curr.rx_count && 2206 channels.tx_count == curr.tx_count && 2207 channels.combined_count == curr.combined_count && 2208 channels.other_count == curr.other_count) 2209 return 0; 2210 2211 /* ensure new counts are within the maximums */ 2212 if (channels.rx_count > curr.max_rx || 2213 channels.tx_count > curr.max_tx || 2214 channels.combined_count > curr.max_combined || 2215 channels.other_count > curr.max_other) 2216 return -EINVAL; 2217 2218 /* ensure there is at least one RX and one TX channel */ 2219 if (!channels.combined_count && 2220 (!channels.rx_count || !channels.tx_count)) 2221 return -EINVAL; 2222 2223 ret = ethtool_check_max_channel(dev, channels, NULL); 2224 if (ret) 2225 return ret; 2226 2227 /* Disabling channels, query zero-copy AF_XDP sockets */ 2228 from_channel = channels.combined_count + 2229 min(channels.rx_count, channels.tx_count); 2230 to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); 2231 for (i = from_channel; i < to_channel; i++) 
2232 if (xsk_get_pool_from_qid(dev, i)) 2233 return -EINVAL; 2234 2235 ret = dev->ethtool_ops->set_channels(dev, &channels); 2236 if (!ret) 2237 ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL); 2238 return ret; 2239 } 2240 2241 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 2242 { 2243 struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM }; 2244 2245 if (!dev->ethtool_ops->get_pauseparam) 2246 return -EOPNOTSUPP; 2247 2248 dev->ethtool_ops->get_pauseparam(dev, &pauseparam); 2249 2250 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) 2251 return -EFAULT; 2252 return 0; 2253 } 2254 2255 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) 2256 { 2257 struct ethtool_pauseparam pauseparam; 2258 int ret; 2259 2260 if (!dev->ethtool_ops->set_pauseparam) 2261 return -EOPNOTSUPP; 2262 2263 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 2264 return -EFAULT; 2265 2266 ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam); 2267 if (!ret) 2268 ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL); 2269 return ret; 2270 } 2271 2272 static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 2273 { 2274 struct ethtool_test test; 2275 const struct ethtool_ops *ops = dev->ethtool_ops; 2276 u64 *data; 2277 int ret, test_len; 2278 2279 if (!ops->self_test || !ops->get_sset_count) 2280 return -EOPNOTSUPP; 2281 2282 test_len = ops->get_sset_count(dev, ETH_SS_TEST); 2283 if (test_len < 0) 2284 return test_len; 2285 WARN_ON(test_len == 0); 2286 2287 if (copy_from_user(&test, useraddr, sizeof(test))) 2288 return -EFAULT; 2289 2290 test.len = test_len; 2291 data = kcalloc(test_len, sizeof(u64), GFP_USER); 2292 if (!data) 2293 return -ENOMEM; 2294 2295 netif_testing_on(dev); 2296 ops->self_test(dev, &test, data); 2297 netif_testing_off(dev); 2298 2299 ret = -EFAULT; 2300 if (copy_to_user(useraddr, &test, sizeof(test))) 2301 goto out; 2302 useraddr += sizeof(test); 2303 if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64)))) 2304 goto out; 2305 ret = 0; 2306 2307 out: 2308 kfree(data); 2309 return ret; 2310 } 2311 2312 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) 2313 { 2314 struct ethtool_gstrings gstrings; 2315 u8 *data; 2316 int ret; 2317 2318 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) 2319 return -EFAULT; 2320 2321 ret = __ethtool_get_sset_count(dev, gstrings.string_set); 2322 if (ret < 0) 2323 return ret; 2324 if (ret > S32_MAX / ETH_GSTRING_LEN) 2325 return -ENOMEM; 2326 WARN_ON_ONCE(!ret); 2327 2328 gstrings.len = ret; 2329 2330 if (gstrings.len) { 2331 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); 2332 if (!data) 2333 return -ENOMEM; 2334 2335 __ethtool_get_strings(dev, gstrings.string_set, data); 2336 } else { 2337 data = NULL; 2338 } 2339 2340 ret = -EFAULT; 2341 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 2342 goto out; 2343 useraddr += sizeof(gstrings); 2344 if (gstrings.len && 2345 copy_to_user(useraddr, data, 2346 array_size(gstrings.len, ETH_GSTRING_LEN))) 2347 goto out; 2348 ret = 0; 2349 2350 out: 2351 vfree(data); 2352 return ret; 2353 } 2354 2355 __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...) 
2356 { 2357 va_list args; 2358 2359 va_start(args, fmt); 2360 vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); 2361 va_end(args); 2362 2363 *data += ETH_GSTRING_LEN; 2364 } 2365 EXPORT_SYMBOL(ethtool_sprintf); 2366 2367 void ethtool_puts(u8 **data, const char *str) 2368 { 2369 strscpy(*data, str, ETH_GSTRING_LEN); 2370 *data += ETH_GSTRING_LEN; 2371 } 2372 EXPORT_SYMBOL(ethtool_puts); 2373 2374 static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 2375 { 2376 struct ethtool_value id; 2377 static bool busy; 2378 const struct ethtool_ops *ops = dev->ethtool_ops; 2379 netdevice_tracker dev_tracker; 2380 int rc; 2381 2382 if (!ops->set_phys_id) 2383 return -EOPNOTSUPP; 2384 2385 if (busy) 2386 return -EBUSY; 2387 2388 if (copy_from_user(&id, useraddr, sizeof(id))) 2389 return -EFAULT; 2390 2391 rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); 2392 if (rc < 0) 2393 return rc; 2394 2395 /* Drop the RTNL lock while waiting, but prevent reentry or 2396 * removal of the device. 2397 */ 2398 busy = true; 2399 netdev_hold(dev, &dev_tracker, GFP_KERNEL); 2400 netdev_unlock_ops(dev); 2401 rtnl_unlock(); 2402 2403 if (rc == 0) { 2404 /* Driver will handle this itself */ 2405 schedule_timeout_interruptible( 2406 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT); 2407 } else { 2408 /* Driver expects to be called at twice the frequency in rc */ 2409 int n = rc * 2, interval = HZ / n; 2410 u64 count = mul_u32_u32(n, id.data); 2411 u64 i = 0; 2412 2413 do { 2414 rtnl_lock(); 2415 netdev_lock_ops(dev); 2416 rc = ops->set_phys_id(dev, 2417 (i++ & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON); 2418 netdev_unlock_ops(dev); 2419 rtnl_unlock(); 2420 if (rc) 2421 break; 2422 schedule_timeout_interruptible(interval); 2423 } while (!signal_pending(current) && (!id.data || i < count)); 2424 } 2425 2426 rtnl_lock(); 2427 netdev_lock_ops(dev); 2428 netdev_put(dev, &dev_tracker); 2429 busy = false; 2430 2431 (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); 2432 return rc; 2433 } 2434 2435 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 2436 { 2437 struct ethtool_stats stats; 2438 const struct ethtool_ops *ops = dev->ethtool_ops; 2439 u64 *data; 2440 int ret, n_stats; 2441 2442 if (!ops->get_ethtool_stats || !ops->get_sset_count) 2443 return -EOPNOTSUPP; 2444 2445 n_stats = ops->get_sset_count(dev, ETH_SS_STATS); 2446 if (n_stats < 0) 2447 return n_stats; 2448 if (n_stats > S32_MAX / sizeof(u64)) 2449 return -ENOMEM; 2450 WARN_ON_ONCE(!n_stats); 2451 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2452 return -EFAULT; 2453 2454 stats.n_stats = n_stats; 2455 2456 if (n_stats) { 2457 data = vzalloc(array_size(n_stats, sizeof(u64))); 2458 if (!data) 2459 return -ENOMEM; 2460 ops->get_ethtool_stats(dev, &stats, data); 2461 } else { 2462 data = NULL; 2463 } 2464 2465 ret = -EFAULT; 2466 if (copy_to_user(useraddr, &stats, sizeof(stats))) 2467 goto out; 2468 useraddr += sizeof(stats); 2469 if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) 2470 goto out; 2471 ret = 0; 2472 2473 out: 2474 vfree(data); 2475 return ret; 2476 } 2477 2478 static int ethtool_vzalloc_stats_array(int n_stats, u64 **data) 2479 { 2480 if (n_stats < 0) 2481 return n_stats; 2482 if (n_stats > S32_MAX / sizeof(u64)) 2483 return -ENOMEM; 2484 if (WARN_ON_ONCE(!n_stats)) 2485 return -EOPNOTSUPP; 2486 2487 *data = vzalloc(array_size(n_stats, sizeof(u64))); 2488 if (!*data) 2489 return -ENOMEM; 2490 2491 return 0; 2492 } 2493 2494 static int ethtool_get_phy_stats_phydev(struct phy_device 
*phydev, 2495 struct ethtool_stats *stats, 2496 u64 **data) 2497 { 2498 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 2499 int n_stats, ret; 2500 2501 if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats) 2502 return -EOPNOTSUPP; 2503 2504 n_stats = phy_ops->get_sset_count(phydev); 2505 2506 ret = ethtool_vzalloc_stats_array(n_stats, data); 2507 if (ret) 2508 return ret; 2509 2510 stats->n_stats = n_stats; 2511 return phy_ops->get_stats(phydev, stats, *data); 2512 } 2513 2514 static int ethtool_get_phy_stats_ethtool(struct net_device *dev, 2515 struct ethtool_stats *stats, 2516 u64 **data) 2517 { 2518 const struct ethtool_ops *ops = dev->ethtool_ops; 2519 int n_stats, ret; 2520 2521 if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats) 2522 return -EOPNOTSUPP; 2523 2524 n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); 2525 2526 ret = ethtool_vzalloc_stats_array(n_stats, data); 2527 if (ret) 2528 return ret; 2529 2530 stats->n_stats = n_stats; 2531 ops->get_ethtool_phy_stats(dev, stats, *data); 2532 2533 return 0; 2534 } 2535 2536 static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) 2537 { 2538 struct phy_device *phydev = dev->phydev; 2539 struct ethtool_stats stats; 2540 u64 *data = NULL; 2541 int ret = -EOPNOTSUPP; 2542 2543 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2544 return -EFAULT; 2545 2546 if (phydev) 2547 ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data); 2548 2549 if (ret == -EOPNOTSUPP) 2550 ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data); 2551 2552 if (ret) 2553 goto out; 2554 2555 if (copy_to_user(useraddr, &stats, sizeof(stats))) { 2556 ret = -EFAULT; 2557 goto out; 2558 } 2559 2560 useraddr += sizeof(stats); 2561 if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64)))) 2562 ret = -EFAULT; 2563 2564 out: 2565 vfree(data); 2566 return ret; 2567 } 2568 2569 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) 2570 { 2571 struct ethtool_perm_addr epaddr; 2572 2573 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) 2574 return -EFAULT; 2575 2576 if (epaddr.size < dev->addr_len) 2577 return -ETOOSMALL; 2578 epaddr.size = dev->addr_len; 2579 2580 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) 2581 return -EFAULT; 2582 useraddr += sizeof(epaddr); 2583 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) 2584 return -EFAULT; 2585 return 0; 2586 } 2587 2588 static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 2589 u32 cmd, u32 (*actor)(struct net_device *)) 2590 { 2591 struct ethtool_value edata = { .cmd = cmd }; 2592 2593 if (!actor) 2594 return -EOPNOTSUPP; 2595 2596 edata.data = actor(dev); 2597 2598 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2599 return -EFAULT; 2600 return 0; 2601 } 2602 2603 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, 2604 void (*actor)(struct net_device *, u32)) 2605 { 2606 struct ethtool_value edata; 2607 2608 if (!actor) 2609 return -EOPNOTSUPP; 2610 2611 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2612 return -EFAULT; 2613 2614 actor(dev, edata.data); 2615 return 0; 2616 } 2617 2618 static int ethtool_set_value(struct net_device *dev, char __user *useraddr, 2619 int (*actor)(struct net_device *, u32)) 2620 { 2621 struct ethtool_value edata; 2622 2623 if (!actor) 2624 return -EOPNOTSUPP; 2625 2626 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2627 return -EFAULT; 2628 2629 return actor(dev, edata.data); 2630 } 2631 2632 static 
int 2633 ethtool_flash_device(struct net_device *dev, struct ethtool_devlink_compat *req) 2634 { 2635 if (!dev->ethtool_ops->flash_device) { 2636 req->devlink = netdev_to_devlink_get(dev); 2637 return 0; 2638 } 2639 2640 return dev->ethtool_ops->flash_device(dev, &req->efl); 2641 } 2642 2643 static int ethtool_set_dump(struct net_device *dev, 2644 void __user *useraddr) 2645 { 2646 struct ethtool_dump dump; 2647 2648 if (!dev->ethtool_ops->set_dump) 2649 return -EOPNOTSUPP; 2650 2651 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2652 return -EFAULT; 2653 2654 return dev->ethtool_ops->set_dump(dev, &dump); 2655 } 2656 2657 static int ethtool_get_dump_flag(struct net_device *dev, 2658 void __user *useraddr) 2659 { 2660 int ret; 2661 struct ethtool_dump dump; 2662 const struct ethtool_ops *ops = dev->ethtool_ops; 2663 2664 if (!ops->get_dump_flag) 2665 return -EOPNOTSUPP; 2666 2667 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2668 return -EFAULT; 2669 2670 ret = ops->get_dump_flag(dev, &dump); 2671 if (ret) 2672 return ret; 2673 2674 if (copy_to_user(useraddr, &dump, sizeof(dump))) 2675 return -EFAULT; 2676 return 0; 2677 } 2678 2679 static int ethtool_get_dump_data(struct net_device *dev, 2680 void __user *useraddr) 2681 { 2682 int ret; 2683 __u32 len; 2684 struct ethtool_dump dump, tmp; 2685 const struct ethtool_ops *ops = dev->ethtool_ops; 2686 void *data = NULL; 2687 2688 if (!ops->get_dump_data || !ops->get_dump_flag) 2689 return -EOPNOTSUPP; 2690 2691 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2692 return -EFAULT; 2693 2694 memset(&tmp, 0, sizeof(tmp)); 2695 tmp.cmd = ETHTOOL_GET_DUMP_FLAG; 2696 ret = ops->get_dump_flag(dev, &tmp); 2697 if (ret) 2698 return ret; 2699 2700 len = min(tmp.len, dump.len); 2701 if (!len) 2702 return -EFAULT; 2703 2704 /* Don't ever let the driver think there's more space available 2705 * than it requested with .get_dump_flag(). 2706 */ 2707 dump.len = len; 2708 2709 /* Always allocate enough space to hold the whole thing so that the 2710 * driver does not need to check the length and bother with partial 2711 * dumping. 2712 */ 2713 data = vzalloc(tmp.len); 2714 if (!data) 2715 return -ENOMEM; 2716 ret = ops->get_dump_data(dev, &dump, data); 2717 if (ret) 2718 goto out; 2719 2720 /* There are two sane possibilities: 2721 * 1. The driver's .get_dump_data() does not touch dump.len. 2722 * 2. Or it may set dump.len to how much it really writes, which 2723 * should be tmp.len (or len if it can do a partial dump). 2724 * In any case respond to userspace with the actual length of data 2725 * it's receiving. 
2726 */ 2727 WARN_ON(dump.len != len && dump.len != tmp.len); 2728 dump.len = len; 2729 2730 if (copy_to_user(useraddr, &dump, sizeof(dump))) { 2731 ret = -EFAULT; 2732 goto out; 2733 } 2734 useraddr += offsetof(struct ethtool_dump, data); 2735 if (copy_to_user(useraddr, data, len)) 2736 ret = -EFAULT; 2737 out: 2738 vfree(data); 2739 return ret; 2740 } 2741 2742 static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) 2743 { 2744 struct kernel_ethtool_ts_info kernel_info; 2745 struct ethtool_ts_info info = {}; 2746 int err; 2747 2748 err = __ethtool_get_ts_info(dev, &kernel_info); 2749 if (err) 2750 return err; 2751 2752 info.cmd = kernel_info.cmd; 2753 info.so_timestamping = kernel_info.so_timestamping; 2754 info.phc_index = kernel_info.phc_index; 2755 info.tx_types = kernel_info.tx_types; 2756 info.rx_filters = kernel_info.rx_filters; 2757 2758 if (copy_to_user(useraddr, &info, sizeof(info))) 2759 return -EFAULT; 2760 2761 return 0; 2762 } 2763 2764 int ethtool_get_module_info_call(struct net_device *dev, 2765 struct ethtool_modinfo *modinfo) 2766 { 2767 const struct ethtool_ops *ops = dev->ethtool_ops; 2768 struct phy_device *phydev = dev->phydev; 2769 2770 if (dev->ethtool->module_fw_flash_in_progress) 2771 return -EBUSY; 2772 2773 if (dev->sfp_bus) 2774 return sfp_get_module_info(dev->sfp_bus, modinfo); 2775 2776 if (phydev && phydev->drv && phydev->drv->module_info) 2777 return phydev->drv->module_info(phydev, modinfo); 2778 2779 if (ops->get_module_info) 2780 return ops->get_module_info(dev, modinfo); 2781 2782 return -EOPNOTSUPP; 2783 } 2784 2785 static int ethtool_get_module_info(struct net_device *dev, 2786 void __user *useraddr) 2787 { 2788 int ret; 2789 struct ethtool_modinfo modinfo; 2790 2791 if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) 2792 return -EFAULT; 2793 2794 ret = ethtool_get_module_info_call(dev, &modinfo); 2795 if (ret) 2796 return ret; 2797 2798 if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) 2799 return -EFAULT; 2800 2801 return 0; 2802 } 2803 2804 int ethtool_get_module_eeprom_call(struct net_device *dev, 2805 struct ethtool_eeprom *ee, u8 *data) 2806 { 2807 const struct ethtool_ops *ops = dev->ethtool_ops; 2808 struct phy_device *phydev = dev->phydev; 2809 2810 if (dev->ethtool->module_fw_flash_in_progress) 2811 return -EBUSY; 2812 2813 if (dev->sfp_bus) 2814 return sfp_get_module_eeprom(dev->sfp_bus, ee, data); 2815 2816 if (phydev && phydev->drv && phydev->drv->module_eeprom) 2817 return phydev->drv->module_eeprom(phydev, ee, data); 2818 2819 if (ops->get_module_eeprom) 2820 return ops->get_module_eeprom(dev, ee, data); 2821 2822 return -EOPNOTSUPP; 2823 } 2824 2825 static int ethtool_get_module_eeprom(struct net_device *dev, 2826 void __user *useraddr) 2827 { 2828 int ret; 2829 struct ethtool_modinfo modinfo; 2830 2831 ret = ethtool_get_module_info_call(dev, &modinfo); 2832 if (ret) 2833 return ret; 2834 2835 return ethtool_get_any_eeprom(dev, useraddr, 2836 ethtool_get_module_eeprom_call, 2837 modinfo.eeprom_len); 2838 } 2839 2840 static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) 2841 { 2842 switch (tuna->id) { 2843 case ETHTOOL_RX_COPYBREAK: 2844 case ETHTOOL_TX_COPYBREAK: 2845 case ETHTOOL_TX_COPYBREAK_BUF_SIZE: 2846 if (tuna->len != sizeof(u32) || 2847 tuna->type_id != ETHTOOL_TUNABLE_U32) 2848 return -EINVAL; 2849 break; 2850 case ETHTOOL_PFC_PREVENTION_TOUT: 2851 if (tuna->len != sizeof(u16) || 2852 tuna->type_id != ETHTOOL_TUNABLE_U16) 2853 return -EINVAL; 2854 break; 2855 default: 2856 
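		/* Tunable IDs not listed above have no known length/type to
		 * validate here, so they are rejected before reaching the
		 * driver.
		 */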
return -EINVAL; 2857 } 2858 2859 return 0; 2860 } 2861 2862 static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) 2863 { 2864 int ret; 2865 struct ethtool_tunable tuna; 2866 const struct ethtool_ops *ops = dev->ethtool_ops; 2867 void *data; 2868 2869 if (!ops->get_tunable) 2870 return -EOPNOTSUPP; 2871 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2872 return -EFAULT; 2873 ret = ethtool_tunable_valid(&tuna); 2874 if (ret) 2875 return ret; 2876 data = kzalloc(tuna.len, GFP_USER); 2877 if (!data) 2878 return -ENOMEM; 2879 ret = ops->get_tunable(dev, &tuna, data); 2880 if (ret) 2881 goto out; 2882 useraddr += sizeof(tuna); 2883 ret = -EFAULT; 2884 if (copy_to_user(useraddr, data, tuna.len)) 2885 goto out; 2886 ret = 0; 2887 2888 out: 2889 kfree(data); 2890 return ret; 2891 } 2892 2893 static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) 2894 { 2895 int ret; 2896 struct ethtool_tunable tuna; 2897 const struct ethtool_ops *ops = dev->ethtool_ops; 2898 void *data; 2899 2900 if (!ops->set_tunable) 2901 return -EOPNOTSUPP; 2902 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2903 return -EFAULT; 2904 ret = ethtool_tunable_valid(&tuna); 2905 if (ret) 2906 return ret; 2907 useraddr += sizeof(tuna); 2908 data = memdup_user(useraddr, tuna.len); 2909 if (IS_ERR(data)) 2910 return PTR_ERR(data); 2911 ret = ops->set_tunable(dev, &tuna, data); 2912 2913 kfree(data); 2914 return ret; 2915 } 2916 2917 static noinline_for_stack int 2918 ethtool_get_per_queue_coalesce(struct net_device *dev, 2919 void __user *useraddr, 2920 struct ethtool_per_queue_op *per_queue_opt) 2921 { 2922 u32 bit; 2923 int ret; 2924 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 2925 2926 if (!dev->ethtool_ops->get_per_queue_coalesce) 2927 return -EOPNOTSUPP; 2928 2929 useraddr += sizeof(*per_queue_opt); 2930 2931 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, 2932 MAX_NUM_QUEUE); 2933 2934 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 2935 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 2936 2937 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce); 2938 if (ret != 0) 2939 return ret; 2940 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 2941 return -EFAULT; 2942 useraddr += sizeof(coalesce); 2943 } 2944 2945 return 0; 2946 } 2947 2948 static noinline_for_stack int 2949 ethtool_set_per_queue_coalesce(struct net_device *dev, 2950 void __user *useraddr, 2951 struct ethtool_per_queue_op *per_queue_opt) 2952 { 2953 u32 bit; 2954 int i, ret = 0; 2955 int n_queue; 2956 struct ethtool_coalesce *backup = NULL, *tmp = NULL; 2957 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 2958 2959 if ((!dev->ethtool_ops->set_per_queue_coalesce) || 2960 (!dev->ethtool_ops->get_per_queue_coalesce)) 2961 return -EOPNOTSUPP; 2962 2963 useraddr += sizeof(*per_queue_opt); 2964 2965 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); 2966 n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); 2967 tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); 2968 if (!backup) 2969 return -ENOMEM; 2970 2971 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 2972 struct ethtool_coalesce coalesce; 2973 2974 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp); 2975 if (ret != 0) 2976 goto roll_back; 2977 2978 tmp++; 2979 2980 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) { 2981 ret = -EFAULT; 2982 goto roll_back; 2983 } 2984 2985 if (!ethtool_set_coalesce_supported(dev, &coalesce)) { 2986 ret = -EOPNOTSUPP; 2987 goto 
roll_back; 2988 } 2989 2990 ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce); 2991 if (ret != 0) 2992 goto roll_back; 2993 2994 useraddr += sizeof(coalesce); 2995 } 2996 2997 roll_back: 2998 if (ret != 0) { 2999 tmp = backup; 3000 for_each_set_bit(i, queue_mask, bit) { 3001 dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp); 3002 tmp++; 3003 } 3004 } 3005 kfree(backup); 3006 3007 return ret; 3008 } 3009 3010 static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev, 3011 void __user *useraddr, u32 sub_cmd) 3012 { 3013 struct ethtool_per_queue_op per_queue_opt; 3014 3015 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) 3016 return -EFAULT; 3017 3018 if (per_queue_opt.sub_command != sub_cmd) 3019 return -EINVAL; 3020 3021 switch (per_queue_opt.sub_command) { 3022 case ETHTOOL_GCOALESCE: 3023 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3024 case ETHTOOL_SCOALESCE: 3025 return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3026 default: 3027 return -EOPNOTSUPP; 3028 } 3029 } 3030 3031 static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna) 3032 { 3033 switch (tuna->id) { 3034 case ETHTOOL_PHY_DOWNSHIFT: 3035 case ETHTOOL_PHY_FAST_LINK_DOWN: 3036 if (tuna->len != sizeof(u8) || 3037 tuna->type_id != ETHTOOL_TUNABLE_U8) 3038 return -EINVAL; 3039 break; 3040 case ETHTOOL_PHY_EDPD: 3041 if (tuna->len != sizeof(u16) || 3042 tuna->type_id != ETHTOOL_TUNABLE_U16) 3043 return -EINVAL; 3044 break; 3045 default: 3046 return -EINVAL; 3047 } 3048 3049 return 0; 3050 } 3051 3052 static int get_phy_tunable(struct net_device *dev, void __user *useraddr) 3053 { 3054 struct phy_device *phydev = dev->phydev; 3055 struct ethtool_tunable tuna; 3056 bool phy_drv_tunable; 3057 void *data; 3058 int ret; 3059 3060 phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; 3061 if (!phy_drv_tunable && !dev->ethtool_ops->get_phy_tunable) 3062 return -EOPNOTSUPP; 3063 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3064 return -EFAULT; 3065 ret = ethtool_phy_tunable_valid(&tuna); 3066 if (ret) 3067 return ret; 3068 data = kzalloc(tuna.len, GFP_USER); 3069 if (!data) 3070 return -ENOMEM; 3071 if (phy_drv_tunable) { 3072 mutex_lock(&phydev->lock); 3073 ret = phydev->drv->get_tunable(phydev, &tuna, data); 3074 mutex_unlock(&phydev->lock); 3075 } else { 3076 ret = dev->ethtool_ops->get_phy_tunable(dev, &tuna, data); 3077 } 3078 if (ret) 3079 goto out; 3080 useraddr += sizeof(tuna); 3081 ret = -EFAULT; 3082 if (copy_to_user(useraddr, data, tuna.len)) 3083 goto out; 3084 ret = 0; 3085 3086 out: 3087 kfree(data); 3088 return ret; 3089 } 3090 3091 static int set_phy_tunable(struct net_device *dev, void __user *useraddr) 3092 { 3093 struct phy_device *phydev = dev->phydev; 3094 struct ethtool_tunable tuna; 3095 bool phy_drv_tunable; 3096 void *data; 3097 int ret; 3098 3099 phy_drv_tunable = phydev && phydev->drv && phydev->drv->set_tunable; 3100 if (!phy_drv_tunable && !dev->ethtool_ops->set_phy_tunable) 3101 return -EOPNOTSUPP; 3102 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3103 return -EFAULT; 3104 ret = ethtool_phy_tunable_valid(&tuna); 3105 if (ret) 3106 return ret; 3107 useraddr += sizeof(tuna); 3108 data = memdup_user(useraddr, tuna.len); 3109 if (IS_ERR(data)) 3110 return PTR_ERR(data); 3111 if (phy_drv_tunable) { 3112 mutex_lock(&phydev->lock); 3113 ret = phydev->drv->set_tunable(phydev, &tuna, data); 3114 mutex_unlock(&phydev->lock); 3115 } else { 3116 ret =
dev->ethtool_ops->set_phy_tunable(dev, &tuna, data); 3117 } 3118 3119 kfree(data); 3120 return ret; 3121 } 3122 3123 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) 3124 { 3125 struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM }; 3126 int rc; 3127 3128 if (!dev->ethtool_ops->get_fecparam) 3129 return -EOPNOTSUPP; 3130 3131 rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); 3132 if (rc) 3133 return rc; 3134 3135 if (WARN_ON_ONCE(fecparam.reserved)) 3136 fecparam.reserved = 0; 3137 3138 if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) 3139 return -EFAULT; 3140 return 0; 3141 } 3142 3143 static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) 3144 { 3145 struct ethtool_fecparam fecparam; 3146 3147 if (!dev->ethtool_ops->set_fecparam) 3148 return -EOPNOTSUPP; 3149 3150 if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) 3151 return -EFAULT; 3152 3153 if (!fecparam.fec || fecparam.fec & ETHTOOL_FEC_NONE) 3154 return -EINVAL; 3155 3156 fecparam.active_fec = 0; 3157 fecparam.reserved = 0; 3158 3159 return dev->ethtool_ops->set_fecparam(dev, &fecparam); 3160 } 3161 3162 /* The main entry point in this file. Called from net/core/dev_ioctl.c */ 3163 3164 static int 3165 __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, 3166 u32 ethcmd, struct ethtool_devlink_compat *devlink_state) 3167 { 3168 struct net_device *dev; 3169 u32 sub_cmd; 3170 int rc; 3171 netdev_features_t old_features; 3172 3173 dev = __dev_get_by_name(net, ifr->ifr_name); 3174 if (!dev) 3175 return -ENODEV; 3176 3177 if (ethcmd == ETHTOOL_PERQUEUE) { 3178 if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd))) 3179 return -EFAULT; 3180 } else { 3181 sub_cmd = ethcmd; 3182 } 3183 /* Allow some commands to be done by anyone */ 3184 switch (sub_cmd) { 3185 case ETHTOOL_GSET: 3186 case ETHTOOL_GDRVINFO: 3187 case ETHTOOL_GMSGLVL: 3188 case ETHTOOL_GLINK: 3189 case ETHTOOL_GCOALESCE: 3190 case ETHTOOL_GRINGPARAM: 3191 case ETHTOOL_GPAUSEPARAM: 3192 case ETHTOOL_GRXCSUM: 3193 case ETHTOOL_GTXCSUM: 3194 case ETHTOOL_GSG: 3195 case ETHTOOL_GSSET_INFO: 3196 case ETHTOOL_GSTRINGS: 3197 case ETHTOOL_GSTATS: 3198 case ETHTOOL_GPHYSTATS: 3199 case ETHTOOL_GTSO: 3200 case ETHTOOL_GPERMADDR: 3201 case ETHTOOL_GUFO: 3202 case ETHTOOL_GGSO: 3203 case ETHTOOL_GGRO: 3204 case ETHTOOL_GFLAGS: 3205 case ETHTOOL_GPFLAGS: 3206 case ETHTOOL_GRXFH: 3207 case ETHTOOL_GRXRINGS: 3208 case ETHTOOL_GRXCLSRLCNT: 3209 case ETHTOOL_GRXCLSRULE: 3210 case ETHTOOL_GRXCLSRLALL: 3211 case ETHTOOL_GRXFHINDIR: 3212 case ETHTOOL_GRSSH: 3213 case ETHTOOL_GFEATURES: 3214 case ETHTOOL_GCHANNELS: 3215 case ETHTOOL_GET_TS_INFO: 3216 case ETHTOOL_GEEE: 3217 case ETHTOOL_GTUNABLE: 3218 case ETHTOOL_PHY_GTUNABLE: 3219 case ETHTOOL_GLINKSETTINGS: 3220 case ETHTOOL_GFECPARAM: 3221 break; 3222 default: 3223 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3224 return -EPERM; 3225 } 3226 3227 netdev_lock_ops(dev); 3228 if (dev->dev.parent) 3229 pm_runtime_get_sync(dev->dev.parent); 3230 3231 if (!netif_device_present(dev)) { 3232 rc = -ENODEV; 3233 goto out; 3234 } 3235 3236 if (dev->ethtool_ops->begin) { 3237 rc = dev->ethtool_ops->begin(dev); 3238 if (rc < 0) 3239 goto out; 3240 } 3241 old_features = dev->features; 3242 3243 switch (ethcmd) { 3244 case ETHTOOL_GSET: 3245 rc = ethtool_get_settings(dev, useraddr); 3246 break; 3247 case ETHTOOL_SSET: 3248 rc = ethtool_set_settings(dev, useraddr); 3249 break; 3250 case ETHTOOL_GDRVINFO: 3251 rc = 
ethtool_get_drvinfo(dev, devlink_state); 3252 break; 3253 case ETHTOOL_GREGS: 3254 rc = ethtool_get_regs(dev, useraddr); 3255 break; 3256 case ETHTOOL_GWOL: 3257 rc = ethtool_get_wol(dev, useraddr); 3258 break; 3259 case ETHTOOL_SWOL: 3260 rc = ethtool_set_wol(dev, useraddr); 3261 break; 3262 case ETHTOOL_GMSGLVL: 3263 rc = ethtool_get_value(dev, useraddr, ethcmd, 3264 dev->ethtool_ops->get_msglevel); 3265 break; 3266 case ETHTOOL_SMSGLVL: 3267 rc = ethtool_set_value_void(dev, useraddr, 3268 dev->ethtool_ops->set_msglevel); 3269 if (!rc) 3270 ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL); 3271 break; 3272 case ETHTOOL_GEEE: 3273 rc = ethtool_get_eee(dev, useraddr); 3274 break; 3275 case ETHTOOL_SEEE: 3276 rc = ethtool_set_eee(dev, useraddr); 3277 break; 3278 case ETHTOOL_NWAY_RST: 3279 rc = ethtool_nway_reset(dev); 3280 break; 3281 case ETHTOOL_GLINK: 3282 rc = ethtool_get_link(dev, useraddr); 3283 break; 3284 case ETHTOOL_GEEPROM: 3285 rc = ethtool_get_eeprom(dev, useraddr); 3286 break; 3287 case ETHTOOL_SEEPROM: 3288 rc = ethtool_set_eeprom(dev, useraddr); 3289 break; 3290 case ETHTOOL_GCOALESCE: 3291 rc = ethtool_get_coalesce(dev, useraddr); 3292 break; 3293 case ETHTOOL_SCOALESCE: 3294 rc = ethtool_set_coalesce(dev, useraddr); 3295 break; 3296 case ETHTOOL_GRINGPARAM: 3297 rc = ethtool_get_ringparam(dev, useraddr); 3298 break; 3299 case ETHTOOL_SRINGPARAM: 3300 rc = ethtool_set_ringparam(dev, useraddr); 3301 break; 3302 case ETHTOOL_GPAUSEPARAM: 3303 rc = ethtool_get_pauseparam(dev, useraddr); 3304 break; 3305 case ETHTOOL_SPAUSEPARAM: 3306 rc = ethtool_set_pauseparam(dev, useraddr); 3307 break; 3308 case ETHTOOL_TEST: 3309 rc = ethtool_self_test(dev, useraddr); 3310 break; 3311 case ETHTOOL_GSTRINGS: 3312 rc = ethtool_get_strings(dev, useraddr); 3313 break; 3314 case ETHTOOL_PHYS_ID: 3315 rc = ethtool_phys_id(dev, useraddr); 3316 break; 3317 case ETHTOOL_GSTATS: 3318 rc = ethtool_get_stats(dev, useraddr); 3319 break; 3320 case ETHTOOL_GPERMADDR: 3321 rc = ethtool_get_perm_addr(dev, useraddr); 3322 break; 3323 case ETHTOOL_GFLAGS: 3324 rc = ethtool_get_value(dev, useraddr, ethcmd, 3325 __ethtool_get_flags); 3326 break; 3327 case ETHTOOL_SFLAGS: 3328 rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); 3329 break; 3330 case ETHTOOL_GPFLAGS: 3331 rc = ethtool_get_value(dev, useraddr, ethcmd, 3332 dev->ethtool_ops->get_priv_flags); 3333 break; 3334 case ETHTOOL_SPFLAGS: 3335 rc = ethtool_set_value(dev, useraddr, 3336 dev->ethtool_ops->set_priv_flags); 3337 if (!rc) 3338 ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL); 3339 break; 3340 case ETHTOOL_GRXFH: 3341 case ETHTOOL_GRXRINGS: 3342 case ETHTOOL_GRXCLSRLCNT: 3343 case ETHTOOL_GRXCLSRULE: 3344 case ETHTOOL_GRXCLSRLALL: 3345 rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); 3346 break; 3347 case ETHTOOL_SRXFH: 3348 case ETHTOOL_SRXCLSRLDEL: 3349 case ETHTOOL_SRXCLSRLINS: 3350 rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); 3351 break; 3352 case ETHTOOL_FLASHDEV: 3353 rc = ethtool_flash_device(dev, devlink_state); 3354 break; 3355 case ETHTOOL_RESET: 3356 rc = ethtool_reset(dev, useraddr); 3357 break; 3358 case ETHTOOL_GSSET_INFO: 3359 rc = ethtool_get_sset_info(dev, useraddr); 3360 break; 3361 case ETHTOOL_GRXFHINDIR: 3362 rc = ethtool_get_rxfh_indir(dev, useraddr); 3363 break; 3364 case ETHTOOL_SRXFHINDIR: 3365 rc = ethtool_set_rxfh_indir(dev, useraddr); 3366 break; 3367 case ETHTOOL_GRSSH: 3368 rc = ethtool_get_rxfh(dev, useraddr); 3369 break; 3370 case ETHTOOL_SRSSH: 3371 rc = ethtool_set_rxfh(dev, useraddr); 3372 break;
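	/* The feature commands below all operate on the netdev feature
	 * bitmap: ETHTOOL_{G,S}FEATURES expose it as 32-bit blocks, while
	 * the legacy single-flag commands (G/STXCSUM, G/SSG, G/STSO, ...)
	 * are mapped onto individual bits of the same bitmap.
	 */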
3373 case ETHTOOL_GFEATURES: 3374 rc = ethtool_get_features(dev, useraddr); 3375 break; 3376 case ETHTOOL_SFEATURES: 3377 rc = ethtool_set_features(dev, useraddr); 3378 break; 3379 case ETHTOOL_GTXCSUM: 3380 case ETHTOOL_GRXCSUM: 3381 case ETHTOOL_GSG: 3382 case ETHTOOL_GTSO: 3383 case ETHTOOL_GGSO: 3384 case ETHTOOL_GGRO: 3385 rc = ethtool_get_one_feature(dev, useraddr, ethcmd); 3386 break; 3387 case ETHTOOL_STXCSUM: 3388 case ETHTOOL_SRXCSUM: 3389 case ETHTOOL_SSG: 3390 case ETHTOOL_STSO: 3391 case ETHTOOL_SGSO: 3392 case ETHTOOL_SGRO: 3393 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 3394 break; 3395 case ETHTOOL_GCHANNELS: 3396 rc = ethtool_get_channels(dev, useraddr); 3397 break; 3398 case ETHTOOL_SCHANNELS: 3399 rc = ethtool_set_channels(dev, useraddr); 3400 break; 3401 case ETHTOOL_SET_DUMP: 3402 rc = ethtool_set_dump(dev, useraddr); 3403 break; 3404 case ETHTOOL_GET_DUMP_FLAG: 3405 rc = ethtool_get_dump_flag(dev, useraddr); 3406 break; 3407 case ETHTOOL_GET_DUMP_DATA: 3408 rc = ethtool_get_dump_data(dev, useraddr); 3409 break; 3410 case ETHTOOL_GET_TS_INFO: 3411 rc = ethtool_get_ts_info(dev, useraddr); 3412 break; 3413 case ETHTOOL_GMODULEINFO: 3414 rc = ethtool_get_module_info(dev, useraddr); 3415 break; 3416 case ETHTOOL_GMODULEEEPROM: 3417 rc = ethtool_get_module_eeprom(dev, useraddr); 3418 break; 3419 case ETHTOOL_GTUNABLE: 3420 rc = ethtool_get_tunable(dev, useraddr); 3421 break; 3422 case ETHTOOL_STUNABLE: 3423 rc = ethtool_set_tunable(dev, useraddr); 3424 break; 3425 case ETHTOOL_GPHYSTATS: 3426 rc = ethtool_get_phy_stats(dev, useraddr); 3427 break; 3428 case ETHTOOL_PERQUEUE: 3429 rc = ethtool_set_per_queue(dev, useraddr, sub_cmd); 3430 break; 3431 case ETHTOOL_GLINKSETTINGS: 3432 rc = ethtool_get_link_ksettings(dev, useraddr); 3433 break; 3434 case ETHTOOL_SLINKSETTINGS: 3435 rc = ethtool_set_link_ksettings(dev, useraddr); 3436 break; 3437 case ETHTOOL_PHY_GTUNABLE: 3438 rc = get_phy_tunable(dev, useraddr); 3439 break; 3440 case ETHTOOL_PHY_STUNABLE: 3441 rc = set_phy_tunable(dev, useraddr); 3442 break; 3443 case ETHTOOL_GFECPARAM: 3444 rc = ethtool_get_fecparam(dev, useraddr); 3445 break; 3446 case ETHTOOL_SFECPARAM: 3447 rc = ethtool_set_fecparam(dev, useraddr); 3448 break; 3449 default: 3450 rc = -EOPNOTSUPP; 3451 } 3452 3453 if (dev->ethtool_ops->complete) 3454 dev->ethtool_ops->complete(dev); 3455 3456 if (old_features != dev->features) 3457 netdev_features_change(dev); 3458 out: 3459 if (dev->dev.parent) 3460 pm_runtime_put(dev->dev.parent); 3461 netdev_unlock_ops(dev); 3462 3463 return rc; 3464 } 3465 3466 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) 3467 { 3468 struct ethtool_devlink_compat *state; 3469 u32 ethcmd; 3470 int rc; 3471 3472 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 3473 return -EFAULT; 3474 3475 state = kzalloc(sizeof(*state), GFP_KERNEL); 3476 if (!state) 3477 return -ENOMEM; 3478 3479 switch (ethcmd) { 3480 case ETHTOOL_FLASHDEV: 3481 if (copy_from_user(&state->efl, useraddr, sizeof(state->efl))) { 3482 rc = -EFAULT; 3483 goto exit_free; 3484 } 3485 state->efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; 3486 break; 3487 } 3488 3489 rtnl_lock(); 3490 rc = __dev_ethtool(net, ifr, useraddr, ethcmd, state); 3491 rtnl_unlock(); 3492 if (rc) 3493 goto exit_free; 3494 3495 switch (ethcmd) { 3496 case ETHTOOL_FLASHDEV: 3497 if (state->devlink) 3498 rc = devlink_compat_flash_update(state->devlink, 3499 state->efl.data); 3500 break; 3501 case ETHTOOL_GDRVINFO: 3502 if (state->devlink) 3503
devlink_compat_running_version(state->devlink, 3504 state->info.fw_version, 3505 sizeof(state->info.fw_version)); 3506 if (copy_to_user(useraddr, &state->info, sizeof(state->info))) { 3507 rc = -EFAULT; 3508 goto exit_free; 3509 } 3510 break; 3511 } 3512 3513 exit_free: 3514 if (state->devlink) 3515 devlink_put(state->devlink); 3516 kfree(state); 3517 return rc; 3518 } 3519 3520 struct ethtool_rx_flow_key { 3521 struct flow_dissector_key_basic basic; 3522 union { 3523 struct flow_dissector_key_ipv4_addrs ipv4; 3524 struct flow_dissector_key_ipv6_addrs ipv6; 3525 }; 3526 struct flow_dissector_key_ports tp; 3527 struct flow_dissector_key_ip ip; 3528 struct flow_dissector_key_vlan vlan; 3529 struct flow_dissector_key_eth_addrs eth_addrs; 3530 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ 3531 3532 struct ethtool_rx_flow_match { 3533 struct flow_dissector dissector; 3534 struct ethtool_rx_flow_key key; 3535 struct ethtool_rx_flow_key mask; 3536 }; 3537 3538 struct ethtool_rx_flow_rule * 3539 ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) 3540 { 3541 const struct ethtool_rx_flow_spec *fs = input->fs; 3542 struct ethtool_rx_flow_match *match; 3543 struct ethtool_rx_flow_rule *flow; 3544 struct flow_action_entry *act; 3545 3546 flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) + 3547 sizeof(struct ethtool_rx_flow_match), GFP_KERNEL); 3548 if (!flow) 3549 return ERR_PTR(-ENOMEM); 3550 3551 /* ethtool_rx supports only one single action per rule. */ 3552 flow->rule = flow_rule_alloc(1); 3553 if (!flow->rule) { 3554 kfree(flow); 3555 return ERR_PTR(-ENOMEM); 3556 } 3557 3558 match = (struct ethtool_rx_flow_match *)flow->priv; 3559 flow->rule->match.dissector = &match->dissector; 3560 flow->rule->match.mask = &match->mask; 3561 flow->rule->match.key = &match->key; 3562 3563 match->mask.basic.n_proto = htons(0xffff); 3564 3565 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3566 case ETHER_FLOW: { 3567 const struct ethhdr *ether_spec, *ether_m_spec; 3568 3569 ether_spec = &fs->h_u.ether_spec; 3570 ether_m_spec = &fs->m_u.ether_spec; 3571 3572 if (!is_zero_ether_addr(ether_m_spec->h_source)) { 3573 ether_addr_copy(match->key.eth_addrs.src, 3574 ether_spec->h_source); 3575 ether_addr_copy(match->mask.eth_addrs.src, 3576 ether_m_spec->h_source); 3577 } 3578 if (!is_zero_ether_addr(ether_m_spec->h_dest)) { 3579 ether_addr_copy(match->key.eth_addrs.dst, 3580 ether_spec->h_dest); 3581 ether_addr_copy(match->mask.eth_addrs.dst, 3582 ether_m_spec->h_dest); 3583 } 3584 if (ether_m_spec->h_proto) { 3585 match->key.basic.n_proto = ether_spec->h_proto; 3586 match->mask.basic.n_proto = ether_m_spec->h_proto; 3587 } 3588 } 3589 break; 3590 case TCP_V4_FLOW: 3591 case UDP_V4_FLOW: { 3592 const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; 3593 3594 match->key.basic.n_proto = htons(ETH_P_IP); 3595 3596 v4_spec = &fs->h_u.tcp_ip4_spec; 3597 v4_m_spec = &fs->m_u.tcp_ip4_spec; 3598 3599 if (v4_m_spec->ip4src) { 3600 match->key.ipv4.src = v4_spec->ip4src; 3601 match->mask.ipv4.src = v4_m_spec->ip4src; 3602 } 3603 if (v4_m_spec->ip4dst) { 3604 match->key.ipv4.dst = v4_spec->ip4dst; 3605 match->mask.ipv4.dst = v4_m_spec->ip4dst; 3606 } 3607 if (v4_m_spec->ip4src || 3608 v4_m_spec->ip4dst) { 3609 match->dissector.used_keys |= 3610 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS); 3611 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 3612 offsetof(struct ethtool_rx_flow_key, ipv4); 3613 } 3614 if (v4_m_spec->psrc) { 3615 match->key.tp.src = 
v4_spec->psrc; 3616 match->mask.tp.src = v4_m_spec->psrc; 3617 } 3618 if (v4_m_spec->pdst) { 3619 match->key.tp.dst = v4_spec->pdst; 3620 match->mask.tp.dst = v4_m_spec->pdst; 3621 } 3622 if (v4_m_spec->psrc || 3623 v4_m_spec->pdst) { 3624 match->dissector.used_keys |= 3625 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3626 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3627 offsetof(struct ethtool_rx_flow_key, tp); 3628 } 3629 if (v4_m_spec->tos) { 3630 match->key.ip.tos = v4_spec->tos; 3631 match->mask.ip.tos = v4_m_spec->tos; 3632 match->dissector.used_keys |= 3633 BIT(FLOW_DISSECTOR_KEY_IP); 3634 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3635 offsetof(struct ethtool_rx_flow_key, ip); 3636 } 3637 } 3638 break; 3639 case TCP_V6_FLOW: 3640 case UDP_V6_FLOW: { 3641 const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; 3642 3643 match->key.basic.n_proto = htons(ETH_P_IPV6); 3644 3645 v6_spec = &fs->h_u.tcp_ip6_spec; 3646 v6_m_spec = &fs->m_u.tcp_ip6_spec; 3647 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) { 3648 memcpy(&match->key.ipv6.src, v6_spec->ip6src, 3649 sizeof(match->key.ipv6.src)); 3650 memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src, 3651 sizeof(match->mask.ipv6.src)); 3652 } 3653 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3654 memcpy(&match->key.ipv6.dst, v6_spec->ip6dst, 3655 sizeof(match->key.ipv6.dst)); 3656 memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst, 3657 sizeof(match->mask.ipv6.dst)); 3658 } 3659 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) || 3660 !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3661 match->dissector.used_keys |= 3662 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS); 3663 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = 3664 offsetof(struct ethtool_rx_flow_key, ipv6); 3665 } 3666 if (v6_m_spec->psrc) { 3667 match->key.tp.src = v6_spec->psrc; 3668 match->mask.tp.src = v6_m_spec->psrc; 3669 } 3670 if (v6_m_spec->pdst) { 3671 match->key.tp.dst = v6_spec->pdst; 3672 match->mask.tp.dst = v6_m_spec->pdst; 3673 } 3674 if (v6_m_spec->psrc || 3675 v6_m_spec->pdst) { 3676 match->dissector.used_keys |= 3677 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3678 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3679 offsetof(struct ethtool_rx_flow_key, tp); 3680 } 3681 if (v6_m_spec->tclass) { 3682 match->key.ip.tos = v6_spec->tclass; 3683 match->mask.ip.tos = v6_m_spec->tclass; 3684 match->dissector.used_keys |= 3685 BIT_ULL(FLOW_DISSECTOR_KEY_IP); 3686 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3687 offsetof(struct ethtool_rx_flow_key, ip); 3688 } 3689 } 3690 break; 3691 default: 3692 ethtool_rx_flow_rule_destroy(flow); 3693 return ERR_PTR(-EINVAL); 3694 } 3695 3696 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3697 case TCP_V4_FLOW: 3698 case TCP_V6_FLOW: 3699 match->key.basic.ip_proto = IPPROTO_TCP; 3700 match->mask.basic.ip_proto = 0xff; 3701 break; 3702 case UDP_V4_FLOW: 3703 case UDP_V6_FLOW: 3704 match->key.basic.ip_proto = IPPROTO_UDP; 3705 match->mask.basic.ip_proto = 0xff; 3706 break; 3707 } 3708 3709 match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); 3710 match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] = 3711 offsetof(struct ethtool_rx_flow_key, basic); 3712 3713 if (fs->flow_type & FLOW_EXT) { 3714 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3715 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 3716 3717 if (ext_m_spec->vlan_etype) { 3718 match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype; 3719 match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype; 3720 } 
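		/* The 16-bit VLAN TCI below is split into its component
		 * fields: bits 0-11 carry the VLAN ID, bit 12 the DEI bit
		 * and bits 13-15 the priority (PCP).
		 */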
3721 3722 if (ext_m_spec->vlan_tci) { 3723 match->key.vlan.vlan_id = 3724 ntohs(ext_h_spec->vlan_tci) & 0x0fff; 3725 match->mask.vlan.vlan_id = 3726 ntohs(ext_m_spec->vlan_tci) & 0x0fff; 3727 3728 match->key.vlan.vlan_dei = 3729 !!(ext_h_spec->vlan_tci & htons(0x1000)); 3730 match->mask.vlan.vlan_dei = 3731 !!(ext_m_spec->vlan_tci & htons(0x1000)); 3732 3733 match->key.vlan.vlan_priority = 3734 (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13; 3735 match->mask.vlan.vlan_priority = 3736 (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13; 3737 } 3738 3739 if (ext_m_spec->vlan_etype || 3740 ext_m_spec->vlan_tci) { 3741 match->dissector.used_keys |= 3742 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); 3743 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] = 3744 offsetof(struct ethtool_rx_flow_key, vlan); 3745 } 3746 } 3747 if (fs->flow_type & FLOW_MAC_EXT) { 3748 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3749 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 3750 3751 memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest, 3752 ETH_ALEN); 3753 memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest, 3754 ETH_ALEN); 3755 3756 match->dissector.used_keys |= 3757 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS); 3758 match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] = 3759 offsetof(struct ethtool_rx_flow_key, eth_addrs); 3760 } 3761 3762 act = &flow->rule->action.entries[0]; 3763 switch (fs->ring_cookie) { 3764 case RX_CLS_FLOW_DISC: 3765 act->id = FLOW_ACTION_DROP; 3766 break; 3767 case RX_CLS_FLOW_WAKE: 3768 act->id = FLOW_ACTION_WAKE; 3769 break; 3770 default: 3771 act->id = FLOW_ACTION_QUEUE; 3772 if (fs->flow_type & FLOW_RSS) 3773 act->queue.ctx = input->rss_ctx; 3774 3775 act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 3776 act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie); 3777 break; 3778 } 3779 3780 return flow; 3781 } 3782 EXPORT_SYMBOL(ethtool_rx_flow_rule_create); 3783 3784 void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow) 3785 { 3786 kfree(flow->rule); 3787 kfree(flow); 3788 } 3789 EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy); 3790
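/* Usage sketch for the rule translation helpers above (illustrative only:
 * foo_priv and foo_program_flow_rule are hypothetical driver names, not
 * part of this file):
 *
 *	static int foo_add_cls_rule(struct foo_priv *priv,
 *				    const struct ethtool_rx_flow_spec *fs)
 *	{
 *		struct ethtool_rx_flow_spec_input input = { .fs = fs };
 *		struct ethtool_rx_flow_rule *flow;
 *		int err;
 *
 *		flow = ethtool_rx_flow_rule_create(&input);
 *		if (IS_ERR(flow))
 *			return PTR_ERR(flow);
 *
 *		// flow->rule is a regular struct flow_rule, so the same
 *		// parsing code used for tc/flower offload can be reused.
 *		err = foo_program_flow_rule(priv, flow->rule);
 *
 *		ethtool_rx_flow_rule_destroy(flow);
 *		return err;
 *	}
 */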