// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/ethtool.c - Ethtool ioctl handler
 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
 *
 * This file is where we call all the ethtool_ops commands to get
 * the information ethtool needs.
 */

#include <linux/compat.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <net/netdev_lock.h>
#include <linux/ethtool_netlink.h>
#include "common.h"

/* State held across locks and calls for commands which have devlink fallback */
struct ethtool_devlink_compat {
	struct devlink *devlink;
	union {
		struct ethtool_flash efl;
		struct ethtool_drvinfo info;
	};
};

static struct devlink *netdev_to_devlink_get(struct net_device *dev)
{
	if (!dev->devlink_port)
		return NULL;
	return devlink_try_get(dev->devlink_port->devlink);
}

/*
 * Some useful ethtool_ops methods that're device independent.
 * If we find that all drivers want to do the same thing here,
 * we can turn these into dev_() function calls.
 */

u32 ethtool_op_get_link(struct net_device *dev)
{
	/* Synchronize carrier state with link watch, see also rtnl_getlink() */
	__linkwatch_sync_dev(dev);

	return netif_carrier_ok(dev) ? 1 : 0;
}
EXPORT_SYMBOL(ethtool_op_get_link);
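
/* Example (sketch, not taken from any driver): a device with no special
 * link-state or timestamping requirements can plug the generic helpers
 * above straight into its ethtool_ops.  "example_ethtool_ops" is a
 * hypothetical name; .get_link and .get_ts_info are the real ethtool_ops
 * members these helpers are written for.
 *
 *	static const struct ethtool_ops example_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *		.get_ts_info	= ethtool_op_get_ts_info,
 *	};
 *
 * With this, the ETHTOOL_GLINK and ETHTOOL_GET_TS_INFO requests handled
 * later in this file are serviced without any driver-specific code.
 */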
1 : 0; 66 } 67 EXPORT_SYMBOL(ethtool_op_get_link); 68 69 int ethtool_op_get_ts_info(struct net_device *dev, 70 struct kernel_ethtool_ts_info *info) 71 { 72 info->so_timestamping = 73 SOF_TIMESTAMPING_TX_SOFTWARE | 74 SOF_TIMESTAMPING_RX_SOFTWARE | 75 SOF_TIMESTAMPING_SOFTWARE; 76 info->phc_index = -1; 77 return 0; 78 } 79 EXPORT_SYMBOL(ethtool_op_get_ts_info); 80 81 /* Handlers for each ethtool command */ 82 83 static int ethtool_get_features(struct net_device *dev, void __user *useraddr) 84 { 85 struct ethtool_gfeatures cmd = { 86 .cmd = ETHTOOL_GFEATURES, 87 .size = ETHTOOL_DEV_FEATURE_WORDS, 88 }; 89 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; 90 u32 __user *sizeaddr; 91 u32 copy_size; 92 int i; 93 94 /* in case feature bits run out again */ 95 BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); 96 97 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { 98 features[i].available = (u32)(dev->hw_features >> (32 * i)); 99 features[i].requested = (u32)(dev->wanted_features >> (32 * i)); 100 features[i].active = (u32)(dev->features >> (32 * i)); 101 features[i].never_changed = 102 (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); 103 } 104 105 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); 106 if (get_user(copy_size, sizeaddr)) 107 return -EFAULT; 108 109 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) 110 copy_size = ETHTOOL_DEV_FEATURE_WORDS; 111 112 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 113 return -EFAULT; 114 useraddr += sizeof(cmd); 115 if (copy_to_user(useraddr, features, 116 array_size(copy_size, sizeof(*features)))) 117 return -EFAULT; 118 119 return 0; 120 } 121 122 static int ethtool_set_features(struct net_device *dev, void __user *useraddr) 123 { 124 struct ethtool_sfeatures cmd; 125 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; 126 netdev_features_t wanted = 0, valid = 0; 127 int i, ret = 0; 128 129 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 130 return -EFAULT; 131 useraddr += sizeof(cmd); 132 133 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) 134 return -EINVAL; 135 136 if (copy_from_user(features, useraddr, sizeof(features))) 137 return -EFAULT; 138 139 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { 140 valid |= (netdev_features_t)features[i].valid << (32 * i); 141 wanted |= (netdev_features_t)features[i].requested << (32 * i); 142 } 143 144 if (valid & ~NETIF_F_ETHTOOL_BITS) 145 return -EINVAL; 146 147 if (valid & ~dev->hw_features) { 148 valid &= dev->hw_features; 149 ret |= ETHTOOL_F_UNSUPPORTED; 150 } 151 152 dev->wanted_features &= ~valid; 153 dev->wanted_features |= wanted & valid; 154 __netdev_update_features(dev); 155 156 if ((dev->wanted_features ^ dev->features) & valid) 157 ret |= ETHTOOL_F_WISH; 158 159 return ret; 160 } 161 162 static int __ethtool_get_sset_count(struct net_device *dev, int sset) 163 { 164 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 165 const struct ethtool_ops *ops = dev->ethtool_ops; 166 167 if (sset == ETH_SS_FEATURES) 168 return ARRAY_SIZE(netdev_features_strings); 169 170 if (sset == ETH_SS_RSS_HASH_FUNCS) 171 return ARRAY_SIZE(rss_hash_func_strings); 172 173 if (sset == ETH_SS_TUNABLES) 174 return ARRAY_SIZE(tunable_strings); 175 176 if (sset == ETH_SS_PHY_TUNABLES) 177 return ARRAY_SIZE(phy_tunable_strings); 178 179 if (sset == ETH_SS_PHY_STATS && dev->phydev && 180 !ops->get_ethtool_phy_stats && 181 phy_ops && phy_ops->get_sset_count) 182 return phy_ops->get_sset_count(dev->phydev); 183 184 if (sset == ETH_SS_LINK_MODES) 185 return 
__ETHTOOL_LINK_MODE_MASK_NBITS; 186 187 if (ops->get_sset_count && ops->get_strings) 188 return ops->get_sset_count(dev, sset); 189 else 190 return -EOPNOTSUPP; 191 } 192 193 static void __ethtool_get_strings(struct net_device *dev, 194 u32 stringset, u8 *data) 195 { 196 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 197 const struct ethtool_ops *ops = dev->ethtool_ops; 198 199 if (stringset == ETH_SS_FEATURES) 200 memcpy(data, netdev_features_strings, 201 sizeof(netdev_features_strings)); 202 else if (stringset == ETH_SS_RSS_HASH_FUNCS) 203 memcpy(data, rss_hash_func_strings, 204 sizeof(rss_hash_func_strings)); 205 else if (stringset == ETH_SS_TUNABLES) 206 memcpy(data, tunable_strings, sizeof(tunable_strings)); 207 else if (stringset == ETH_SS_PHY_TUNABLES) 208 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); 209 else if (stringset == ETH_SS_PHY_STATS && dev->phydev && 210 !ops->get_ethtool_phy_stats && phy_ops && 211 phy_ops->get_strings) 212 phy_ops->get_strings(dev->phydev, data); 213 else if (stringset == ETH_SS_LINK_MODES) 214 memcpy(data, link_mode_names, 215 __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN); 216 else 217 /* ops->get_strings is valid because checked earlier */ 218 ops->get_strings(dev, stringset, data); 219 } 220 221 static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) 222 { 223 /* feature masks of legacy discrete ethtool ops */ 224 225 switch (eth_cmd) { 226 case ETHTOOL_GTXCSUM: 227 case ETHTOOL_STXCSUM: 228 return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC | 229 NETIF_F_SCTP_CRC; 230 case ETHTOOL_GRXCSUM: 231 case ETHTOOL_SRXCSUM: 232 return NETIF_F_RXCSUM; 233 case ETHTOOL_GSG: 234 case ETHTOOL_SSG: 235 return NETIF_F_SG | NETIF_F_FRAGLIST; 236 case ETHTOOL_GTSO: 237 case ETHTOOL_STSO: 238 return NETIF_F_ALL_TSO; 239 case ETHTOOL_GGSO: 240 case ETHTOOL_SGSO: 241 return NETIF_F_GSO; 242 case ETHTOOL_GGRO: 243 case ETHTOOL_SGRO: 244 return NETIF_F_GRO; 245 default: 246 BUG(); 247 } 248 } 249 250 static int ethtool_get_one_feature(struct net_device *dev, 251 char __user *useraddr, u32 ethcmd) 252 { 253 netdev_features_t mask = ethtool_get_feature_mask(ethcmd); 254 struct ethtool_value edata = { 255 .cmd = ethcmd, 256 .data = !!(dev->features & mask), 257 }; 258 259 if (copy_to_user(useraddr, &edata, sizeof(edata))) 260 return -EFAULT; 261 return 0; 262 } 263 264 static int ethtool_set_one_feature(struct net_device *dev, 265 void __user *useraddr, u32 ethcmd) 266 { 267 struct ethtool_value edata; 268 netdev_features_t mask; 269 270 if (copy_from_user(&edata, useraddr, sizeof(edata))) 271 return -EFAULT; 272 273 mask = ethtool_get_feature_mask(ethcmd); 274 mask &= dev->hw_features; 275 if (!mask) 276 return -EOPNOTSUPP; 277 278 if (edata.data) 279 dev->wanted_features |= mask; 280 else 281 dev->wanted_features &= ~mask; 282 283 __netdev_update_features(dev); 284 285 return 0; 286 } 287 288 #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ 289 ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) 290 #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ 291 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ 292 NETIF_F_RXHASH) 293 294 static u32 __ethtool_get_flags(struct net_device *dev) 295 { 296 u32 flags = 0; 297 298 if (dev->features & NETIF_F_LRO) 299 flags |= ETH_FLAG_LRO; 300 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 301 flags |= ETH_FLAG_RXVLAN; 302 if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) 303 flags |= ETH_FLAG_TXVLAN; 304 if (dev->features & NETIF_F_NTUPLE) 305 flags |= ETH_FLAG_NTUPLE; 306 if 
(dev->features & NETIF_F_RXHASH) 307 flags |= ETH_FLAG_RXHASH; 308 309 return flags; 310 } 311 312 static int __ethtool_set_flags(struct net_device *dev, u32 data) 313 { 314 netdev_features_t features = 0, changed; 315 316 if (data & ~ETH_ALL_FLAGS) 317 return -EINVAL; 318 319 if (data & ETH_FLAG_LRO) 320 features |= NETIF_F_LRO; 321 if (data & ETH_FLAG_RXVLAN) 322 features |= NETIF_F_HW_VLAN_CTAG_RX; 323 if (data & ETH_FLAG_TXVLAN) 324 features |= NETIF_F_HW_VLAN_CTAG_TX; 325 if (data & ETH_FLAG_NTUPLE) 326 features |= NETIF_F_NTUPLE; 327 if (data & ETH_FLAG_RXHASH) 328 features |= NETIF_F_RXHASH; 329 330 /* allow changing only bits set in hw_features */ 331 changed = (features ^ dev->features) & ETH_ALL_FEATURES; 332 if (changed & ~dev->hw_features) 333 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 334 335 dev->wanted_features = 336 (dev->wanted_features & ~changed) | (features & changed); 337 338 __netdev_update_features(dev); 339 340 return 0; 341 } 342 343 /* Given two link masks, AND them together and save the result in dst. */ 344 void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, 345 struct ethtool_link_ksettings *src) 346 { 347 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 348 unsigned int idx = 0; 349 350 for (; idx < size; idx++) { 351 dst->link_modes.supported[idx] &= 352 src->link_modes.supported[idx]; 353 dst->link_modes.advertising[idx] &= 354 src->link_modes.advertising[idx]; 355 } 356 } 357 EXPORT_SYMBOL(ethtool_intersect_link_masks); 358 359 void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, 360 u32 legacy_u32) 361 { 362 linkmode_zero(dst); 363 dst[0] = legacy_u32; 364 } 365 EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); 366 367 /* return false if src had higher bits set. lower bits always updated. */ 368 bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, 369 const unsigned long *src) 370 { 371 *legacy_u32 = src[0]; 372 return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS, 32) == 373 __ETHTOOL_LINK_MODE_MASK_NBITS; 374 } 375 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); 376 377 /* return false if ksettings link modes had higher bits 378 * set. 
legacy_settings always updated (best effort) 379 */ 380 static bool 381 convert_link_ksettings_to_legacy_settings( 382 struct ethtool_cmd *legacy_settings, 383 const struct ethtool_link_ksettings *link_ksettings) 384 { 385 bool retval = true; 386 387 memset(legacy_settings, 0, sizeof(*legacy_settings)); 388 /* this also clears the deprecated fields in legacy structure: 389 * __u8 transceiver; 390 * __u32 maxtxpkt; 391 * __u32 maxrxpkt; 392 */ 393 394 retval &= ethtool_convert_link_mode_to_legacy_u32( 395 &legacy_settings->supported, 396 link_ksettings->link_modes.supported); 397 retval &= ethtool_convert_link_mode_to_legacy_u32( 398 &legacy_settings->advertising, 399 link_ksettings->link_modes.advertising); 400 retval &= ethtool_convert_link_mode_to_legacy_u32( 401 &legacy_settings->lp_advertising, 402 link_ksettings->link_modes.lp_advertising); 403 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); 404 legacy_settings->duplex 405 = link_ksettings->base.duplex; 406 legacy_settings->port 407 = link_ksettings->base.port; 408 legacy_settings->phy_address 409 = link_ksettings->base.phy_address; 410 legacy_settings->autoneg 411 = link_ksettings->base.autoneg; 412 legacy_settings->mdio_support 413 = link_ksettings->base.mdio_support; 414 legacy_settings->eth_tp_mdix 415 = link_ksettings->base.eth_tp_mdix; 416 legacy_settings->eth_tp_mdix_ctrl 417 = link_ksettings->base.eth_tp_mdix_ctrl; 418 legacy_settings->transceiver 419 = link_ksettings->base.transceiver; 420 return retval; 421 } 422 423 /* number of 32-bit words to store the user's link mode bitmaps */ 424 #define __ETHTOOL_LINK_MODE_MASK_NU32 \ 425 DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32) 426 427 /* layout of the struct passed from/to userland */ 428 struct ethtool_link_usettings { 429 struct ethtool_link_settings base; 430 struct { 431 __u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32]; 432 __u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; 433 __u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32]; 434 } link_modes; 435 }; 436 437 /* Internal kernel helper to query a device ethtool_link_settings. */ 438 int __ethtool_get_link_ksettings(struct net_device *dev, 439 struct ethtool_link_ksettings *link_ksettings) 440 { 441 ASSERT_RTNL(); 442 443 if (!dev->ethtool_ops->get_link_ksettings) 444 return -EOPNOTSUPP; 445 446 if (!netif_device_present(dev)) 447 return -ENODEV; 448 449 memset(link_ksettings, 0, sizeof(*link_ksettings)); 450 return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); 451 } 452 EXPORT_SYMBOL(__ethtool_get_link_ksettings); 453 454 /* convert ethtool_link_usettings in user space to a kernel internal 455 * ethtool_link_ksettings. return 0 on success, errno on error. 
 */
static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
					 const void __user *from)
{
	struct ethtool_link_usettings link_usettings;

	if (copy_from_user(&link_usettings, from, sizeof(link_usettings)))
		return -EFAULT;

	memcpy(&to->base, &link_usettings.base, sizeof(to->base));
	bitmap_from_arr32(to->link_modes.supported,
			  link_usettings.link_modes.supported,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_from_arr32(to->link_modes.advertising,
			  link_usettings.link_modes.advertising,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_from_arr32(to->link_modes.lp_advertising,
			  link_usettings.link_modes.lp_advertising,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);

	return 0;
}

/* Check if the user is trying to change anything besides speed/duplex */
bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_settings base2 = {};

	base2.speed = cmd->base.speed;
	base2.port = PORT_OTHER;
	base2.duplex = cmd->base.duplex;
	base2.cmd = cmd->base.cmd;
	base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords;

	return !memcmp(&base2, &cmd->base, sizeof(base2)) &&
		bitmap_empty(cmd->link_modes.supported,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(cmd->link_modes.lp_advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* convert a kernel internal ethtool_link_ksettings to
 * ethtool_link_usettings in user space. return 0 on success, errno on
 * error.
 */
static int
store_link_ksettings_for_user(void __user *to,
			      const struct ethtool_link_ksettings *from)
{
	struct ethtool_link_usettings link_usettings;

	memcpy(&link_usettings, from, sizeof(link_usettings));
	bitmap_to_arr32(link_usettings.link_modes.supported,
			from->link_modes.supported,
			__ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_to_arr32(link_usettings.link_modes.advertising,
			from->link_modes.advertising,
			__ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_to_arr32(link_usettings.link_modes.lp_advertising,
			from->link_modes.lp_advertising,
			__ETHTOOL_LINK_MODE_MASK_NBITS);

	if (copy_to_user(to, &link_usettings, sizeof(link_usettings)))
		return -EFAULT;

	return 0;
}
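
/* Example (sketch, not upstream code): the user-visible layout the two
 * helpers above serialize to and from.  Each link-mode bitmap crosses the
 * user/kernel boundary as an array of __ETHTOOL_LINK_MODE_MASK_NU32 u32
 * words, so bit N of the kernel bitmap lands in word N / 32, bit N % 32 of
 * the user array.  Testing "10000baseT Full" support in a copied-out buffer
 * could look like this ("usettings" being a hypothetical local variable of
 * type struct ethtool_link_usettings):
 *
 *	int bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT;
 *	bool supported = usettings.link_modes.supported[bit / 32] &
 *			 BIT(bit % 32);
 */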
/* Query device for its ethtool_link_settings.
 */
static int ethtool_get_link_ksettings(struct net_device *dev,
				      void __user *useraddr)
{
	int err = 0;
	struct ethtool_link_ksettings link_ksettings;

	ASSERT_RTNL();
	if (!dev->ethtool_ops->get_link_ksettings)
		return -EOPNOTSUPP;

	/* handle bitmap nbits handshake */
	if (copy_from_user(&link_ksettings.base, useraddr,
			   sizeof(link_ksettings.base)))
		return -EFAULT;

	if (__ETHTOOL_LINK_MODE_MASK_NU32
	    != link_ksettings.base.link_mode_masks_nwords) {
		/* wrong link mode nbits requested */
		memset(&link_ksettings, 0, sizeof(link_ksettings));
		link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
		/* send back number of words required as negative val */
		compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX,
				   "need too many bits for link modes!");
		link_ksettings.base.link_mode_masks_nwords
			= -((s8)__ETHTOOL_LINK_MODE_MASK_NU32);

		/* copy the base fields back to user, not the link
		 * mode bitmaps
		 */
		if (copy_to_user(useraddr, &link_ksettings.base,
				 sizeof(link_ksettings.base)))
			return -EFAULT;

		return 0;
	}

	/* handshake successful: user/kernel agree on
	 * link_mode_masks_nwords
	 */

	memset(&link_ksettings, 0, sizeof(link_ksettings));
	err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
	if (err < 0)
		return err;

	/* make sure we tell the right values to user */
	link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
	link_ksettings.base.link_mode_masks_nwords
		= __ETHTOOL_LINK_MODE_MASK_NU32;
	link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED;
	link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
	link_ksettings.base.rate_matching = RATE_MATCH_NONE;

	return store_link_ksettings_for_user(useraddr, &link_ksettings);
}

/* Update device ethtool_link_settings.
*/ 582 static int ethtool_set_link_ksettings(struct net_device *dev, 583 void __user *useraddr) 584 { 585 struct ethtool_link_ksettings link_ksettings = {}; 586 int err; 587 588 ASSERT_RTNL(); 589 590 if (!dev->ethtool_ops->set_link_ksettings) 591 return -EOPNOTSUPP; 592 593 /* make sure nbits field has expected value */ 594 if (copy_from_user(&link_ksettings.base, useraddr, 595 sizeof(link_ksettings.base))) 596 return -EFAULT; 597 598 if (__ETHTOOL_LINK_MODE_MASK_NU32 599 != link_ksettings.base.link_mode_masks_nwords) 600 return -EINVAL; 601 602 /* copy the whole structure, now that we know it has expected 603 * format 604 */ 605 err = load_link_ksettings_from_user(&link_ksettings, useraddr); 606 if (err) 607 return err; 608 609 /* re-check nwords field, just in case */ 610 if (__ETHTOOL_LINK_MODE_MASK_NU32 611 != link_ksettings.base.link_mode_masks_nwords) 612 return -EINVAL; 613 614 if (link_ksettings.base.master_slave_cfg || 615 link_ksettings.base.master_slave_state) 616 return -EINVAL; 617 618 err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 619 if (err >= 0) { 620 ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL); 621 ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL); 622 } 623 return err; 624 } 625 626 int ethtool_virtdev_set_link_ksettings(struct net_device *dev, 627 const struct ethtool_link_ksettings *cmd, 628 u32 *dev_speed, u8 *dev_duplex) 629 { 630 u32 speed; 631 u8 duplex; 632 633 speed = cmd->base.speed; 634 duplex = cmd->base.duplex; 635 /* don't allow custom speed and duplex */ 636 if (!ethtool_validate_speed(speed) || 637 !ethtool_validate_duplex(duplex) || 638 !ethtool_virtdev_validate_cmd(cmd)) 639 return -EINVAL; 640 *dev_speed = speed; 641 *dev_duplex = duplex; 642 643 return 0; 644 } 645 EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings); 646 647 /* Query device for its ethtool_cmd settings. 648 * 649 * Backward compatibility note: for compatibility with legacy ethtool, this is 650 * now implemented via get_link_ksettings. When driver reports higher link mode 651 * bits, a kernel warning is logged once (with name of 1st driver/device) to 652 * recommend user to upgrade ethtool, but the command is successful (only the 653 * lower link mode bits reported back to user). Deprecated fields from 654 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. 655 */ 656 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 657 { 658 struct ethtool_link_ksettings link_ksettings; 659 struct ethtool_cmd cmd; 660 int err; 661 662 ASSERT_RTNL(); 663 if (!dev->ethtool_ops->get_link_ksettings) 664 return -EOPNOTSUPP; 665 666 if (dev->ethtool->module_fw_flash_in_progress) 667 return -EBUSY; 668 669 memset(&link_ksettings, 0, sizeof(link_ksettings)); 670 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); 671 if (err < 0) 672 return err; 673 convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings); 674 675 /* send a sensible cmd tag back to user */ 676 cmd.cmd = ETHTOOL_GSET; 677 678 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 679 return -EFAULT; 680 681 return 0; 682 } 683 684 /* Update device link settings with given ethtool_cmd. 685 * 686 * Backward compatibility note: for compatibility with legacy ethtool, this is 687 * now always implemented via set_link_settings. 
When user's request updates 688 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel 689 * warning is logged once (with name of 1st driver/device) to recommend user to 690 * upgrade ethtool, and the request is rejected. 691 */ 692 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) 693 { 694 struct ethtool_link_ksettings link_ksettings; 695 struct ethtool_cmd cmd; 696 int ret; 697 698 ASSERT_RTNL(); 699 700 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 701 return -EFAULT; 702 if (!dev->ethtool_ops->set_link_ksettings) 703 return -EOPNOTSUPP; 704 705 if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) 706 return -EINVAL; 707 link_ksettings.base.link_mode_masks_nwords = 708 __ETHTOOL_LINK_MODE_MASK_NU32; 709 ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 710 if (ret >= 0) { 711 ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL); 712 ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL); 713 } 714 return ret; 715 } 716 717 static int 718 ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) 719 { 720 const struct ethtool_ops *ops = dev->ethtool_ops; 721 struct device *parent = dev->dev.parent; 722 723 rsp->info.cmd = ETHTOOL_GDRVINFO; 724 strscpy(rsp->info.version, init_uts_ns.name.release, 725 sizeof(rsp->info.version)); 726 if (ops->get_drvinfo) { 727 ops->get_drvinfo(dev, &rsp->info); 728 if (!rsp->info.bus_info[0] && parent) 729 strscpy(rsp->info.bus_info, dev_name(parent), 730 sizeof(rsp->info.bus_info)); 731 if (!rsp->info.driver[0] && parent && parent->driver) 732 strscpy(rsp->info.driver, parent->driver->name, 733 sizeof(rsp->info.driver)); 734 } else if (parent && parent->driver) { 735 strscpy(rsp->info.bus_info, dev_name(parent), 736 sizeof(rsp->info.bus_info)); 737 strscpy(rsp->info.driver, parent->driver->name, 738 sizeof(rsp->info.driver)); 739 } else if (dev->rtnl_link_ops) { 740 strscpy(rsp->info.driver, dev->rtnl_link_ops->kind, 741 sizeof(rsp->info.driver)); 742 } else { 743 return -EOPNOTSUPP; 744 } 745 746 /* 747 * this method of obtaining string set info is deprecated; 748 * Use ETHTOOL_GSSET_INFO instead. 
749 */ 750 if (ops->get_sset_count) { 751 int rc; 752 753 rc = ops->get_sset_count(dev, ETH_SS_TEST); 754 if (rc >= 0) 755 rsp->info.testinfo_len = rc; 756 rc = ops->get_sset_count(dev, ETH_SS_STATS); 757 if (rc >= 0) 758 rsp->info.n_stats = rc; 759 rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); 760 if (rc >= 0) 761 rsp->info.n_priv_flags = rc; 762 } 763 if (ops->get_regs_len) { 764 int ret = ops->get_regs_len(dev); 765 766 if (ret > 0) 767 rsp->info.regdump_len = ret; 768 } 769 770 if (ops->get_eeprom_len) 771 rsp->info.eedump_len = ops->get_eeprom_len(dev); 772 773 if (!rsp->info.fw_version[0]) 774 rsp->devlink = netdev_to_devlink_get(dev); 775 776 return 0; 777 } 778 779 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, 780 void __user *useraddr) 781 { 782 struct ethtool_sset_info info; 783 u64 sset_mask; 784 int i, idx = 0, n_bits = 0, ret, rc; 785 u32 *info_buf = NULL; 786 787 if (copy_from_user(&info, useraddr, sizeof(info))) 788 return -EFAULT; 789 790 /* store copy of mask, because we zero struct later on */ 791 sset_mask = info.sset_mask; 792 if (!sset_mask) 793 return 0; 794 795 /* calculate size of return buffer */ 796 n_bits = hweight64(sset_mask); 797 798 memset(&info, 0, sizeof(info)); 799 info.cmd = ETHTOOL_GSSET_INFO; 800 801 info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER); 802 if (!info_buf) 803 return -ENOMEM; 804 805 /* 806 * fill return buffer based on input bitmask and successful 807 * get_sset_count return 808 */ 809 for (i = 0; i < 64; i++) { 810 if (!(sset_mask & (1ULL << i))) 811 continue; 812 813 rc = __ethtool_get_sset_count(dev, i); 814 if (rc >= 0) { 815 info.sset_mask |= (1ULL << i); 816 info_buf[idx++] = rc; 817 } 818 } 819 820 ret = -EFAULT; 821 if (copy_to_user(useraddr, &info, sizeof(info))) 822 goto out; 823 824 useraddr += offsetof(struct ethtool_sset_info, data); 825 if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32)))) 826 goto out; 827 828 ret = 0; 829 830 out: 831 kfree(info_buf); 832 return ret; 833 } 834 835 static noinline_for_stack int 836 ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc, 837 const struct compat_ethtool_rxnfc __user *useraddr, 838 size_t size) 839 { 840 struct compat_ethtool_rxnfc crxnfc = {}; 841 842 /* We expect there to be holes between fs.m_ext and 843 * fs.ring_cookie and at the end of fs, but nowhere else. 844 * On non-x86, no conversion should be needed. 
845 */ 846 BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) && 847 sizeof(struct compat_ethtool_rxnfc) != 848 sizeof(struct ethtool_rxnfc)); 849 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + 850 sizeof(useraddr->fs.m_ext) != 851 offsetof(struct ethtool_rxnfc, fs.m_ext) + 852 sizeof(rxnfc->fs.m_ext)); 853 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) - 854 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 855 offsetof(struct ethtool_rxnfc, fs.location) - 856 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 857 858 if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc)))) 859 return -EFAULT; 860 861 *rxnfc = (struct ethtool_rxnfc) { 862 .cmd = crxnfc.cmd, 863 .flow_type = crxnfc.flow_type, 864 .data = crxnfc.data, 865 .fs = { 866 .flow_type = crxnfc.fs.flow_type, 867 .h_u = crxnfc.fs.h_u, 868 .h_ext = crxnfc.fs.h_ext, 869 .m_u = crxnfc.fs.m_u, 870 .m_ext = crxnfc.fs.m_ext, 871 .ring_cookie = crxnfc.fs.ring_cookie, 872 .location = crxnfc.fs.location, 873 }, 874 .rule_cnt = crxnfc.rule_cnt, 875 }; 876 877 return 0; 878 } 879 880 static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc, 881 const void __user *useraddr, 882 size_t size) 883 { 884 if (compat_need_64bit_alignment_fixup()) 885 return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size); 886 887 if (copy_from_user(rxnfc, useraddr, size)) 888 return -EFAULT; 889 890 return 0; 891 } 892 893 static int ethtool_rxnfc_copy_to_compat(void __user *useraddr, 894 const struct ethtool_rxnfc *rxnfc, 895 size_t size, const u32 *rule_buf) 896 { 897 struct compat_ethtool_rxnfc crxnfc; 898 899 memset(&crxnfc, 0, sizeof(crxnfc)); 900 crxnfc = (struct compat_ethtool_rxnfc) { 901 .cmd = rxnfc->cmd, 902 .flow_type = rxnfc->flow_type, 903 .data = rxnfc->data, 904 .fs = { 905 .flow_type = rxnfc->fs.flow_type, 906 .h_u = rxnfc->fs.h_u, 907 .h_ext = rxnfc->fs.h_ext, 908 .m_u = rxnfc->fs.m_u, 909 .m_ext = rxnfc->fs.m_ext, 910 .ring_cookie = rxnfc->fs.ring_cookie, 911 .location = rxnfc->fs.location, 912 }, 913 .rule_cnt = rxnfc->rule_cnt, 914 }; 915 916 if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc)))) 917 return -EFAULT; 918 919 return 0; 920 } 921 922 static int ethtool_rxnfc_copy_struct(u32 cmd, struct ethtool_rxnfc *info, 923 size_t *info_size, void __user *useraddr) 924 { 925 /* struct ethtool_rxnfc was originally defined for 926 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data 927 * members. User-space might still be using that 928 * definition. 929 */ 930 if (cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) 931 *info_size = (offsetof(struct ethtool_rxnfc, data) + 932 sizeof(info->data)); 933 934 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 935 return -EFAULT; 936 937 if ((cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) && info->flow_type & FLOW_RSS) { 938 *info_size = sizeof(*info); 939 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 940 return -EFAULT; 941 /* Since malicious users may modify the original data, 942 * we need to check whether FLOW_RSS is still requested. 
943 */ 944 if (!(info->flow_type & FLOW_RSS)) 945 return -EINVAL; 946 } 947 948 if (info->cmd != cmd) 949 return -EINVAL; 950 951 return 0; 952 } 953 954 static int ethtool_rxnfc_copy_to_user(void __user *useraddr, 955 const struct ethtool_rxnfc *rxnfc, 956 size_t size, const u32 *rule_buf) 957 { 958 int ret; 959 960 if (compat_need_64bit_alignment_fixup()) { 961 ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size, 962 rule_buf); 963 useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs); 964 } else { 965 ret = copy_to_user(useraddr, rxnfc, size); 966 useraddr += offsetof(struct ethtool_rxnfc, rule_locs); 967 } 968 969 if (ret) 970 return -EFAULT; 971 972 if (rule_buf) { 973 if (copy_to_user(useraddr, rule_buf, 974 rxnfc->rule_cnt * sizeof(u32))) 975 return -EFAULT; 976 } 977 978 return 0; 979 } 980 981 static bool flow_type_hashable(u32 flow_type) 982 { 983 switch (flow_type) { 984 case TCP_V4_FLOW: 985 case UDP_V4_FLOW: 986 case SCTP_V4_FLOW: 987 case AH_ESP_V4_FLOW: 988 case TCP_V6_FLOW: 989 case UDP_V6_FLOW: 990 case SCTP_V6_FLOW: 991 case AH_ESP_V6_FLOW: 992 case AH_V4_FLOW: 993 case ESP_V4_FLOW: 994 case AH_V6_FLOW: 995 case ESP_V6_FLOW: 996 case IPV4_FLOW: 997 case IPV6_FLOW: 998 case GTPU_V4_FLOW: 999 case GTPU_V6_FLOW: 1000 case GTPC_V4_FLOW: 1001 case GTPC_V6_FLOW: 1002 case GTPC_TEID_V4_FLOW: 1003 case GTPC_TEID_V6_FLOW: 1004 case GTPU_EH_V4_FLOW: 1005 case GTPU_EH_V6_FLOW: 1006 case GTPU_UL_V4_FLOW: 1007 case GTPU_UL_V6_FLOW: 1008 case GTPU_DL_V4_FLOW: 1009 case GTPU_DL_V6_FLOW: 1010 return true; 1011 } 1012 1013 return false; 1014 } 1015 1016 /* When adding a new type, update the assert and, if it's hashable, add it to 1017 * the flow_type_hashable switch case. 1018 */ 1019 static_assert(GTPU_DL_V6_FLOW + 1 == __FLOW_TYPE_COUNT); 1020 1021 static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh) 1022 { 1023 /* Sanity check: if symmetric-xor/symmetric-or-xor is set, then: 1024 * 1 - no other fields besides IP src/dst and/or L4 src/dst are set 1025 * 2 - If src is set, dst must also be set 1026 */ 1027 if ((input_xfrm != RXH_XFRM_NO_CHANGE && 1028 input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) && 1029 ((rxfh & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) || 1030 (!!(rxfh & RXH_IP_SRC) ^ !!(rxfh & RXH_IP_DST)) || 1031 (!!(rxfh & RXH_L4_B_0_1) ^ !!(rxfh & RXH_L4_B_2_3)))) 1032 return -EINVAL; 1033 1034 return 0; 1035 } 1036 1037 static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) 1038 { 1039 const struct ethtool_ops *ops = dev->ethtool_ops; 1040 struct ethtool_rxnfc info = { 1041 .cmd = ETHTOOL_GRXFH, 1042 }; 1043 int err; 1044 u32 i; 1045 1046 for (i = 0; i < __FLOW_TYPE_COUNT; i++) { 1047 if (!flow_type_hashable(i)) 1048 continue; 1049 1050 info.flow_type = i; 1051 1052 if (ops->get_rxfh_fields) { 1053 struct ethtool_rxfh_fields fields = { 1054 .flow_type = info.flow_type, 1055 }; 1056 1057 if (ops->get_rxfh_fields(dev, &fields)) 1058 continue; 1059 1060 info.data = fields.data; 1061 } else { 1062 if (ops->get_rxnfc(dev, &info, NULL)) 1063 continue; 1064 } 1065 1066 err = ethtool_check_xfrm_rxfh(input_xfrm, info.data); 1067 if (err) 1068 return err; 1069 } 1070 1071 return 0; 1072 } 1073 1074 static noinline_for_stack int 1075 ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) 1076 { 1077 const struct ethtool_ops *ops = dev->ethtool_ops; 1078 struct ethtool_rxfh_fields fields = {}; 1079 struct ethtool_rxnfc info; 1080 size_t info_size = sizeof(info); 1081 int rc; 1082 1083 if 
(!ops->set_rxnfc && !ops->set_rxfh_fields) 1084 return -EOPNOTSUPP; 1085 1086 rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1087 if (rc) 1088 return rc; 1089 1090 if (info.flow_type & FLOW_RSS && info.rss_context && 1091 !ops->rxfh_per_ctx_fields) 1092 return -EINVAL; 1093 1094 if (ops->get_rxfh) { 1095 struct ethtool_rxfh_param rxfh = {}; 1096 1097 rc = ops->get_rxfh(dev, &rxfh); 1098 if (rc) 1099 return rc; 1100 1101 rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); 1102 if (rc) 1103 return rc; 1104 } 1105 1106 if (!ops->set_rxfh_fields) 1107 return ops->set_rxnfc(dev, &info); 1108 1109 fields.data = info.data; 1110 fields.flow_type = info.flow_type & ~FLOW_RSS; 1111 if (info.flow_type & FLOW_RSS) 1112 fields.rss_context = info.rss_context; 1113 1114 return ops->set_rxfh_fields(dev, &fields, NULL); 1115 } 1116 1117 static noinline_for_stack int 1118 ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) 1119 { 1120 struct ethtool_rxnfc info; 1121 size_t info_size = sizeof(info); 1122 const struct ethtool_ops *ops = dev->ethtool_ops; 1123 int ret; 1124 1125 if (!ops->get_rxnfc && !ops->get_rxfh_fields) 1126 return -EOPNOTSUPP; 1127 1128 ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1129 if (ret) 1130 return ret; 1131 1132 if (info.flow_type & FLOW_RSS && info.rss_context && 1133 !ops->rxfh_per_ctx_fields) 1134 return -EINVAL; 1135 1136 if (ops->get_rxfh_fields) { 1137 struct ethtool_rxfh_fields fields = { 1138 .flow_type = info.flow_type & ~FLOW_RSS, 1139 }; 1140 1141 if (info.flow_type & FLOW_RSS) 1142 fields.rss_context = info.rss_context; 1143 1144 ret = ops->get_rxfh_fields(dev, &fields); 1145 if (ret < 0) 1146 return ret; 1147 1148 info.data = fields.data; 1149 } else { 1150 ret = ops->get_rxnfc(dev, &info, NULL); 1151 if (ret < 0) 1152 return ret; 1153 } 1154 1155 return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL); 1156 } 1157 1158 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 1159 u32 cmd, void __user *useraddr) 1160 { 1161 const struct ethtool_ops *ops = dev->ethtool_ops; 1162 struct ethtool_rxnfc info; 1163 size_t info_size = sizeof(info); 1164 int rc; 1165 1166 if (!ops->set_rxnfc) 1167 return -EOPNOTSUPP; 1168 1169 rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1170 if (rc) 1171 return rc; 1172 1173 if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { 1174 /* Nonzero ring with RSS only makes sense 1175 * if NIC adds them together 1176 */ 1177 if (!ops->cap_rss_rxnfc_adds && 1178 ethtool_get_flow_spec_ring(info.fs.ring_cookie)) 1179 return -EINVAL; 1180 1181 if (info.rss_context && 1182 !xa_load(&dev->ethtool->rss_ctx, info.rss_context)) 1183 return -EINVAL; 1184 } 1185 1186 rc = ops->set_rxnfc(dev, &info); 1187 if (rc) 1188 return rc; 1189 1190 if (cmd == ETHTOOL_SRXCLSRLINS && 1191 ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL)) 1192 return -EFAULT; 1193 1194 return 0; 1195 } 1196 1197 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, 1198 u32 cmd, void __user *useraddr) 1199 { 1200 struct ethtool_rxnfc info; 1201 size_t info_size = sizeof(info); 1202 const struct ethtool_ops *ops = dev->ethtool_ops; 1203 int ret; 1204 void *rule_buf = NULL; 1205 1206 if (!ops->get_rxnfc) 1207 return -EOPNOTSUPP; 1208 1209 ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1210 if (ret) 1211 return ret; 1212 1213 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 1214 if (info.rule_cnt > 0) { 1215 if 
(info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) 1216 rule_buf = kcalloc(info.rule_cnt, sizeof(u32), 1217 GFP_USER); 1218 if (!rule_buf) 1219 return -ENOMEM; 1220 } 1221 } 1222 1223 ret = ops->get_rxnfc(dev, &info, rule_buf); 1224 if (ret < 0) 1225 goto err_out; 1226 1227 ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf); 1228 err_out: 1229 kfree(rule_buf); 1230 1231 return ret; 1232 } 1233 1234 static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, 1235 struct ethtool_rxnfc *rx_rings, 1236 u32 size) 1237 { 1238 int i; 1239 1240 if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0])))) 1241 return -EFAULT; 1242 1243 /* Validate ring indices */ 1244 for (i = 0; i < size; i++) 1245 if (indir[i] >= rx_rings->data) 1246 return -EINVAL; 1247 1248 return 0; 1249 } 1250 1251 u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 1252 1253 void netdev_rss_key_fill(void *buffer, size_t len) 1254 { 1255 BUG_ON(len > sizeof(netdev_rss_key)); 1256 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); 1257 memcpy(buffer, netdev_rss_key, len); 1258 } 1259 EXPORT_SYMBOL(netdev_rss_key_fill); 1260 1261 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, 1262 void __user *useraddr) 1263 { 1264 struct ethtool_rxfh_param rxfh = {}; 1265 u32 user_size; 1266 int ret; 1267 1268 if (!dev->ethtool_ops->get_rxfh_indir_size || 1269 !dev->ethtool_ops->get_rxfh) 1270 return -EOPNOTSUPP; 1271 rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 1272 if (rxfh.indir_size == 0) 1273 return -EOPNOTSUPP; 1274 1275 if (copy_from_user(&user_size, 1276 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1277 sizeof(user_size))) 1278 return -EFAULT; 1279 1280 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), 1281 &rxfh.indir_size, sizeof(rxfh.indir_size))) 1282 return -EFAULT; 1283 1284 /* If the user buffer size is 0, this is just a query for the 1285 * device table size. Otherwise, if it's smaller than the 1286 * device table size it's an error. 1287 */ 1288 if (user_size < rxfh.indir_size) 1289 return user_size == 0 ? 
0 : -EINVAL; 1290 1291 rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER); 1292 if (!rxfh.indir) 1293 return -ENOMEM; 1294 1295 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); 1296 if (ret) 1297 goto out; 1298 if (copy_to_user(useraddr + 1299 offsetof(struct ethtool_rxfh_indir, ring_index[0]), 1300 rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir))) 1301 ret = -EFAULT; 1302 1303 out: 1304 kfree(rxfh.indir); 1305 return ret; 1306 } 1307 1308 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, 1309 void __user *useraddr) 1310 { 1311 const struct ethtool_ops *ops = dev->ethtool_ops; 1312 struct ethtool_rxfh_param rxfh_dev = {}; 1313 struct netlink_ext_ack *extack = NULL; 1314 struct ethtool_rxnfc rx_rings; 1315 u32 user_size, i; 1316 int ret; 1317 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); 1318 1319 if (!ops->get_rxfh_indir_size || !ops->set_rxfh || 1320 !ops->get_rxnfc) 1321 return -EOPNOTSUPP; 1322 1323 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1324 if (rxfh_dev.indir_size == 0) 1325 return -EOPNOTSUPP; 1326 1327 if (copy_from_user(&user_size, 1328 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1329 sizeof(user_size))) 1330 return -EFAULT; 1331 1332 if (user_size != 0 && user_size != rxfh_dev.indir_size) 1333 return -EINVAL; 1334 1335 rxfh_dev.indir = kcalloc(rxfh_dev.indir_size, 1336 sizeof(rxfh_dev.indir[0]), GFP_USER); 1337 if (!rxfh_dev.indir) 1338 return -ENOMEM; 1339 1340 rx_rings.cmd = ETHTOOL_GRXRINGS; 1341 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1342 if (ret) 1343 goto out; 1344 1345 if (user_size == 0) { 1346 u32 *indir = rxfh_dev.indir; 1347 1348 for (i = 0; i < rxfh_dev.indir_size; i++) 1349 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1350 } else { 1351 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1352 useraddr + ringidx_offset, 1353 &rx_rings, 1354 rxfh_dev.indir_size); 1355 if (ret) 1356 goto out; 1357 } 1358 1359 rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE; 1360 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1361 if (ret) 1362 goto out; 1363 1364 /* indicate whether rxfh was set to default */ 1365 if (user_size == 0) 1366 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1367 else 1368 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1369 1370 out: 1371 kfree(rxfh_dev.indir); 1372 return ret; 1373 } 1374 1375 static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, 1376 void __user *useraddr) 1377 { 1378 const struct ethtool_ops *ops = dev->ethtool_ops; 1379 struct ethtool_rxfh_param rxfh_dev = {}; 1380 u32 user_indir_size, user_key_size; 1381 struct ethtool_rxfh_context *ctx; 1382 struct ethtool_rxfh rxfh; 1383 u32 indir_bytes; 1384 u8 *rss_config; 1385 u32 total_size; 1386 int ret; 1387 1388 if (!ops->get_rxfh) 1389 return -EOPNOTSUPP; 1390 1391 if (ops->get_rxfh_indir_size) 1392 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1393 if (ops->get_rxfh_key_size) 1394 rxfh_dev.key_size = ops->get_rxfh_key_size(dev); 1395 1396 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1397 return -EFAULT; 1398 user_indir_size = rxfh.indir_size; 1399 user_key_size = rxfh.key_size; 1400 1401 /* Check that reserved fields are 0 for now */ 1402 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1403 return -EINVAL; 1404 /* Most drivers don't handle rss_context, check it's 0 as well */ 1405 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1406 ops->create_rxfh_context)) 1407 return -EOPNOTSUPP; 1408 1409 rxfh.indir_size = rxfh_dev.indir_size; 1410 rxfh.key_size = 
rxfh_dev.key_size; 1411 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) 1412 return -EFAULT; 1413 1414 if ((user_indir_size && user_indir_size != rxfh_dev.indir_size) || 1415 (user_key_size && user_key_size != rxfh_dev.key_size)) 1416 return -EINVAL; 1417 1418 indir_bytes = user_indir_size * sizeof(rxfh_dev.indir[0]); 1419 total_size = indir_bytes + user_key_size; 1420 rss_config = kzalloc(total_size, GFP_USER); 1421 if (!rss_config) 1422 return -ENOMEM; 1423 1424 if (user_indir_size) 1425 rxfh_dev.indir = (u32 *)rss_config; 1426 1427 if (user_key_size) 1428 rxfh_dev.key = rss_config + indir_bytes; 1429 1430 if (rxfh.rss_context) { 1431 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1432 if (!ctx) { 1433 ret = -ENOENT; 1434 goto out; 1435 } 1436 if (rxfh_dev.indir) 1437 memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx), 1438 indir_bytes); 1439 if (!ops->rxfh_per_ctx_key) { 1440 rxfh_dev.key_size = 0; 1441 } else { 1442 if (rxfh_dev.key) 1443 memcpy(rxfh_dev.key, 1444 ethtool_rxfh_context_key(ctx), 1445 user_key_size); 1446 rxfh_dev.hfunc = ctx->hfunc; 1447 } 1448 rxfh_dev.input_xfrm = ctx->input_xfrm; 1449 ret = 0; 1450 } else { 1451 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); 1452 if (ret) 1453 goto out; 1454 } 1455 1456 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), 1457 &rxfh_dev.hfunc, sizeof(rxfh.hfunc))) { 1458 ret = -EFAULT; 1459 } else if (copy_to_user(useraddr + 1460 offsetof(struct ethtool_rxfh, input_xfrm), 1461 &rxfh_dev.input_xfrm, 1462 sizeof(rxfh.input_xfrm))) { 1463 ret = -EFAULT; 1464 } else if (copy_to_user(useraddr + 1465 offsetof(struct ethtool_rxfh, key_size), 1466 &rxfh_dev.key_size, 1467 sizeof(rxfh.key_size))) { 1468 ret = -EFAULT; 1469 } else if (copy_to_user(useraddr + 1470 offsetof(struct ethtool_rxfh, rss_config[0]), 1471 rss_config, total_size)) { 1472 ret = -EFAULT; 1473 } 1474 out: 1475 kfree(rss_config); 1476 1477 return ret; 1478 } 1479 1480 static struct ethtool_rxfh_context * 1481 ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, 1482 u32 indir_size, u32 key_size) 1483 { 1484 size_t indir_bytes, flex_len, key_off, size; 1485 struct ethtool_rxfh_context *ctx; 1486 u32 priv_bytes, indir_max; 1487 u16 key_max; 1488 1489 key_max = max(key_size, ops->rxfh_key_space); 1490 indir_max = max(indir_size, ops->rxfh_indir_space); 1491 1492 priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); 1493 indir_bytes = array_size(indir_max, sizeof(u32)); 1494 1495 key_off = size_add(priv_bytes, indir_bytes); 1496 flex_len = size_add(key_off, key_max); 1497 size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); 1498 1499 ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); 1500 if (!ctx) 1501 return NULL; 1502 1503 ctx->indir_size = indir_size; 1504 ctx->key_size = key_size; 1505 ctx->key_off = key_off; 1506 ctx->priv_size = ops->rxfh_priv_size; 1507 1508 ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; 1509 ctx->input_xfrm = RXH_XFRM_NO_CHANGE; 1510 1511 return ctx; 1512 } 1513 1514 static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, 1515 void __user *useraddr) 1516 { 1517 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); 1518 const struct ethtool_ops *ops = dev->ethtool_ops; 1519 u32 dev_indir_size = 0, dev_key_size = 0, i; 1520 u32 user_indir_len = 0, indir_bytes = 0; 1521 struct ethtool_rxfh_param rxfh_dev = {}; 1522 struct ethtool_rxfh_context *ctx = NULL; 1523 struct netlink_ext_ack *extack = NULL; 1524 struct ethtool_rxnfc rx_rings; 1525 struct ethtool_rxfh rxfh; 1526 bool locked = false; /* 
dev->ethtool->rss_lock taken */ 1527 bool create = false; 1528 u8 *rss_config; 1529 int ret; 1530 1531 if ((!ops->get_rxnfc && !ops->get_rxfh_fields) || !ops->set_rxfh) 1532 return -EOPNOTSUPP; 1533 1534 if (ops->get_rxfh_indir_size) 1535 dev_indir_size = ops->get_rxfh_indir_size(dev); 1536 if (ops->get_rxfh_key_size) 1537 dev_key_size = ops->get_rxfh_key_size(dev); 1538 1539 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1540 return -EFAULT; 1541 1542 /* Check that reserved fields are 0 for now */ 1543 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1544 return -EINVAL; 1545 /* Most drivers don't handle rss_context, check it's 0 as well */ 1546 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1547 ops->create_rxfh_context)) 1548 return -EOPNOTSUPP; 1549 /* Check input data transformation capabilities */ 1550 if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && 1551 rxfh.input_xfrm != RXH_XFRM_SYM_OR_XOR && 1552 rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) 1553 return -EINVAL; 1554 if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE && 1555 rxfh.input_xfrm & ~ops->supported_input_xfrm) 1556 return -EOPNOTSUPP; 1557 create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; 1558 1559 if ((rxfh.indir_size && 1560 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && 1561 rxfh.indir_size != dev_indir_size) || 1562 (rxfh.key_size && rxfh.key_size != dev_key_size)) 1563 return -EINVAL; 1564 1565 /* Must request at least one change: indir size, hash key, function 1566 * or input transformation. 1567 * There's no need for any of it in case of context creation. 1568 */ 1569 if (!create && 1570 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && 1571 rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE && 1572 rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) 1573 return -EINVAL; 1574 1575 ret = ethtool_check_flow_types(dev, rxfh.input_xfrm); 1576 if (ret) 1577 return ret; 1578 1579 indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); 1580 1581 /* Check settings which may be global rather than per RSS-context */ 1582 if (rxfh.rss_context && !ops->rxfh_per_ctx_key) 1583 if (rxfh.key_size || 1584 (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) || 1585 (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)) 1586 return -EOPNOTSUPP; 1587 1588 rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); 1589 if (!rss_config) 1590 return -ENOMEM; 1591 1592 rx_rings.cmd = ETHTOOL_GRXRINGS; 1593 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1594 if (ret) 1595 goto out; 1596 1597 /* rxfh.indir_size == 0 means reset the indir table to default (master 1598 * context) or delete the context (other RSS contexts). 1599 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. 
1600 */ 1601 if (rxfh.indir_size && 1602 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { 1603 user_indir_len = indir_bytes; 1604 rxfh_dev.indir = (u32 *)rss_config; 1605 rxfh_dev.indir_size = dev_indir_size; 1606 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1607 useraddr + rss_cfg_offset, 1608 &rx_rings, 1609 rxfh.indir_size); 1610 if (ret) 1611 goto out; 1612 } else if (rxfh.indir_size == 0) { 1613 if (rxfh.rss_context == 0) { 1614 u32 *indir; 1615 1616 rxfh_dev.indir = (u32 *)rss_config; 1617 rxfh_dev.indir_size = dev_indir_size; 1618 indir = rxfh_dev.indir; 1619 for (i = 0; i < dev_indir_size; i++) 1620 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1621 } else { 1622 rxfh_dev.rss_delete = true; 1623 } 1624 } 1625 1626 if (rxfh.key_size) { 1627 rxfh_dev.key_size = dev_key_size; 1628 rxfh_dev.key = rss_config + indir_bytes; 1629 if (copy_from_user(rxfh_dev.key, 1630 useraddr + rss_cfg_offset + user_indir_len, 1631 rxfh.key_size)) { 1632 ret = -EFAULT; 1633 goto out; 1634 } 1635 } 1636 1637 if (rxfh.rss_context) { 1638 mutex_lock(&dev->ethtool->rss_lock); 1639 locked = true; 1640 } 1641 1642 if (rxfh.rss_context && rxfh_dev.rss_delete) { 1643 ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); 1644 if (ret) 1645 goto out; 1646 } 1647 1648 if (create) { 1649 if (rxfh_dev.rss_delete) { 1650 ret = -EINVAL; 1651 goto out; 1652 } 1653 ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); 1654 if (!ctx) { 1655 ret = -ENOMEM; 1656 goto out; 1657 } 1658 1659 if (ops->create_rxfh_context) { 1660 u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX; 1661 u32 ctx_id; 1662 1663 /* driver uses new API, core allocates ID */ 1664 ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, 1665 XA_LIMIT(1, limit - 1), 1666 GFP_KERNEL_ACCOUNT); 1667 if (ret < 0) { 1668 kfree(ctx); 1669 goto out; 1670 } 1671 WARN_ON(!ctx_id); /* can't happen */ 1672 rxfh.rss_context = ctx_id; 1673 } 1674 } else if (rxfh.rss_context) { 1675 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1676 if (!ctx) { 1677 ret = -ENOENT; 1678 goto out; 1679 } 1680 } 1681 rxfh_dev.hfunc = rxfh.hfunc; 1682 rxfh_dev.rss_context = rxfh.rss_context; 1683 rxfh_dev.input_xfrm = rxfh.input_xfrm; 1684 1685 if (rxfh.rss_context && ops->create_rxfh_context) { 1686 if (create) { 1687 ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, 1688 extack); 1689 /* Make sure driver populates defaults */ 1690 WARN_ON_ONCE(!ret && !rxfh_dev.key && 1691 ops->rxfh_per_ctx_key && 1692 !memchr_inv(ethtool_rxfh_context_key(ctx), 1693 0, ctx->key_size)); 1694 } else if (rxfh_dev.rss_delete) { 1695 ret = ops->remove_rxfh_context(dev, ctx, 1696 rxfh.rss_context, 1697 extack); 1698 } else { 1699 ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, 1700 extack); 1701 } 1702 } else { 1703 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1704 } 1705 if (ret) { 1706 if (create) { 1707 /* failed to create, free our new tracking entry */ 1708 if (ops->create_rxfh_context) 1709 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1710 kfree(ctx); 1711 } 1712 goto out; 1713 } 1714 1715 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context), 1716 &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context))) 1717 ret = -EFAULT; 1718 1719 if (!rxfh_dev.rss_context) { 1720 /* indicate whether rxfh was set to default */ 1721 if (rxfh.indir_size == 0) 1722 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1723 else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) 1724 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1725 } 1726 /* Update rss_ctx tracking */ 1727 if 
(create && !ops->create_rxfh_context) { 1728 /* driver uses old API, it chose context ID */ 1729 if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) { 1730 /* context ID reused, our tracking is screwed */ 1731 kfree(ctx); 1732 goto out; 1733 } 1734 /* Allocate the exact ID the driver gave us */ 1735 if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context, 1736 ctx, GFP_KERNEL))) { 1737 kfree(ctx); 1738 goto out; 1739 } 1740 1741 /* Fetch the defaults for the old API, in the new API drivers 1742 * should write defaults into ctx themselves. 1743 */ 1744 rxfh_dev.indir = (u32 *)rss_config; 1745 rxfh_dev.indir_size = dev_indir_size; 1746 1747 rxfh_dev.key = rss_config + indir_bytes; 1748 rxfh_dev.key_size = dev_key_size; 1749 1750 ret = ops->get_rxfh(dev, &rxfh_dev); 1751 if (WARN_ON(ret)) { 1752 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1753 kfree(ctx); 1754 goto out; 1755 } 1756 } 1757 if (rxfh_dev.rss_delete) { 1758 WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); 1759 kfree(ctx); 1760 } else if (ctx) { 1761 if (rxfh_dev.indir) { 1762 for (i = 0; i < dev_indir_size; i++) 1763 ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; 1764 ctx->indir_configured = 1765 rxfh.indir_size && 1766 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE; 1767 } 1768 if (rxfh_dev.key) { 1769 memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, 1770 dev_key_size); 1771 ctx->key_configured = !!rxfh.key_size; 1772 } 1773 if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) 1774 ctx->hfunc = rxfh_dev.hfunc; 1775 if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE) 1776 ctx->input_xfrm = rxfh_dev.input_xfrm; 1777 } 1778 1779 out: 1780 if (locked) 1781 mutex_unlock(&dev->ethtool->rss_lock); 1782 kfree(rss_config); 1783 return ret; 1784 } 1785 1786 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 1787 { 1788 struct ethtool_regs regs; 1789 const struct ethtool_ops *ops = dev->ethtool_ops; 1790 void *regbuf; 1791 int reglen, ret; 1792 1793 if (!ops->get_regs || !ops->get_regs_len) 1794 return -EOPNOTSUPP; 1795 1796 if (copy_from_user(®s, useraddr, sizeof(regs))) 1797 return -EFAULT; 1798 1799 reglen = ops->get_regs_len(dev); 1800 if (reglen <= 0) 1801 return reglen; 1802 1803 if (regs.len > reglen) 1804 regs.len = reglen; 1805 1806 regbuf = vzalloc(reglen); 1807 if (!regbuf) 1808 return -ENOMEM; 1809 1810 if (regs.len < reglen) 1811 reglen = regs.len; 1812 1813 ops->get_regs(dev, ®s, regbuf); 1814 1815 ret = -EFAULT; 1816 if (copy_to_user(useraddr, ®s, sizeof(regs))) 1817 goto out; 1818 useraddr += offsetof(struct ethtool_regs, data); 1819 if (copy_to_user(useraddr, regbuf, reglen)) 1820 goto out; 1821 ret = 0; 1822 1823 out: 1824 vfree(regbuf); 1825 return ret; 1826 } 1827 1828 static int ethtool_reset(struct net_device *dev, char __user *useraddr) 1829 { 1830 struct ethtool_value reset; 1831 int ret; 1832 1833 if (!dev->ethtool_ops->reset) 1834 return -EOPNOTSUPP; 1835 1836 if (dev->ethtool->module_fw_flash_in_progress) 1837 return -EBUSY; 1838 1839 if (copy_from_user(&reset, useraddr, sizeof(reset))) 1840 return -EFAULT; 1841 1842 ret = dev->ethtool_ops->reset(dev, &reset.data); 1843 if (ret) 1844 return ret; 1845 1846 if (copy_to_user(useraddr, &reset, sizeof(reset))) 1847 return -EFAULT; 1848 return 0; 1849 } 1850 1851 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 1852 { 1853 struct ethtool_wolinfo wol; 1854 1855 if (!dev->ethtool_ops->get_wol) 1856 return -EOPNOTSUPP; 1857 1858 memset(&wol, 0, sizeof(struct 
ethtool_wolinfo)); 1859 wol.cmd = ETHTOOL_GWOL; 1860 dev->ethtool_ops->get_wol(dev, &wol); 1861 1862 if (copy_to_user(useraddr, &wol, sizeof(wol))) 1863 return -EFAULT; 1864 return 0; 1865 } 1866 1867 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1868 { 1869 struct ethtool_wolinfo wol, cur_wol; 1870 int ret; 1871 1872 if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) 1873 return -EOPNOTSUPP; 1874 1875 memset(&cur_wol, 0, sizeof(struct ethtool_wolinfo)); 1876 cur_wol.cmd = ETHTOOL_GWOL; 1877 dev->ethtool_ops->get_wol(dev, &cur_wol); 1878 1879 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1880 return -EFAULT; 1881 1882 if (wol.wolopts & ~cur_wol.supported) 1883 return -EINVAL; 1884 1885 if (wol.wolopts == cur_wol.wolopts && 1886 !memcmp(wol.sopass, cur_wol.sopass, sizeof(wol.sopass))) 1887 return 0; 1888 1889 ret = dev->ethtool_ops->set_wol(dev, &wol); 1890 if (ret) 1891 return ret; 1892 1893 dev->ethtool->wol_enabled = !!wol.wolopts; 1894 ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL); 1895 1896 return 0; 1897 } 1898 1899 static void eee_to_keee(struct ethtool_keee *keee, 1900 const struct ethtool_eee *eee) 1901 { 1902 memset(keee, 0, sizeof(*keee)); 1903 1904 keee->eee_enabled = eee->eee_enabled; 1905 keee->tx_lpi_enabled = eee->tx_lpi_enabled; 1906 keee->tx_lpi_timer = eee->tx_lpi_timer; 1907 1908 ethtool_convert_legacy_u32_to_link_mode(keee->advertised, 1909 eee->advertised); 1910 } 1911 1912 static void keee_to_eee(struct ethtool_eee *eee, 1913 const struct ethtool_keee *keee) 1914 { 1915 bool overflow; 1916 1917 memset(eee, 0, sizeof(*eee)); 1918 1919 eee->eee_active = keee->eee_active; 1920 eee->eee_enabled = keee->eee_enabled; 1921 eee->tx_lpi_enabled = keee->tx_lpi_enabled; 1922 eee->tx_lpi_timer = keee->tx_lpi_timer; 1923 1924 overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported, 1925 keee->supported); 1926 ethtool_convert_link_mode_to_legacy_u32(&eee->advertised, 1927 keee->advertised); 1928 ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised, 1929 keee->lp_advertised); 1930 if (overflow) 1931 pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n"); 1932 } 1933 1934 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) 1935 { 1936 struct ethtool_keee keee; 1937 struct ethtool_eee eee; 1938 int rc; 1939 1940 if (!dev->ethtool_ops->get_eee) 1941 return -EOPNOTSUPP; 1942 1943 memset(&keee, 0, sizeof(keee)); 1944 rc = dev->ethtool_ops->get_eee(dev, &keee); 1945 if (rc) 1946 return rc; 1947 1948 keee_to_eee(&eee, &keee); 1949 if (copy_to_user(useraddr, &eee, sizeof(eee))) 1950 return -EFAULT; 1951 1952 return 0; 1953 } 1954 1955 static int ethtool_set_eee(struct net_device *dev, char __user *useraddr) 1956 { 1957 struct ethtool_keee keee; 1958 struct ethtool_eee eee; 1959 int ret; 1960 1961 if (!dev->ethtool_ops->set_eee) 1962 return -EOPNOTSUPP; 1963 1964 if (copy_from_user(&eee, useraddr, sizeof(eee))) 1965 return -EFAULT; 1966 1967 eee_to_keee(&keee, &eee); 1968 ret = dev->ethtool_ops->set_eee(dev, &keee); 1969 if (!ret) 1970 ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL); 1971 return ret; 1972 } 1973 1974 static int ethtool_nway_reset(struct net_device *dev) 1975 { 1976 if (!dev->ethtool_ops->nway_reset) 1977 return -EOPNOTSUPP; 1978 1979 return dev->ethtool_ops->nway_reset(dev); 1980 } 1981 1982 static int ethtool_get_link(struct net_device *dev, char __user *useraddr) 1983 { 1984 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; 1985 int link = 
__ethtool_get_link(dev); 1986 1987 if (link < 0) 1988 return link; 1989 1990 edata.data = link; 1991 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1992 return -EFAULT; 1993 return 0; 1994 } 1995 1996 static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, 1997 int (*getter)(struct net_device *, 1998 struct ethtool_eeprom *, u8 *), 1999 u32 total_len) 2000 { 2001 struct ethtool_eeprom eeprom; 2002 void __user *userbuf = useraddr + sizeof(eeprom); 2003 u32 bytes_remaining; 2004 u8 *data; 2005 int ret = 0; 2006 2007 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 2008 return -EFAULT; 2009 2010 /* Check for wrap and zero */ 2011 if (eeprom.offset + eeprom.len <= eeprom.offset) 2012 return -EINVAL; 2013 2014 /* Check for exceeding total eeprom len */ 2015 if (eeprom.offset + eeprom.len > total_len) 2016 return -EINVAL; 2017 2018 data = kzalloc(PAGE_SIZE, GFP_USER); 2019 if (!data) 2020 return -ENOMEM; 2021 2022 bytes_remaining = eeprom.len; 2023 while (bytes_remaining > 0) { 2024 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 2025 2026 ret = getter(dev, &eeprom, data); 2027 if (ret) 2028 break; 2029 if (!eeprom.len) { 2030 ret = -EIO; 2031 break; 2032 } 2033 if (copy_to_user(userbuf, data, eeprom.len)) { 2034 ret = -EFAULT; 2035 break; 2036 } 2037 userbuf += eeprom.len; 2038 eeprom.offset += eeprom.len; 2039 bytes_remaining -= eeprom.len; 2040 } 2041 2042 eeprom.len = userbuf - (useraddr + sizeof(eeprom)); 2043 eeprom.offset -= eeprom.len; 2044 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) 2045 ret = -EFAULT; 2046 2047 kfree(data); 2048 return ret; 2049 } 2050 2051 static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) 2052 { 2053 const struct ethtool_ops *ops = dev->ethtool_ops; 2054 2055 if (!ops->get_eeprom || !ops->get_eeprom_len || 2056 !ops->get_eeprom_len(dev)) 2057 return -EOPNOTSUPP; 2058 2059 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, 2060 ops->get_eeprom_len(dev)); 2061 } 2062 2063 static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) 2064 { 2065 struct ethtool_eeprom eeprom; 2066 const struct ethtool_ops *ops = dev->ethtool_ops; 2067 void __user *userbuf = useraddr + sizeof(eeprom); 2068 u32 bytes_remaining; 2069 u8 *data; 2070 int ret = 0; 2071 2072 if (!ops->set_eeprom || !ops->get_eeprom_len || 2073 !ops->get_eeprom_len(dev)) 2074 return -EOPNOTSUPP; 2075 2076 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 2077 return -EFAULT; 2078 2079 /* Check for wrap and zero */ 2080 if (eeprom.offset + eeprom.len <= eeprom.offset) 2081 return -EINVAL; 2082 2083 /* Check for exceeding total eeprom len */ 2084 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 2085 return -EINVAL; 2086 2087 data = kzalloc(PAGE_SIZE, GFP_USER); 2088 if (!data) 2089 return -ENOMEM; 2090 2091 bytes_remaining = eeprom.len; 2092 while (bytes_remaining > 0) { 2093 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 2094 2095 if (copy_from_user(data, userbuf, eeprom.len)) { 2096 ret = -EFAULT; 2097 break; 2098 } 2099 ret = ops->set_eeprom(dev, &eeprom, data); 2100 if (ret) 2101 break; 2102 userbuf += eeprom.len; 2103 eeprom.offset += eeprom.len; 2104 bytes_remaining -= eeprom.len; 2105 } 2106 2107 kfree(data); 2108 return ret; 2109 } 2110 2111 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, 2112 void __user *useraddr) 2113 { 2114 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 2115 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2116 int ret; 
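/* Only the fixed-layout struct ethtool_coalesce is copied back to userspace
 * below; any extended parameters the driver fills into kernel_ethtool_coalesce
 * (e.g. CQE mode) are not exposed through this legacy ioctl.
 */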
2117 2118 if (!dev->ethtool_ops->get_coalesce) 2119 return -EOPNOTSUPP; 2120 2121 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2122 NULL); 2123 if (ret) 2124 return ret; 2125 2126 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 2127 return -EFAULT; 2128 return 0; 2129 } 2130 2131 static bool 2132 ethtool_set_coalesce_supported(struct net_device *dev, 2133 struct ethtool_coalesce *coalesce) 2134 { 2135 u32 supported_params = dev->ethtool_ops->supported_coalesce_params; 2136 u32 nonzero_params = 0; 2137 2138 if (coalesce->rx_coalesce_usecs) 2139 nonzero_params |= ETHTOOL_COALESCE_RX_USECS; 2140 if (coalesce->rx_max_coalesced_frames) 2141 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES; 2142 if (coalesce->rx_coalesce_usecs_irq) 2143 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ; 2144 if (coalesce->rx_max_coalesced_frames_irq) 2145 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ; 2146 if (coalesce->tx_coalesce_usecs) 2147 nonzero_params |= ETHTOOL_COALESCE_TX_USECS; 2148 if (coalesce->tx_max_coalesced_frames) 2149 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES; 2150 if (coalesce->tx_coalesce_usecs_irq) 2151 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ; 2152 if (coalesce->tx_max_coalesced_frames_irq) 2153 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ; 2154 if (coalesce->stats_block_coalesce_usecs) 2155 nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS; 2156 if (coalesce->use_adaptive_rx_coalesce) 2157 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX; 2158 if (coalesce->use_adaptive_tx_coalesce) 2159 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX; 2160 if (coalesce->pkt_rate_low) 2161 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW; 2162 if (coalesce->rx_coalesce_usecs_low) 2163 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW; 2164 if (coalesce->rx_max_coalesced_frames_low) 2165 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW; 2166 if (coalesce->tx_coalesce_usecs_low) 2167 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW; 2168 if (coalesce->tx_max_coalesced_frames_low) 2169 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW; 2170 if (coalesce->pkt_rate_high) 2171 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH; 2172 if (coalesce->rx_coalesce_usecs_high) 2173 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH; 2174 if (coalesce->rx_max_coalesced_frames_high) 2175 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH; 2176 if (coalesce->tx_coalesce_usecs_high) 2177 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH; 2178 if (coalesce->tx_max_coalesced_frames_high) 2179 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH; 2180 if (coalesce->rate_sample_interval) 2181 nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL; 2182 2183 return (supported_params & nonzero_params) == nonzero_params; 2184 } 2185 2186 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, 2187 void __user *useraddr) 2188 { 2189 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2190 struct ethtool_coalesce coalesce; 2191 int ret; 2192 2193 if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) 2194 return -EOPNOTSUPP; 2195 2196 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2197 NULL); 2198 if (ret) 2199 return ret; 2200 2201 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) 2202 return -EFAULT; 2203 2204 if (!ethtool_set_coalesce_supported(dev, &coalesce)) 2205 return -EOPNOTSUPP; 2206 2207 ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, 2208 NULL); 2209 if 
(!ret) 2210 ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL); 2211 return ret; 2212 } 2213 2214 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 2215 { 2216 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; 2217 struct kernel_ethtool_ringparam kernel_ringparam = {}; 2218 2219 if (!dev->ethtool_ops->get_ringparam) 2220 return -EOPNOTSUPP; 2221 2222 dev->ethtool_ops->get_ringparam(dev, &ringparam, 2223 &kernel_ringparam, NULL); 2224 2225 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) 2226 return -EFAULT; 2227 return 0; 2228 } 2229 2230 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) 2231 { 2232 struct kernel_ethtool_ringparam kernel_ringparam; 2233 struct ethtool_ringparam ringparam, max; 2234 int ret; 2235 2236 if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) 2237 return -EOPNOTSUPP; 2238 2239 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) 2240 return -EFAULT; 2241 2242 ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL); 2243 2244 /* ensure new ring parameters are within the maximums */ 2245 if (ringparam.rx_pending > max.rx_max_pending || 2246 ringparam.rx_mini_pending > max.rx_mini_max_pending || 2247 ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending || 2248 ringparam.tx_pending > max.tx_max_pending) 2249 return -EINVAL; 2250 2251 ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, 2252 &kernel_ringparam, NULL); 2253 if (!ret) 2254 ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); 2255 return ret; 2256 } 2257 2258 static noinline_for_stack int ethtool_get_channels(struct net_device *dev, 2259 void __user *useraddr) 2260 { 2261 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; 2262 2263 if (!dev->ethtool_ops->get_channels) 2264 return -EOPNOTSUPP; 2265 2266 dev->ethtool_ops->get_channels(dev, &channels); 2267 2268 if (copy_to_user(useraddr, &channels, sizeof(channels))) 2269 return -EFAULT; 2270 return 0; 2271 } 2272 2273 static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 2274 void __user *useraddr) 2275 { 2276 struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; 2277 u16 from_channel, to_channel; 2278 unsigned int i; 2279 int ret; 2280 2281 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) 2282 return -EOPNOTSUPP; 2283 2284 if (copy_from_user(&channels, useraddr, sizeof(channels))) 2285 return -EFAULT; 2286 2287 dev->ethtool_ops->get_channels(dev, &curr); 2288 2289 if (channels.rx_count == curr.rx_count && 2290 channels.tx_count == curr.tx_count && 2291 channels.combined_count == curr.combined_count && 2292 channels.other_count == curr.other_count) 2293 return 0; 2294 2295 /* ensure new counts are within the maximums */ 2296 if (channels.rx_count > curr.max_rx || 2297 channels.tx_count > curr.max_tx || 2298 channels.combined_count > curr.max_combined || 2299 channels.other_count > curr.max_other) 2300 return -EINVAL; 2301 2302 /* ensure there is at least one RX and one TX channel */ 2303 if (!channels.combined_count && 2304 (!channels.rx_count || !channels.tx_count)) 2305 return -EINVAL; 2306 2307 ret = ethtool_check_max_channel(dev, channels, NULL); 2308 if (ret) 2309 return ret; 2310 2311 /* Disabling channels, query zero-copy AF_XDP sockets */ 2312 from_channel = channels.combined_count + 2313 min(channels.rx_count, channels.tx_count); 2314 to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); 2315 for (i = from_channel; i < to_channel; i++) 
2316 if (xsk_get_pool_from_qid(dev, i)) 2317 return -EINVAL; 2318 2319 ret = dev->ethtool_ops->set_channels(dev, &channels); 2320 if (!ret) 2321 ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL); 2322 return ret; 2323 } 2324 2325 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 2326 { 2327 struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM }; 2328 2329 if (!dev->ethtool_ops->get_pauseparam) 2330 return -EOPNOTSUPP; 2331 2332 dev->ethtool_ops->get_pauseparam(dev, &pauseparam); 2333 2334 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) 2335 return -EFAULT; 2336 return 0; 2337 } 2338 2339 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) 2340 { 2341 struct ethtool_pauseparam pauseparam; 2342 int ret; 2343 2344 if (!dev->ethtool_ops->set_pauseparam) 2345 return -EOPNOTSUPP; 2346 2347 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 2348 return -EFAULT; 2349 2350 ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam); 2351 if (!ret) 2352 ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL); 2353 return ret; 2354 } 2355 2356 static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 2357 { 2358 struct ethtool_test test; 2359 const struct ethtool_ops *ops = dev->ethtool_ops; 2360 u64 *data; 2361 int ret, test_len; 2362 2363 if (!ops->self_test || !ops->get_sset_count) 2364 return -EOPNOTSUPP; 2365 2366 test_len = ops->get_sset_count(dev, ETH_SS_TEST); 2367 if (test_len < 0) 2368 return test_len; 2369 WARN_ON(test_len == 0); 2370 2371 if (copy_from_user(&test, useraddr, sizeof(test))) 2372 return -EFAULT; 2373 2374 test.len = test_len; 2375 data = kcalloc(test_len, sizeof(u64), GFP_USER); 2376 if (!data) 2377 return -ENOMEM; 2378 2379 netif_testing_on(dev); 2380 ops->self_test(dev, &test, data); 2381 netif_testing_off(dev); 2382 2383 ret = -EFAULT; 2384 if (copy_to_user(useraddr, &test, sizeof(test))) 2385 goto out; 2386 useraddr += sizeof(test); 2387 if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64)))) 2388 goto out; 2389 ret = 0; 2390 2391 out: 2392 kfree(data); 2393 return ret; 2394 } 2395 2396 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) 2397 { 2398 struct ethtool_gstrings gstrings; 2399 u8 *data; 2400 int ret; 2401 2402 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) 2403 return -EFAULT; 2404 2405 ret = __ethtool_get_sset_count(dev, gstrings.string_set); 2406 if (ret < 0) 2407 return ret; 2408 if (ret > S32_MAX / ETH_GSTRING_LEN) 2409 return -ENOMEM; 2410 WARN_ON_ONCE(!ret); 2411 2412 gstrings.len = ret; 2413 2414 if (gstrings.len) { 2415 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); 2416 if (!data) 2417 return -ENOMEM; 2418 2419 __ethtool_get_strings(dev, gstrings.string_set, data); 2420 } else { 2421 data = NULL; 2422 } 2423 2424 ret = -EFAULT; 2425 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 2426 goto out; 2427 useraddr += sizeof(gstrings); 2428 if (gstrings.len && 2429 copy_to_user(useraddr, data, 2430 array_size(gstrings.len, ETH_GSTRING_LEN))) 2431 goto out; 2432 ret = 0; 2433 2434 out: 2435 vfree(data); 2436 return ret; 2437 } 2438 2439 __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...) 
2440 { 2441 va_list args; 2442 2443 va_start(args, fmt); 2444 vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); 2445 va_end(args); 2446 2447 *data += ETH_GSTRING_LEN; 2448 } 2449 EXPORT_SYMBOL(ethtool_sprintf); 2450 2451 void ethtool_puts(u8 **data, const char *str) 2452 { 2453 strscpy(*data, str, ETH_GSTRING_LEN); 2454 *data += ETH_GSTRING_LEN; 2455 } 2456 EXPORT_SYMBOL(ethtool_puts); 2457 2458 static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 2459 { 2460 struct ethtool_value id; 2461 static bool busy; 2462 const struct ethtool_ops *ops = dev->ethtool_ops; 2463 netdevice_tracker dev_tracker; 2464 int rc; 2465 2466 if (!ops->set_phys_id) 2467 return -EOPNOTSUPP; 2468 2469 if (busy) 2470 return -EBUSY; 2471 2472 if (copy_from_user(&id, useraddr, sizeof(id))) 2473 return -EFAULT; 2474 2475 rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); 2476 if (rc < 0) 2477 return rc; 2478 2479 /* Drop the RTNL lock while waiting, but prevent reentry or 2480 * removal of the device. 2481 */ 2482 busy = true; 2483 netdev_hold(dev, &dev_tracker, GFP_KERNEL); 2484 netdev_unlock_ops(dev); 2485 rtnl_unlock(); 2486 2487 if (rc == 0) { 2488 /* Driver will handle this itself */ 2489 schedule_timeout_interruptible( 2490 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT); 2491 } else { 2492 /* Driver expects to be called at twice the frequency in rc */ 2493 int n = rc * 2, interval = HZ / n; 2494 u64 count = mul_u32_u32(n, id.data); 2495 u64 i = 0; 2496 2497 do { 2498 rtnl_lock(); 2499 netdev_lock_ops(dev); 2500 rc = ops->set_phys_id(dev, 2501 (i++ & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON); 2502 netdev_unlock_ops(dev); 2503 rtnl_unlock(); 2504 if (rc) 2505 break; 2506 schedule_timeout_interruptible(interval); 2507 } while (!signal_pending(current) && (!id.data || i < count)); 2508 } 2509 2510 rtnl_lock(); 2511 netdev_lock_ops(dev); 2512 netdev_put(dev, &dev_tracker); 2513 busy = false; 2514 2515 (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); 2516 return rc; 2517 } 2518 2519 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 2520 { 2521 struct ethtool_stats stats; 2522 const struct ethtool_ops *ops = dev->ethtool_ops; 2523 u64 *data; 2524 int ret, n_stats; 2525 2526 if (!ops->get_ethtool_stats || !ops->get_sset_count) 2527 return -EOPNOTSUPP; 2528 2529 n_stats = ops->get_sset_count(dev, ETH_SS_STATS); 2530 if (n_stats < 0) 2531 return n_stats; 2532 if (n_stats > S32_MAX / sizeof(u64)) 2533 return -ENOMEM; 2534 WARN_ON_ONCE(!n_stats); 2535 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2536 return -EFAULT; 2537 2538 stats.n_stats = n_stats; 2539 2540 if (n_stats) { 2541 data = vzalloc(array_size(n_stats, sizeof(u64))); 2542 if (!data) 2543 return -ENOMEM; 2544 ops->get_ethtool_stats(dev, &stats, data); 2545 } else { 2546 data = NULL; 2547 } 2548 2549 ret = -EFAULT; 2550 if (copy_to_user(useraddr, &stats, sizeof(stats))) 2551 goto out; 2552 useraddr += sizeof(stats); 2553 if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) 2554 goto out; 2555 ret = 0; 2556 2557 out: 2558 vfree(data); 2559 return ret; 2560 } 2561 2562 static int ethtool_vzalloc_stats_array(int n_stats, u64 **data) 2563 { 2564 if (n_stats < 0) 2565 return n_stats; 2566 if (n_stats > S32_MAX / sizeof(u64)) 2567 return -ENOMEM; 2568 if (WARN_ON_ONCE(!n_stats)) 2569 return -EOPNOTSUPP; 2570 2571 *data = vzalloc(array_size(n_stats, sizeof(u64))); 2572 if (!*data) 2573 return -ENOMEM; 2574 2575 return 0; 2576 } 2577 2578 static int ethtool_get_phy_stats_phydev(struct phy_device 
*phydev, 2579 struct ethtool_stats *stats, 2580 u64 **data) 2581 { 2582 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 2583 int n_stats, ret; 2584 2585 if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats) 2586 return -EOPNOTSUPP; 2587 2588 n_stats = phy_ops->get_sset_count(phydev); 2589 2590 ret = ethtool_vzalloc_stats_array(n_stats, data); 2591 if (ret) 2592 return ret; 2593 2594 stats->n_stats = n_stats; 2595 return phy_ops->get_stats(phydev, stats, *data); 2596 } 2597 2598 static int ethtool_get_phy_stats_ethtool(struct net_device *dev, 2599 struct ethtool_stats *stats, 2600 u64 **data) 2601 { 2602 const struct ethtool_ops *ops = dev->ethtool_ops; 2603 int n_stats, ret; 2604 2605 if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats) 2606 return -EOPNOTSUPP; 2607 2608 n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); 2609 2610 ret = ethtool_vzalloc_stats_array(n_stats, data); 2611 if (ret) 2612 return ret; 2613 2614 stats->n_stats = n_stats; 2615 ops->get_ethtool_phy_stats(dev, stats, *data); 2616 2617 return 0; 2618 } 2619 2620 static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) 2621 { 2622 struct phy_device *phydev = dev->phydev; 2623 struct ethtool_stats stats; 2624 u64 *data = NULL; 2625 int ret = -EOPNOTSUPP; 2626 2627 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2628 return -EFAULT; 2629 2630 if (phydev) 2631 ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data); 2632 2633 if (ret == -EOPNOTSUPP) 2634 ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data); 2635 2636 if (ret) 2637 goto out; 2638 2639 if (copy_to_user(useraddr, &stats, sizeof(stats))) { 2640 ret = -EFAULT; 2641 goto out; 2642 } 2643 2644 useraddr += sizeof(stats); 2645 if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64)))) 2646 ret = -EFAULT; 2647 2648 out: 2649 vfree(data); 2650 return ret; 2651 } 2652 2653 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) 2654 { 2655 struct ethtool_perm_addr epaddr; 2656 2657 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) 2658 return -EFAULT; 2659 2660 if (epaddr.size < dev->addr_len) 2661 return -ETOOSMALL; 2662 epaddr.size = dev->addr_len; 2663 2664 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) 2665 return -EFAULT; 2666 useraddr += sizeof(epaddr); 2667 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) 2668 return -EFAULT; 2669 return 0; 2670 } 2671 2672 static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 2673 u32 cmd, u32 (*actor)(struct net_device *)) 2674 { 2675 struct ethtool_value edata = { .cmd = cmd }; 2676 2677 if (!actor) 2678 return -EOPNOTSUPP; 2679 2680 edata.data = actor(dev); 2681 2682 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2683 return -EFAULT; 2684 return 0; 2685 } 2686 2687 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, 2688 void (*actor)(struct net_device *, u32)) 2689 { 2690 struct ethtool_value edata; 2691 2692 if (!actor) 2693 return -EOPNOTSUPP; 2694 2695 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2696 return -EFAULT; 2697 2698 actor(dev, edata.data); 2699 return 0; 2700 } 2701 2702 static int ethtool_set_value(struct net_device *dev, char __user *useraddr, 2703 int (*actor)(struct net_device *, u32)) 2704 { 2705 struct ethtool_value edata; 2706 2707 if (!actor) 2708 return -EOPNOTSUPP; 2709 2710 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2711 return -EFAULT; 2712 2713 return actor(dev, edata.data); 2714 } 2715 2716 static 
int 2717 ethtool_flash_device(struct net_device *dev, struct ethtool_devlink_compat *req) 2718 { 2719 if (!dev->ethtool_ops->flash_device) { 2720 req->devlink = netdev_to_devlink_get(dev); 2721 return 0; 2722 } 2723 2724 return dev->ethtool_ops->flash_device(dev, &req->efl); 2725 } 2726 2727 static int ethtool_set_dump(struct net_device *dev, 2728 void __user *useraddr) 2729 { 2730 struct ethtool_dump dump; 2731 2732 if (!dev->ethtool_ops->set_dump) 2733 return -EOPNOTSUPP; 2734 2735 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2736 return -EFAULT; 2737 2738 return dev->ethtool_ops->set_dump(dev, &dump); 2739 } 2740 2741 static int ethtool_get_dump_flag(struct net_device *dev, 2742 void __user *useraddr) 2743 { 2744 int ret; 2745 struct ethtool_dump dump; 2746 const struct ethtool_ops *ops = dev->ethtool_ops; 2747 2748 if (!ops->get_dump_flag) 2749 return -EOPNOTSUPP; 2750 2751 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2752 return -EFAULT; 2753 2754 ret = ops->get_dump_flag(dev, &dump); 2755 if (ret) 2756 return ret; 2757 2758 if (copy_to_user(useraddr, &dump, sizeof(dump))) 2759 return -EFAULT; 2760 return 0; 2761 } 2762 2763 static int ethtool_get_dump_data(struct net_device *dev, 2764 void __user *useraddr) 2765 { 2766 int ret; 2767 __u32 len; 2768 struct ethtool_dump dump, tmp; 2769 const struct ethtool_ops *ops = dev->ethtool_ops; 2770 void *data = NULL; 2771 2772 if (!ops->get_dump_data || !ops->get_dump_flag) 2773 return -EOPNOTSUPP; 2774 2775 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2776 return -EFAULT; 2777 2778 memset(&tmp, 0, sizeof(tmp)); 2779 tmp.cmd = ETHTOOL_GET_DUMP_FLAG; 2780 ret = ops->get_dump_flag(dev, &tmp); 2781 if (ret) 2782 return ret; 2783 2784 len = min(tmp.len, dump.len); 2785 if (!len) 2786 return -EFAULT; 2787 2788 /* Don't ever let the driver think there's more space available 2789 * than it requested with .get_dump_flag(). 2790 */ 2791 dump.len = len; 2792 2793 /* Always allocate enough space to hold the whole thing so that the 2794 * driver does not need to check the length and bother with partial 2795 * dumping. 2796 */ 2797 data = vzalloc(tmp.len); 2798 if (!data) 2799 return -ENOMEM; 2800 ret = ops->get_dump_data(dev, &dump, data); 2801 if (ret) 2802 goto out; 2803 2804 /* There are two sane possibilities: 2805 * 1. The driver's .get_dump_data() does not touch dump.len. 2806 * 2. Or it may set dump.len to how much it really writes, which 2807 * should be tmp.len (or len if it can do a partial dump). 2808 * In any case respond to userspace with the actual length of data 2809 * it's receiving. 
2810 */ 2811 WARN_ON(dump.len != len && dump.len != tmp.len); 2812 dump.len = len; 2813 2814 if (copy_to_user(useraddr, &dump, sizeof(dump))) { 2815 ret = -EFAULT; 2816 goto out; 2817 } 2818 useraddr += offsetof(struct ethtool_dump, data); 2819 if (copy_to_user(useraddr, data, len)) 2820 ret = -EFAULT; 2821 out: 2822 vfree(data); 2823 return ret; 2824 } 2825 2826 static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) 2827 { 2828 struct kernel_ethtool_ts_info kernel_info; 2829 struct ethtool_ts_info info = {}; 2830 int err; 2831 2832 err = __ethtool_get_ts_info(dev, &kernel_info); 2833 if (err) 2834 return err; 2835 2836 info.cmd = kernel_info.cmd; 2837 info.so_timestamping = kernel_info.so_timestamping; 2838 info.phc_index = kernel_info.phc_index; 2839 info.tx_types = kernel_info.tx_types; 2840 info.rx_filters = kernel_info.rx_filters; 2841 2842 if (copy_to_user(useraddr, &info, sizeof(info))) 2843 return -EFAULT; 2844 2845 return 0; 2846 } 2847 2848 int ethtool_get_module_info_call(struct net_device *dev, 2849 struct ethtool_modinfo *modinfo) 2850 { 2851 const struct ethtool_ops *ops = dev->ethtool_ops; 2852 struct phy_device *phydev = dev->phydev; 2853 2854 if (dev->ethtool->module_fw_flash_in_progress) 2855 return -EBUSY; 2856 2857 if (dev->sfp_bus) 2858 return sfp_get_module_info(dev->sfp_bus, modinfo); 2859 2860 if (phydev && phydev->drv && phydev->drv->module_info) 2861 return phydev->drv->module_info(phydev, modinfo); 2862 2863 if (ops->get_module_info) 2864 return ops->get_module_info(dev, modinfo); 2865 2866 return -EOPNOTSUPP; 2867 } 2868 2869 static int ethtool_get_module_info(struct net_device *dev, 2870 void __user *useraddr) 2871 { 2872 int ret; 2873 struct ethtool_modinfo modinfo; 2874 2875 if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) 2876 return -EFAULT; 2877 2878 ret = ethtool_get_module_info_call(dev, &modinfo); 2879 if (ret) 2880 return ret; 2881 2882 if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) 2883 return -EFAULT; 2884 2885 return 0; 2886 } 2887 2888 int ethtool_get_module_eeprom_call(struct net_device *dev, 2889 struct ethtool_eeprom *ee, u8 *data) 2890 { 2891 const struct ethtool_ops *ops = dev->ethtool_ops; 2892 struct phy_device *phydev = dev->phydev; 2893 2894 if (dev->ethtool->module_fw_flash_in_progress) 2895 return -EBUSY; 2896 2897 if (dev->sfp_bus) 2898 return sfp_get_module_eeprom(dev->sfp_bus, ee, data); 2899 2900 if (phydev && phydev->drv && phydev->drv->module_eeprom) 2901 return phydev->drv->module_eeprom(phydev, ee, data); 2902 2903 if (ops->get_module_eeprom) 2904 return ops->get_module_eeprom(dev, ee, data); 2905 2906 return -EOPNOTSUPP; 2907 } 2908 2909 static int ethtool_get_module_eeprom(struct net_device *dev, 2910 void __user *useraddr) 2911 { 2912 int ret; 2913 struct ethtool_modinfo modinfo; 2914 2915 ret = ethtool_get_module_info_call(dev, &modinfo); 2916 if (ret) 2917 return ret; 2918 2919 return ethtool_get_any_eeprom(dev, useraddr, 2920 ethtool_get_module_eeprom_call, 2921 modinfo.eeprom_len); 2922 } 2923 2924 static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) 2925 { 2926 switch (tuna->id) { 2927 case ETHTOOL_RX_COPYBREAK: 2928 case ETHTOOL_TX_COPYBREAK: 2929 case ETHTOOL_TX_COPYBREAK_BUF_SIZE: 2930 if (tuna->len != sizeof(u32) || 2931 tuna->type_id != ETHTOOL_TUNABLE_U32) 2932 return -EINVAL; 2933 break; 2934 case ETHTOOL_PFC_PREVENTION_TOUT: 2935 if (tuna->len != sizeof(u16) || 2936 tuna->type_id != ETHTOOL_TUNABLE_U16) 2937 return -EINVAL; 2938 break; 2939 default: 2940 
return -EINVAL; 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) 2947 { 2948 int ret; 2949 struct ethtool_tunable tuna; 2950 const struct ethtool_ops *ops = dev->ethtool_ops; 2951 void *data; 2952 2953 if (!ops->get_tunable) 2954 return -EOPNOTSUPP; 2955 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2956 return -EFAULT; 2957 ret = ethtool_tunable_valid(&tuna); 2958 if (ret) 2959 return ret; 2960 data = kzalloc(tuna.len, GFP_USER); 2961 if (!data) 2962 return -ENOMEM; 2963 ret = ops->get_tunable(dev, &tuna, data); 2964 if (ret) 2965 goto out; 2966 useraddr += sizeof(tuna); 2967 ret = -EFAULT; 2968 if (copy_to_user(useraddr, data, tuna.len)) 2969 goto out; 2970 ret = 0; 2971 2972 out: 2973 kfree(data); 2974 return ret; 2975 } 2976 2977 static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) 2978 { 2979 int ret; 2980 struct ethtool_tunable tuna; 2981 const struct ethtool_ops *ops = dev->ethtool_ops; 2982 void *data; 2983 2984 if (!ops->set_tunable) 2985 return -EOPNOTSUPP; 2986 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2987 return -EFAULT; 2988 ret = ethtool_tunable_valid(&tuna); 2989 if (ret) 2990 return ret; 2991 useraddr += sizeof(tuna); 2992 data = memdup_user(useraddr, tuna.len); 2993 if (IS_ERR(data)) 2994 return PTR_ERR(data); 2995 ret = ops->set_tunable(dev, &tuna, data); 2996 2997 kfree(data); 2998 return ret; 2999 } 3000 3001 static noinline_for_stack int 3002 ethtool_get_per_queue_coalesce(struct net_device *dev, 3003 void __user *useraddr, 3004 struct ethtool_per_queue_op *per_queue_opt) 3005 { 3006 u32 bit; 3007 int ret; 3008 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 3009 3010 if (!dev->ethtool_ops->get_per_queue_coalesce) 3011 return -EOPNOTSUPP; 3012 3013 useraddr += sizeof(*per_queue_opt); 3014 3015 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, 3016 MAX_NUM_QUEUE); 3017 3018 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 3019 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 3020 3021 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce); 3022 if (ret != 0) 3023 return ret; 3024 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 3025 return -EFAULT; 3026 useraddr += sizeof(coalesce); 3027 } 3028 3029 return 0; 3030 } 3031 3032 static noinline_for_stack int 3033 ethtool_set_per_queue_coalesce(struct net_device *dev, 3034 void __user *useraddr, 3035 struct ethtool_per_queue_op *per_queue_opt) 3036 { 3037 u32 bit; 3038 int i, ret = 0; 3039 int n_queue; 3040 struct ethtool_coalesce *backup = NULL, *tmp = NULL; 3041 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 3042 3043 if ((!dev->ethtool_ops->set_per_queue_coalesce) || 3044 (!dev->ethtool_ops->get_per_queue_coalesce)) 3045 return -EOPNOTSUPP; 3046 3047 useraddr += sizeof(*per_queue_opt); 3048 3049 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); 3050 n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); 3051 tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); 3052 if (!backup) 3053 return -ENOMEM; 3054 3055 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 3056 struct ethtool_coalesce coalesce; 3057 3058 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp); 3059 if (ret != 0) 3060 goto roll_back; 3061 3062 tmp++; 3063 3064 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) { 3065 ret = -EFAULT; 3066 goto roll_back; 3067 } 3068 3069 if (!ethtool_set_coalesce_supported(dev, &coalesce)) { 3070 ret = -EOPNOTSUPP; 3071 goto 
roll_back; 3072 } 3073 3074 ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce); 3075 if (ret != 0) 3076 goto roll_back; 3077 3078 useraddr += sizeof(coalesce); 3079 } 3080 3081 roll_back: 3082 if (ret != 0) { 3083 tmp = backup; 3084 for_each_set_bit(i, queue_mask, bit) { 3085 dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp); 3086 tmp++; 3087 } 3088 } 3089 kfree(backup); 3090 3091 return ret; 3092 } 3093 3094 static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev, 3095 void __user *useraddr, u32 sub_cmd) 3096 { 3097 struct ethtool_per_queue_op per_queue_opt; 3098 3099 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) 3100 return -EFAULT; 3101 3102 if (per_queue_opt.sub_command != sub_cmd) 3103 return -EINVAL; 3104 3105 switch (per_queue_opt.sub_command) { 3106 case ETHTOOL_GCOALESCE: 3107 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3108 case ETHTOOL_SCOALESCE: 3109 return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3110 default: 3111 return -EOPNOTSUPP; 3112 } 3113 } 3114 3115 static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna) 3116 { 3117 switch (tuna->id) { 3118 case ETHTOOL_PHY_DOWNSHIFT: 3119 case ETHTOOL_PHY_FAST_LINK_DOWN: 3120 if (tuna->len != sizeof(u8) || 3121 tuna->type_id != ETHTOOL_TUNABLE_U8) 3122 return -EINVAL; 3123 break; 3124 case ETHTOOL_PHY_EDPD: 3125 if (tuna->len != sizeof(u16) || 3126 tuna->type_id != ETHTOOL_TUNABLE_U16) 3127 return -EINVAL; 3128 break; 3129 default: 3130 return -EINVAL; 3131 } 3132 3133 return 0; 3134 } 3135 3136 static int get_phy_tunable(struct net_device *dev, void __user *useraddr) 3137 { 3138 struct phy_device *phydev = dev->phydev; 3139 struct ethtool_tunable tuna; 3140 bool phy_drv_tunable; 3141 void *data; 3142 int ret; 3143 3144 phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; 3145 if (!phy_drv_tunable && !dev->ethtool_ops->get_phy_tunable) 3146 return -EOPNOTSUPP; 3147 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3148 return -EFAULT; 3149 ret = ethtool_phy_tunable_valid(&tuna); 3150 if (ret) 3151 return ret; 3152 data = kzalloc(tuna.len, GFP_USER); 3153 if (!data) 3154 return -ENOMEM; 3155 if (phy_drv_tunable) { 3156 mutex_lock(&phydev->lock); 3157 ret = phydev->drv->get_tunable(phydev, &tuna, data); 3158 mutex_unlock(&phydev->lock); 3159 } else { 3160 ret = dev->ethtool_ops->get_phy_tunable(dev, &tuna, data); 3161 } 3162 if (ret) 3163 goto out; 3164 useraddr += sizeof(tuna); 3165 ret = -EFAULT; 3166 if (copy_to_user(useraddr, data, tuna.len)) 3167 goto out; 3168 ret = 0; 3169 3170 out: 3171 kfree(data); 3172 return ret; 3173 } 3174 3175 static int set_phy_tunable(struct net_device *dev, void __user *useraddr) 3176 { 3177 struct phy_device *phydev = dev->phydev; 3178 struct ethtool_tunable tuna; 3179 bool phy_drv_tunable; 3180 void *data; 3181 int ret; 3182 3183 phy_drv_tunable = phydev && phydev->drv && phydev->drv->set_tunable; 3184 if (!phy_drv_tunable && !dev->ethtool_ops->set_phy_tunable) 3185 return -EOPNOTSUPP; 3186 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3187 return -EFAULT; 3188 ret = ethtool_phy_tunable_valid(&tuna); 3189 if (ret) 3190 return ret; 3191 useraddr += sizeof(tuna); 3192 data = memdup_user(useraddr, tuna.len); 3193 if (IS_ERR(data)) 3194 return PTR_ERR(data); 3195 if (phy_drv_tunable) { 3196 mutex_lock(&phydev->lock); 3197 ret = phydev->drv->set_tunable(phydev, &tuna, data); 3198 mutex_unlock(&phydev->lock); 3199 } else { 3200 ret =
dev->ethtool_ops->set_phy_tunable(dev, &tuna, data); 3201 } 3202 3203 kfree(data); 3204 return ret; 3205 } 3206 3207 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) 3208 { 3209 struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM }; 3210 int rc; 3211 3212 if (!dev->ethtool_ops->get_fecparam) 3213 return -EOPNOTSUPP; 3214 3215 rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); 3216 if (rc) 3217 return rc; 3218 3219 if (WARN_ON_ONCE(fecparam.reserved)) 3220 fecparam.reserved = 0; 3221 3222 if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) 3223 return -EFAULT; 3224 return 0; 3225 } 3226 3227 static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) 3228 { 3229 struct ethtool_fecparam fecparam; 3230 3231 if (!dev->ethtool_ops->set_fecparam) 3232 return -EOPNOTSUPP; 3233 3234 if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) 3235 return -EFAULT; 3236 3237 if (!fecparam.fec || fecparam.fec & ETHTOOL_FEC_NONE) 3238 return -EINVAL; 3239 3240 fecparam.active_fec = 0; 3241 fecparam.reserved = 0; 3242 3243 return dev->ethtool_ops->set_fecparam(dev, &fecparam); 3244 } 3245 3246 /* The main entry point in this file. Called from net/core/dev_ioctl.c */ 3247 3248 static int 3249 __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, 3250 u32 ethcmd, struct ethtool_devlink_compat *devlink_state) 3251 { 3252 struct net_device *dev; 3253 u32 sub_cmd; 3254 int rc; 3255 netdev_features_t old_features; 3256 3257 dev = __dev_get_by_name(net, ifr->ifr_name); 3258 if (!dev) 3259 return -ENODEV; 3260 3261 if (ethcmd == ETHTOOL_PERQUEUE) { 3262 if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd))) 3263 return -EFAULT; 3264 } else { 3265 sub_cmd = ethcmd; 3266 } 3267 /* Allow some commands to be done by anyone */ 3268 switch (sub_cmd) { 3269 case ETHTOOL_GSET: 3270 case ETHTOOL_GDRVINFO: 3271 case ETHTOOL_GMSGLVL: 3272 case ETHTOOL_GLINK: 3273 case ETHTOOL_GCOALESCE: 3274 case ETHTOOL_GRINGPARAM: 3275 case ETHTOOL_GPAUSEPARAM: 3276 case ETHTOOL_GRXCSUM: 3277 case ETHTOOL_GTXCSUM: 3278 case ETHTOOL_GSG: 3279 case ETHTOOL_GSSET_INFO: 3280 case ETHTOOL_GSTRINGS: 3281 case ETHTOOL_GSTATS: 3282 case ETHTOOL_GPHYSTATS: 3283 case ETHTOOL_GTSO: 3284 case ETHTOOL_GPERMADDR: 3285 case ETHTOOL_GUFO: 3286 case ETHTOOL_GGSO: 3287 case ETHTOOL_GGRO: 3288 case ETHTOOL_GFLAGS: 3289 case ETHTOOL_GPFLAGS: 3290 case ETHTOOL_GRXFH: 3291 case ETHTOOL_GRXRINGS: 3292 case ETHTOOL_GRXCLSRLCNT: 3293 case ETHTOOL_GRXCLSRULE: 3294 case ETHTOOL_GRXCLSRLALL: 3295 case ETHTOOL_GRXFHINDIR: 3296 case ETHTOOL_GRSSH: 3297 case ETHTOOL_GFEATURES: 3298 case ETHTOOL_GCHANNELS: 3299 case ETHTOOL_GET_TS_INFO: 3300 case ETHTOOL_GEEE: 3301 case ETHTOOL_GTUNABLE: 3302 case ETHTOOL_PHY_GTUNABLE: 3303 case ETHTOOL_GLINKSETTINGS: 3304 case ETHTOOL_GFECPARAM: 3305 break; 3306 default: 3307 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3308 return -EPERM; 3309 } 3310 3311 netdev_lock_ops(dev); 3312 if (dev->dev.parent) 3313 pm_runtime_get_sync(dev->dev.parent); 3314 3315 if (!netif_device_present(dev)) { 3316 rc = -ENODEV; 3317 goto out; 3318 } 3319 3320 if (dev->ethtool_ops->begin) { 3321 rc = dev->ethtool_ops->begin(dev); 3322 if (rc < 0) 3323 goto out; 3324 } 3325 old_features = dev->features; 3326 3327 switch (ethcmd) { 3328 case ETHTOOL_GSET: 3329 rc = ethtool_get_settings(dev, useraddr); 3330 break; 3331 case ETHTOOL_SSET: 3332 rc = ethtool_set_settings(dev, useraddr); 3333 break; 3334 case ETHTOOL_GDRVINFO: 3335 rc = 
ethtool_get_drvinfo(dev, devlink_state); 3336 break; 3337 case ETHTOOL_GREGS: 3338 rc = ethtool_get_regs(dev, useraddr); 3339 break; 3340 case ETHTOOL_GWOL: 3341 rc = ethtool_get_wol(dev, useraddr); 3342 break; 3343 case ETHTOOL_SWOL: 3344 rc = ethtool_set_wol(dev, useraddr); 3345 break; 3346 case ETHTOOL_GMSGLVL: 3347 rc = ethtool_get_value(dev, useraddr, ethcmd, 3348 dev->ethtool_ops->get_msglevel); 3349 break; 3350 case ETHTOOL_SMSGLVL: 3351 rc = ethtool_set_value_void(dev, useraddr, 3352 dev->ethtool_ops->set_msglevel); 3353 if (!rc) 3354 ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL); 3355 break; 3356 case ETHTOOL_GEEE: 3357 rc = ethtool_get_eee(dev, useraddr); 3358 break; 3359 case ETHTOOL_SEEE: 3360 rc = ethtool_set_eee(dev, useraddr); 3361 break; 3362 case ETHTOOL_NWAY_RST: 3363 rc = ethtool_nway_reset(dev); 3364 break; 3365 case ETHTOOL_GLINK: 3366 rc = ethtool_get_link(dev, useraddr); 3367 break; 3368 case ETHTOOL_GEEPROM: 3369 rc = ethtool_get_eeprom(dev, useraddr); 3370 break; 3371 case ETHTOOL_SEEPROM: 3372 rc = ethtool_set_eeprom(dev, useraddr); 3373 break; 3374 case ETHTOOL_GCOALESCE: 3375 rc = ethtool_get_coalesce(dev, useraddr); 3376 break; 3377 case ETHTOOL_SCOALESCE: 3378 rc = ethtool_set_coalesce(dev, useraddr); 3379 break; 3380 case ETHTOOL_GRINGPARAM: 3381 rc = ethtool_get_ringparam(dev, useraddr); 3382 break; 3383 case ETHTOOL_SRINGPARAM: 3384 rc = ethtool_set_ringparam(dev, useraddr); 3385 break; 3386 case ETHTOOL_GPAUSEPARAM: 3387 rc = ethtool_get_pauseparam(dev, useraddr); 3388 break; 3389 case ETHTOOL_SPAUSEPARAM: 3390 rc = ethtool_set_pauseparam(dev, useraddr); 3391 break; 3392 case ETHTOOL_TEST: 3393 rc = ethtool_self_test(dev, useraddr); 3394 break; 3395 case ETHTOOL_GSTRINGS: 3396 rc = ethtool_get_strings(dev, useraddr); 3397 break; 3398 case ETHTOOL_PHYS_ID: 3399 rc = ethtool_phys_id(dev, useraddr); 3400 break; 3401 case ETHTOOL_GSTATS: 3402 rc = ethtool_get_stats(dev, useraddr); 3403 break; 3404 case ETHTOOL_GPERMADDR: 3405 rc = ethtool_get_perm_addr(dev, useraddr); 3406 break; 3407 case ETHTOOL_GFLAGS: 3408 rc = ethtool_get_value(dev, useraddr, ethcmd, 3409 __ethtool_get_flags); 3410 break; 3411 case ETHTOOL_SFLAGS: 3412 rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); 3413 break; 3414 case ETHTOOL_GPFLAGS: 3415 rc = ethtool_get_value(dev, useraddr, ethcmd, 3416 dev->ethtool_ops->get_priv_flags); 3417 break; 3418 case ETHTOOL_SPFLAGS: 3419 rc = ethtool_set_value(dev, useraddr, 3420 dev->ethtool_ops->set_priv_flags); 3421 if (!rc) 3422 ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL); 3423 break; 3424 case ETHTOOL_GRXFH: 3425 rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr); 3426 break; 3427 case ETHTOOL_SRXFH: 3428 rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr); 3429 break; 3430 case ETHTOOL_GRXRINGS: 3431 case ETHTOOL_GRXCLSRLCNT: 3432 case ETHTOOL_GRXCLSRULE: 3433 case ETHTOOL_GRXCLSRLALL: 3434 rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); 3435 break; 3436 case ETHTOOL_SRXCLSRLDEL: 3437 case ETHTOOL_SRXCLSRLINS: 3438 rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); 3439 break; 3440 case ETHTOOL_FLASHDEV: 3441 rc = ethtool_flash_device(dev, devlink_state); 3442 break; 3443 case ETHTOOL_RESET: 3444 rc = ethtool_reset(dev, useraddr); 3445 break; 3446 case ETHTOOL_GSSET_INFO: 3447 rc = ethtool_get_sset_info(dev, useraddr); 3448 break; 3449 case ETHTOOL_GRXFHINDIR: 3450 rc = ethtool_get_rxfh_indir(dev, useraddr); 3451 break; 3452 case ETHTOOL_SRXFHINDIR: 3453 rc = ethtool_set_rxfh_indir(dev, useraddr); 3454 break; 3455 case
ETHTOOL_GRSSH: 3456 rc = ethtool_get_rxfh(dev, useraddr); 3457 break; 3458 case ETHTOOL_SRSSH: 3459 rc = ethtool_set_rxfh(dev, useraddr); 3460 break; 3461 case ETHTOOL_GFEATURES: 3462 rc = ethtool_get_features(dev, useraddr); 3463 break; 3464 case ETHTOOL_SFEATURES: 3465 rc = ethtool_set_features(dev, useraddr); 3466 break; 3467 case ETHTOOL_GTXCSUM: 3468 case ETHTOOL_GRXCSUM: 3469 case ETHTOOL_GSG: 3470 case ETHTOOL_GTSO: 3471 case ETHTOOL_GGSO: 3472 case ETHTOOL_GGRO: 3473 rc = ethtool_get_one_feature(dev, useraddr, ethcmd); 3474 break; 3475 case ETHTOOL_STXCSUM: 3476 case ETHTOOL_SRXCSUM: 3477 case ETHTOOL_SSG: 3478 case ETHTOOL_STSO: 3479 case ETHTOOL_SGSO: 3480 case ETHTOOL_SGRO: 3481 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 3482 break; 3483 case ETHTOOL_GCHANNELS: 3484 rc = ethtool_get_channels(dev, useraddr); 3485 break; 3486 case ETHTOOL_SCHANNELS: 3487 rc = ethtool_set_channels(dev, useraddr); 3488 break; 3489 case ETHTOOL_SET_DUMP: 3490 rc = ethtool_set_dump(dev, useraddr); 3491 break; 3492 case ETHTOOL_GET_DUMP_FLAG: 3493 rc = ethtool_get_dump_flag(dev, useraddr); 3494 break; 3495 case ETHTOOL_GET_DUMP_DATA: 3496 rc = ethtool_get_dump_data(dev, useraddr); 3497 break; 3498 case ETHTOOL_GET_TS_INFO: 3499 rc = ethtool_get_ts_info(dev, useraddr); 3500 break; 3501 case ETHTOOL_GMODULEINFO: 3502 rc = ethtool_get_module_info(dev, useraddr); 3503 break; 3504 case ETHTOOL_GMODULEEEPROM: 3505 rc = ethtool_get_module_eeprom(dev, useraddr); 3506 break; 3507 case ETHTOOL_GTUNABLE: 3508 rc = ethtool_get_tunable(dev, useraddr); 3509 break; 3510 case ETHTOOL_STUNABLE: 3511 rc = ethtool_set_tunable(dev, useraddr); 3512 break; 3513 case ETHTOOL_GPHYSTATS: 3514 rc = ethtool_get_phy_stats(dev, useraddr); 3515 break; 3516 case ETHTOOL_PERQUEUE: 3517 rc = ethtool_set_per_queue(dev, useraddr, sub_cmd); 3518 break; 3519 case ETHTOOL_GLINKSETTINGS: 3520 rc = ethtool_get_link_ksettings(dev, useraddr); 3521 break; 3522 case ETHTOOL_SLINKSETTINGS: 3523 rc = ethtool_set_link_ksettings(dev, useraddr); 3524 break; 3525 case ETHTOOL_PHY_GTUNABLE: 3526 rc = get_phy_tunable(dev, useraddr); 3527 break; 3528 case ETHTOOL_PHY_STUNABLE: 3529 rc = set_phy_tunable(dev, useraddr); 3530 break; 3531 case ETHTOOL_GFECPARAM: 3532 rc = ethtool_get_fecparam(dev, useraddr); 3533 break; 3534 case ETHTOOL_SFECPARAM: 3535 rc = ethtool_set_fecparam(dev, useraddr); 3536 break; 3537 default: 3538 rc = -EOPNOTSUPP; 3539 } 3540 3541 if (dev->ethtool_ops->complete) 3542 dev->ethtool_ops->complete(dev); 3543 3544 if (old_features != dev->features) 3545 netdev_features_change(dev); 3546 out: 3547 if (dev->dev.parent) 3548 pm_runtime_put(dev->dev.parent); 3549 netdev_unlock_ops(dev); 3550 3551 return rc; 3552 } 3553 3554 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) 3555 { 3556 struct ethtool_devlink_compat *state; 3557 u32 ethcmd; 3558 int rc; 3559 3560 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 3561 return -EFAULT; 3562 3563 state = kzalloc(sizeof(*state), GFP_KERNEL); 3564 if (!state) 3565 return -ENOMEM; 3566 3567 switch (ethcmd) { 3568 case ETHTOOL_FLASHDEV: 3569 if (copy_from_user(&state->efl, useraddr, sizeof(state->efl))) { 3570 rc = -EFAULT; 3571 goto exit_free; 3572 } 3573 state->efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; 3574 break; 3575 } 3576 3577 rtnl_lock(); 3578 rc = __dev_ethtool(net, ifr, useraddr, ethcmd, state); 3579 rtnl_unlock(); 3580 if (rc) 3581 goto exit_free; 3582 3583 switch (ethcmd) { 3584 case ETHTOOL_FLASHDEV: 3585 if (state->devlink) 3586 rc =
devlink_compat_flash_update(state->devlink, 3587 state->efl.data); 3588 break; 3589 case ETHTOOL_GDRVINFO: 3590 if (state->devlink) 3591 devlink_compat_running_version(state->devlink, 3592 state->info.fw_version, 3593 sizeof(state->info.fw_version)); 3594 if (copy_to_user(useraddr, &state->info, sizeof(state->info))) { 3595 rc = -EFAULT; 3596 goto exit_free; 3597 } 3598 break; 3599 } 3600 3601 exit_free: 3602 if (state->devlink) 3603 devlink_put(state->devlink); 3604 kfree(state); 3605 return rc; 3606 } 3607 3608 struct ethtool_rx_flow_key { 3609 struct flow_dissector_key_basic basic; 3610 union { 3611 struct flow_dissector_key_ipv4_addrs ipv4; 3612 struct flow_dissector_key_ipv6_addrs ipv6; 3613 }; 3614 struct flow_dissector_key_ports tp; 3615 struct flow_dissector_key_ip ip; 3616 struct flow_dissector_key_vlan vlan; 3617 struct flow_dissector_key_eth_addrs eth_addrs; 3618 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ 3619 3620 struct ethtool_rx_flow_match { 3621 struct flow_dissector dissector; 3622 struct ethtool_rx_flow_key key; 3623 struct ethtool_rx_flow_key mask; 3624 }; 3625 3626 struct ethtool_rx_flow_rule * 3627 ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) 3628 { 3629 const struct ethtool_rx_flow_spec *fs = input->fs; 3630 struct ethtool_rx_flow_match *match; 3631 struct ethtool_rx_flow_rule *flow; 3632 struct flow_action_entry *act; 3633 3634 flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) + 3635 sizeof(struct ethtool_rx_flow_match), GFP_KERNEL); 3636 if (!flow) 3637 return ERR_PTR(-ENOMEM); 3638 3639 /* ethtool_rx supports only one single action per rule. */ 3640 flow->rule = flow_rule_alloc(1); 3641 if (!flow->rule) { 3642 kfree(flow); 3643 return ERR_PTR(-ENOMEM); 3644 } 3645 3646 match = (struct ethtool_rx_flow_match *)flow->priv; 3647 flow->rule->match.dissector = &match->dissector; 3648 flow->rule->match.mask = &match->mask; 3649 flow->rule->match.key = &match->key; 3650 3651 match->mask.basic.n_proto = htons(0xffff); 3652 3653 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3654 case ETHER_FLOW: { 3655 const struct ethhdr *ether_spec, *ether_m_spec; 3656 3657 ether_spec = &fs->h_u.ether_spec; 3658 ether_m_spec = &fs->m_u.ether_spec; 3659 3660 if (!is_zero_ether_addr(ether_m_spec->h_source)) { 3661 ether_addr_copy(match->key.eth_addrs.src, 3662 ether_spec->h_source); 3663 ether_addr_copy(match->mask.eth_addrs.src, 3664 ether_m_spec->h_source); 3665 } 3666 if (!is_zero_ether_addr(ether_m_spec->h_dest)) { 3667 ether_addr_copy(match->key.eth_addrs.dst, 3668 ether_spec->h_dest); 3669 ether_addr_copy(match->mask.eth_addrs.dst, 3670 ether_m_spec->h_dest); 3671 } 3672 if (ether_m_spec->h_proto) { 3673 match->key.basic.n_proto = ether_spec->h_proto; 3674 match->mask.basic.n_proto = ether_m_spec->h_proto; 3675 } 3676 } 3677 break; 3678 case TCP_V4_FLOW: 3679 case UDP_V4_FLOW: { 3680 const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; 3681 3682 match->key.basic.n_proto = htons(ETH_P_IP); 3683 3684 v4_spec = &fs->h_u.tcp_ip4_spec; 3685 v4_m_spec = &fs->m_u.tcp_ip4_spec; 3686 3687 if (v4_m_spec->ip4src) { 3688 match->key.ipv4.src = v4_spec->ip4src; 3689 match->mask.ipv4.src = v4_m_spec->ip4src; 3690 } 3691 if (v4_m_spec->ip4dst) { 3692 match->key.ipv4.dst = v4_spec->ip4dst; 3693 match->mask.ipv4.dst = v4_m_spec->ip4dst; 3694 } 3695 if (v4_m_spec->ip4src || 3696 v4_m_spec->ip4dst) { 3697 match->dissector.used_keys |= 3698 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS); 3699 
match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 3700 offsetof(struct ethtool_rx_flow_key, ipv4); 3701 } 3702 if (v4_m_spec->psrc) { 3703 match->key.tp.src = v4_spec->psrc; 3704 match->mask.tp.src = v4_m_spec->psrc; 3705 } 3706 if (v4_m_spec->pdst) { 3707 match->key.tp.dst = v4_spec->pdst; 3708 match->mask.tp.dst = v4_m_spec->pdst; 3709 } 3710 if (v4_m_spec->psrc || 3711 v4_m_spec->pdst) { 3712 match->dissector.used_keys |= 3713 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3714 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3715 offsetof(struct ethtool_rx_flow_key, tp); 3716 } 3717 if (v4_m_spec->tos) { 3718 match->key.ip.tos = v4_spec->tos; 3719 match->mask.ip.tos = v4_m_spec->tos; 3720 match->dissector.used_keys |= 3721 BIT(FLOW_DISSECTOR_KEY_IP); 3722 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3723 offsetof(struct ethtool_rx_flow_key, ip); 3724 } 3725 } 3726 break; 3727 case TCP_V6_FLOW: 3728 case UDP_V6_FLOW: { 3729 const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; 3730 3731 match->key.basic.n_proto = htons(ETH_P_IPV6); 3732 3733 v6_spec = &fs->h_u.tcp_ip6_spec; 3734 v6_m_spec = &fs->m_u.tcp_ip6_spec; 3735 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) { 3736 memcpy(&match->key.ipv6.src, v6_spec->ip6src, 3737 sizeof(match->key.ipv6.src)); 3738 memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src, 3739 sizeof(match->mask.ipv6.src)); 3740 } 3741 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3742 memcpy(&match->key.ipv6.dst, v6_spec->ip6dst, 3743 sizeof(match->key.ipv6.dst)); 3744 memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst, 3745 sizeof(match->mask.ipv6.dst)); 3746 } 3747 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) || 3748 !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3749 match->dissector.used_keys |= 3750 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS); 3751 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = 3752 offsetof(struct ethtool_rx_flow_key, ipv6); 3753 } 3754 if (v6_m_spec->psrc) { 3755 match->key.tp.src = v6_spec->psrc; 3756 match->mask.tp.src = v6_m_spec->psrc; 3757 } 3758 if (v6_m_spec->pdst) { 3759 match->key.tp.dst = v6_spec->pdst; 3760 match->mask.tp.dst = v6_m_spec->pdst; 3761 } 3762 if (v6_m_spec->psrc || 3763 v6_m_spec->pdst) { 3764 match->dissector.used_keys |= 3765 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3766 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3767 offsetof(struct ethtool_rx_flow_key, tp); 3768 } 3769 if (v6_m_spec->tclass) { 3770 match->key.ip.tos = v6_spec->tclass; 3771 match->mask.ip.tos = v6_m_spec->tclass; 3772 match->dissector.used_keys |= 3773 BIT_ULL(FLOW_DISSECTOR_KEY_IP); 3774 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3775 offsetof(struct ethtool_rx_flow_key, ip); 3776 } 3777 } 3778 break; 3779 default: 3780 ethtool_rx_flow_rule_destroy(flow); 3781 return ERR_PTR(-EINVAL); 3782 } 3783 3784 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3785 case TCP_V4_FLOW: 3786 case TCP_V6_FLOW: 3787 match->key.basic.ip_proto = IPPROTO_TCP; 3788 match->mask.basic.ip_proto = 0xff; 3789 break; 3790 case UDP_V4_FLOW: 3791 case UDP_V6_FLOW: 3792 match->key.basic.ip_proto = IPPROTO_UDP; 3793 match->mask.basic.ip_proto = 0xff; 3794 break; 3795 } 3796 3797 match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); 3798 match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] = 3799 offsetof(struct ethtool_rx_flow_key, basic); 3800 3801 if (fs->flow_type & FLOW_EXT) { 3802 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3803 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 
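/* The 16-bit vlan_tci packs VID (bits 0-11), DEI (bit 12) and priority
 * (bits 13-15); it is unpacked into the separate flow-dissector VLAN
 * fields below.
 */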
3804 3805 if (ext_m_spec->vlan_etype) { 3806 match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype; 3807 match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype; 3808 } 3809 3810 if (ext_m_spec->vlan_tci) { 3811 match->key.vlan.vlan_id = 3812 ntohs(ext_h_spec->vlan_tci) & 0x0fff; 3813 match->mask.vlan.vlan_id = 3814 ntohs(ext_m_spec->vlan_tci) & 0x0fff; 3815 3816 match->key.vlan.vlan_dei = 3817 !!(ext_h_spec->vlan_tci & htons(0x1000)); 3818 match->mask.vlan.vlan_dei = 3819 !!(ext_m_spec->vlan_tci & htons(0x1000)); 3820 3821 match->key.vlan.vlan_priority = 3822 (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13; 3823 match->mask.vlan.vlan_priority = 3824 (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13; 3825 } 3826 3827 if (ext_m_spec->vlan_etype || 3828 ext_m_spec->vlan_tci) { 3829 match->dissector.used_keys |= 3830 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); 3831 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] = 3832 offsetof(struct ethtool_rx_flow_key, vlan); 3833 } 3834 } 3835 if (fs->flow_type & FLOW_MAC_EXT) { 3836 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3837 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 3838 3839 memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest, 3840 ETH_ALEN); 3841 memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest, 3842 ETH_ALEN); 3843 3844 match->dissector.used_keys |= 3845 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS); 3846 match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] = 3847 offsetof(struct ethtool_rx_flow_key, eth_addrs); 3848 } 3849 3850 act = &flow->rule->action.entries[0]; 3851 switch (fs->ring_cookie) { 3852 case RX_CLS_FLOW_DISC: 3853 act->id = FLOW_ACTION_DROP; 3854 break; 3855 case RX_CLS_FLOW_WAKE: 3856 act->id = FLOW_ACTION_WAKE; 3857 break; 3858 default: 3859 act->id = FLOW_ACTION_QUEUE; 3860 if (fs->flow_type & FLOW_RSS) 3861 act->queue.ctx = input->rss_ctx; 3862 3863 act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 3864 act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie); 3865 break; 3866 } 3867 3868 return flow; 3869 } 3870 EXPORT_SYMBOL(ethtool_rx_flow_rule_create); 3871 3872 void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow) 3873 { 3874 kfree(flow->rule); 3875 kfree(flow); 3876 } 3877 EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy); 3878
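/*
 * Illustrative sketch only (not part of this file's code): how a driver's
 * ETHTOOL_SRXCLSRLINS handler might reuse the translation above instead of
 * parsing struct ethtool_rx_flow_spec by hand.  example_hw_program_filter()
 * is a hypothetical driver helper; everything else is the API defined in
 * this file (for FLOW_RSS rules the caller would also fill input.rss_ctx).
 *
 *	static int example_insert_ntuple(struct net_device *netdev,
 *					 const struct ethtool_rx_flow_spec *fs)
 *	{
 *		struct ethtool_rx_flow_spec_input input = { .fs = fs };
 *		struct ethtool_rx_flow_rule *flow;
 *		const struct flow_action_entry *act;
 *		int err;
 *
 *		flow = ethtool_rx_flow_rule_create(&input);
 *		if (IS_ERR(flow))
 *			return PTR_ERR(flow);
 *
 *		(the single action is FLOW_ACTION_QUEUE, _DROP or _WAKE)
 *		act = &flow->rule->action.entries[0];
 *		err = example_hw_program_filter(netdev, flow->rule, act);
 *
 *		ethtool_rx_flow_rule_destroy(flow);
 *		return err;
 *	}
 */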