// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/ethtool.c - Ethtool ioctl handler
 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
 *
 * This file is where we call all the ethtool_ops commands to get
 * the information ethtool needs.
 */

#include <linux/compat.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <net/netdev_lock.h>
#include <linux/ethtool_netlink.h>
#include "common.h"

/* State held across locks and calls for commands which have devlink fallback */
struct ethtool_devlink_compat {
	struct devlink *devlink;
	union {
		struct ethtool_flash efl;
		struct ethtool_drvinfo info;
	};
};

static struct devlink *netdev_to_devlink_get(struct net_device *dev)
{
	if (!dev->devlink_port)
		return NULL;
	return devlink_try_get(dev->devlink_port->devlink);
}

/*
 * Some useful ethtool_ops methods that're device independent.
 * If we find that all drivers want to do the same thing here,
 * we can turn these into dev_() function calls.
 */

u32 ethtool_op_get_link(struct net_device *dev)
{
	/* Synchronize carrier state with link watch, see also rtnl_getlink() */
	__linkwatch_sync_dev(dev);

	return netif_carrier_ok(dev) ? 1 : 0;
}
EXPORT_SYMBOL(ethtool_op_get_link);

int ethtool_op_get_ts_info(struct net_device *dev,
			   struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	return 0;
}
EXPORT_SYMBOL(ethtool_op_get_ts_info);
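/*
 * Usage sketch (illustrative, not part of this file): a driver that only
 * offers software timestamping and plain carrier-based link reporting can
 * point its ethtool_ops at the generic helpers above instead of
 * open-coding them.  "foo_ethtool_ops" is a made-up name.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *		.get_ts_info	= ethtool_op_get_ts_info,
 *	};
 */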
/* Handlers for each ethtool command */

static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_gfeatures cmd = {
		.cmd = ETHTOOL_GFEATURES,
		.size = ETHTOOL_DEV_FEATURE_WORDS,
	};
	struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
	u32 __user *sizeaddr;
	u32 copy_size;
	int i;

	/* in case feature bits run out again */
	BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));

	for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
		features[i].available = (u32)(dev->hw_features >> (32 * i));
		features[i].requested = (u32)(dev->wanted_features >> (32 * i));
		features[i].active = (u32)(dev->features >> (32 * i));
		features[i].never_changed =
			(u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
	}

	sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
	if (get_user(copy_size, sizeaddr))
		return -EFAULT;

	if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
		copy_size = ETHTOOL_DEV_FEATURE_WORDS;

	if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
		return -EFAULT;
	useraddr += sizeof(cmd);
	if (copy_to_user(useraddr, features,
			 array_size(copy_size, sizeof(*features))))
		return -EFAULT;

	return 0;
}

static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_sfeatures cmd;
	struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
	netdev_features_t wanted = 0, valid = 0;
	int i, ret = 0;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;
	useraddr += sizeof(cmd);

	if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
		return -EINVAL;

	if (copy_from_user(features, useraddr, sizeof(features)))
		return -EFAULT;

	for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
		valid |= (netdev_features_t)features[i].valid << (32 * i);
		wanted |= (netdev_features_t)features[i].requested << (32 * i);
	}

	if (valid & ~NETIF_F_ETHTOOL_BITS)
		return -EINVAL;

	if (valid & ~dev->hw_features) {
		valid &= dev->hw_features;
		ret |= ETHTOOL_F_UNSUPPORTED;
	}

	dev->wanted_features &= ~valid;
	dev->wanted_features |= wanted & valid;
	__netdev_update_features(dev);

	if ((dev->wanted_features ^ dev->features) & valid)
		ret |= ETHTOOL_F_WISH;

	return ret;
}
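/*
 * Layout sketch (assumption made explicit): word i of the
 * ETHTOOL_GFEATURES/ETHTOOL_SFEATURES blocks carries netdev feature bits
 * 32 * i .. 32 * i + 31, so a single bit can be recovered from the blocks
 * roughly like this (feature_active() is illustrative and not part of
 * this file):
 *
 *	bool feature_active(const struct ethtool_get_features_block *blocks,
 *			    unsigned int bit)
 *	{
 *		return blocks[bit / 32].active & (1U << (bit % 32));
 *	}
 */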
static int __ethtool_get_sset_count(struct net_device *dev, int sset)
{
	const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (sset == ETH_SS_FEATURES)
		return ARRAY_SIZE(netdev_features_strings);

	if (sset == ETH_SS_RSS_HASH_FUNCS)
		return ARRAY_SIZE(rss_hash_func_strings);

	if (sset == ETH_SS_TUNABLES)
		return ARRAY_SIZE(tunable_strings);

	if (sset == ETH_SS_PHY_TUNABLES)
		return ARRAY_SIZE(phy_tunable_strings);

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats &&
	    phy_ops && phy_ops->get_sset_count)
		return phy_ops->get_sset_count(dev->phydev);

	if (sset == ETH_SS_LINK_MODES)
		return __ETHTOOL_LINK_MODE_MASK_NBITS;

	if (ops->get_sset_count && ops->get_strings)
		return ops->get_sset_count(dev, sset);
	else
		return -EOPNOTSUPP;
}

static void __ethtool_get_strings(struct net_device *dev,
				  u32 stringset, u8 *data)
{
	const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (stringset == ETH_SS_FEATURES)
		memcpy(data, netdev_features_strings,
		       sizeof(netdev_features_strings));
	else if (stringset == ETH_SS_RSS_HASH_FUNCS)
		memcpy(data, rss_hash_func_strings,
		       sizeof(rss_hash_func_strings));
	else if (stringset == ETH_SS_TUNABLES)
		memcpy(data, tunable_strings, sizeof(tunable_strings));
	else if (stringset == ETH_SS_PHY_TUNABLES)
		memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings));
	else if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
		 !ops->get_ethtool_phy_stats && phy_ops &&
		 phy_ops->get_strings)
		phy_ops->get_strings(dev->phydev, data);
	else if (stringset == ETH_SS_LINK_MODES)
		memcpy(data, link_mode_names,
		       __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN);
	else
		/* ops->get_strings is valid because checked earlier */
		ops->get_strings(dev, stringset, data);
}

static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
{
	/* feature masks of legacy discrete ethtool ops */

	switch (eth_cmd) {
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_STXCSUM:
		return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC |
		       NETIF_F_SCTP_CRC;
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_SRXCSUM:
		return NETIF_F_RXCSUM;
	case ETHTOOL_GSG:
	case ETHTOOL_SSG:
		return NETIF_F_SG | NETIF_F_FRAGLIST;
	case ETHTOOL_GTSO:
	case ETHTOOL_STSO:
		return NETIF_F_ALL_TSO;
	case ETHTOOL_GGSO:
	case ETHTOOL_SGSO:
		return NETIF_F_GSO;
	case ETHTOOL_GGRO:
	case ETHTOOL_SGRO:
		return NETIF_F_GRO;
	default:
		BUG();
	}
}
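/*
 * Note (illustrative): the legacy single-feature ioctls handled below act
 * on whole feature groups rather than individual netdev feature bits.
 * ETHTOOL_STSO, for example, toggles every bit of NETIF_F_ALL_TSO that the
 * device exposes in hw_features, which is why ethtool_get_feature_mask()
 * above returns a mask rather than a single flag.
 */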
static int ethtool_get_one_feature(struct net_device *dev,
				   char __user *useraddr, u32 ethcmd)
{
	netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
	struct ethtool_value edata = {
		.cmd = ethcmd,
		.data = !!(dev->features & mask),
	};

	if (copy_to_user(useraddr, &edata, sizeof(edata)))
		return -EFAULT;
	return 0;
}

static int ethtool_set_one_feature(struct net_device *dev,
				   void __user *useraddr, u32 ethcmd)
{
	struct ethtool_value edata;
	netdev_features_t mask;

	if (copy_from_user(&edata, useraddr, sizeof(edata)))
		return -EFAULT;

	mask = ethtool_get_feature_mask(ethcmd);
	mask &= dev->hw_features;
	if (!mask)
		return -EOPNOTSUPP;

	if (edata.data)
		dev->wanted_features |= mask;
	else
		dev->wanted_features &= ~mask;

	__netdev_update_features(dev);

	return 0;
}

#define ETH_ALL_FLAGS    (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
			  ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \
			  NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \
			  NETIF_F_RXHASH)

static u32 __ethtool_get_flags(struct net_device *dev)
{
	u32 flags = 0;

	if (dev->features & NETIF_F_LRO)
		flags |= ETH_FLAG_LRO;
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
		flags |= ETH_FLAG_RXVLAN;
	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
		flags |= ETH_FLAG_TXVLAN;
	if (dev->features & NETIF_F_NTUPLE)
		flags |= ETH_FLAG_NTUPLE;
	if (dev->features & NETIF_F_RXHASH)
		flags |= ETH_FLAG_RXHASH;

	return flags;
}

static int __ethtool_set_flags(struct net_device *dev, u32 data)
{
	netdev_features_t features = 0, changed;

	if (data & ~ETH_ALL_FLAGS)
		return -EINVAL;

	if (data & ETH_FLAG_LRO)
		features |= NETIF_F_LRO;
	if (data & ETH_FLAG_RXVLAN)
		features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (data & ETH_FLAG_TXVLAN)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (data & ETH_FLAG_NTUPLE)
		features |= NETIF_F_NTUPLE;
	if (data & ETH_FLAG_RXHASH)
		features |= NETIF_F_RXHASH;

	/* allow changing only bits set in hw_features */
	changed = (features ^ dev->features) & ETH_ALL_FEATURES;
	if (changed & ~dev->hw_features)
		return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;

	dev->wanted_features =
		(dev->wanted_features & ~changed) | (features & changed);

	__netdev_update_features(dev);

	return 0;
}

/* Given two link masks, AND them together and save the result in dst. */
void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
				  struct ethtool_link_ksettings *src)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		dst->link_modes.supported[idx] &=
			src->link_modes.supported[idx];
		dst->link_modes.advertising[idx] &=
			src->link_modes.advertising[idx];
	}
}
EXPORT_SYMBOL(ethtool_intersect_link_masks);

void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
					     u32 legacy_u32)
{
	linkmode_zero(dst);
	dst[0] = legacy_u32;
}
EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);

/* return false if src had higher bits set. lower bits always updated. */
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
					     const unsigned long *src)
{
	*legacy_u32 = src[0];
	return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS, 32) ==
		__ETHTOOL_LINK_MODE_MASK_NBITS;
}
EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
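/*
 * Illustrative sketch (not code from this file): the legacy u32 format
 * only covers link mode bits 0..31, so converting a mask with any higher
 * bit set still fills in the low word but reports the loss:
 *
 *	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
 *	u32 legacy;
 *	bool fits;
 *
 *	linkmode_zero(modes);
 *	linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, modes);
 *	fits = ethtool_convert_link_mode_to_legacy_u32(&legacy, modes);
 *	// fits == false: this mode has no legacy u32 representation
 */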
/* return false if ksettings link modes had higher bits
 * set. legacy_settings always updated (best effort)
 */
static bool
convert_link_ksettings_to_legacy_settings(
	struct ethtool_cmd *legacy_settings,
	const struct ethtool_link_ksettings *link_ksettings)
{
	bool retval = true;

	memset(legacy_settings, 0, sizeof(*legacy_settings));
	/* this also clears the deprecated fields in legacy structure:
	 * __u8	transceiver;
	 * __u32	maxtxpkt;
	 * __u32	maxrxpkt;
	 */

	retval &= ethtool_convert_link_mode_to_legacy_u32(
		&legacy_settings->supported,
		link_ksettings->link_modes.supported);
	retval &= ethtool_convert_link_mode_to_legacy_u32(
		&legacy_settings->advertising,
		link_ksettings->link_modes.advertising);
	retval &= ethtool_convert_link_mode_to_legacy_u32(
		&legacy_settings->lp_advertising,
		link_ksettings->link_modes.lp_advertising);
	ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed);
	legacy_settings->duplex
		= link_ksettings->base.duplex;
	legacy_settings->port
		= link_ksettings->base.port;
	legacy_settings->phy_address
		= link_ksettings->base.phy_address;
	legacy_settings->autoneg
		= link_ksettings->base.autoneg;
	legacy_settings->mdio_support
		= link_ksettings->base.mdio_support;
	legacy_settings->eth_tp_mdix
		= link_ksettings->base.eth_tp_mdix;
	legacy_settings->eth_tp_mdix_ctrl
		= link_ksettings->base.eth_tp_mdix_ctrl;
	legacy_settings->transceiver
		= link_ksettings->base.transceiver;
	return retval;
}

/* number of 32-bit words to store the user's link mode bitmaps */
#define __ETHTOOL_LINK_MODE_MASK_NU32			\
	DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)

/* layout of the struct passed from/to userland */
struct ethtool_link_usettings {
	struct ethtool_link_settings base;
	struct {
		__u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32];
		__u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
		__u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
	} link_modes;
};

/* Internal kernel helper to query a device ethtool_link_settings. */
int __ethtool_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *link_ksettings)
{
	ASSERT_RTNL();

	if (!dev->ethtool_ops->get_link_ksettings)
		return -EOPNOTSUPP;

	if (!netif_device_present(dev))
		return -ENODEV;

	memset(link_ksettings, 0, sizeof(*link_ksettings));
	return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
EXPORT_SYMBOL(__ethtool_get_link_ksettings);
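/*
 * Caller sketch (illustrative, not from this file): other kernel code can
 * query link settings through the exported helper above, provided RTNL
 * (and any additional per-device locking the core requires) is held:
 *
 *	struct ethtool_link_ksettings ks;
 *	int err;
 *
 *	rtnl_lock();
 *	err = __ethtool_get_link_ksettings(dev, &ks);
 *	rtnl_unlock();
 *	if (!err)
 *		pr_debug("link speed %u Mb/s\n", ks.base.speed);
 */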
/* convert ethtool_link_usettings in user space to a kernel internal
 * ethtool_link_ksettings. return 0 on success, errno on error.
 */
static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
					 const void __user *from)
{
	struct ethtool_link_usettings link_usettings;

	if (copy_from_user(&link_usettings, from, sizeof(link_usettings)))
		return -EFAULT;

	memcpy(&to->base, &link_usettings.base, sizeof(to->base));
	bitmap_from_arr32(to->link_modes.supported,
			  link_usettings.link_modes.supported,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_from_arr32(to->link_modes.advertising,
			  link_usettings.link_modes.advertising,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_from_arr32(to->link_modes.lp_advertising,
			  link_usettings.link_modes.lp_advertising,
			  __ETHTOOL_LINK_MODE_MASK_NBITS);

	return 0;
}

/* Check if the user is trying to change anything besides speed/duplex */
bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_settings base2 = {};

	base2.speed = cmd->base.speed;
	base2.port = PORT_OTHER;
	base2.duplex = cmd->base.duplex;
	base2.cmd = cmd->base.cmd;
	base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords;

	return !memcmp(&base2, &cmd->base, sizeof(base2)) &&
		bitmap_empty(cmd->link_modes.supported,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(cmd->link_modes.lp_advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* convert a kernel internal ethtool_link_ksettings to
 * ethtool_link_usettings in user space. return 0 on success, errno on
 * error.
 */
static int
store_link_ksettings_for_user(void __user *to,
			      const struct ethtool_link_ksettings *from)
{
	struct ethtool_link_usettings link_usettings;

	memcpy(&link_usettings, from, sizeof(link_usettings));
	bitmap_to_arr32(link_usettings.link_modes.supported,
			from->link_modes.supported,
			__ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_to_arr32(link_usettings.link_modes.advertising,
			from->link_modes.advertising,
			__ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_to_arr32(link_usettings.link_modes.lp_advertising,
			from->link_modes.lp_advertising,
			__ETHTOOL_LINK_MODE_MASK_NBITS);

	if (copy_to_user(to, &link_usettings, sizeof(link_usettings)))
		return -EFAULT;

	return 0;
}
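/*
 * Handshake sketch (illustrative): userspace typically issues
 * ETHTOOL_GLINKSETTINGS twice.  The first call passes
 * link_mode_masks_nwords == 0; the kernel answers with the required word
 * count as a negative value (-__ETHTOOL_LINK_MODE_MASK_NU32) and copies
 * back only the base fields.  The second call passes that count back as a
 * positive value and receives the full link mode bitmaps as well.  Exact
 * behaviour on the userspace side depends on the ethtool version.
 */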
/* Query device for its ethtool_link_settings. */
static int ethtool_get_link_ksettings(struct net_device *dev,
				      void __user *useraddr)
{
	int err = 0;
	struct ethtool_link_ksettings link_ksettings;

	ASSERT_RTNL();
	if (!dev->ethtool_ops->get_link_ksettings)
		return -EOPNOTSUPP;

	/* handle bitmap nbits handshake */
	if (copy_from_user(&link_ksettings.base, useraddr,
			   sizeof(link_ksettings.base)))
		return -EFAULT;

	if (__ETHTOOL_LINK_MODE_MASK_NU32
	    != link_ksettings.base.link_mode_masks_nwords) {
		/* wrong link mode nbits requested */
		memset(&link_ksettings, 0, sizeof(link_ksettings));
		link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
		/* send back number of words required as negative val */
		compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX,
				   "need too many bits for link modes!");
		link_ksettings.base.link_mode_masks_nwords
			= -((s8)__ETHTOOL_LINK_MODE_MASK_NU32);

		/* copy the base fields back to user, not the link
		 * mode bitmaps
		 */
		if (copy_to_user(useraddr, &link_ksettings.base,
				 sizeof(link_ksettings.base)))
			return -EFAULT;

		return 0;
	}

	/* handshake successful: user/kernel agree on
	 * link_mode_masks_nwords
	 */

	memset(&link_ksettings, 0, sizeof(link_ksettings));
	err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
	if (err < 0)
		return err;

	/* make sure we tell the right values to user */
	link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
	link_ksettings.base.link_mode_masks_nwords
		= __ETHTOOL_LINK_MODE_MASK_NU32;
	link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED;
	link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
	link_ksettings.base.rate_matching = RATE_MATCH_NONE;

	return store_link_ksettings_for_user(useraddr, &link_ksettings);
}
/* Update device ethtool_link_settings. */
static int ethtool_set_link_ksettings(struct net_device *dev,
				      void __user *useraddr)
{
	struct ethtool_link_ksettings link_ksettings = {};
	int err;

	ASSERT_RTNL();

	if (!dev->ethtool_ops->set_link_ksettings)
		return -EOPNOTSUPP;

	/* make sure nbits field has expected value */
	if (copy_from_user(&link_ksettings.base, useraddr,
			   sizeof(link_ksettings.base)))
		return -EFAULT;

	if (__ETHTOOL_LINK_MODE_MASK_NU32
	    != link_ksettings.base.link_mode_masks_nwords)
		return -EINVAL;

	/* copy the whole structure, now that we know it has expected
	 * format
	 */
	err = load_link_ksettings_from_user(&link_ksettings, useraddr);
	if (err)
		return err;

	/* re-check nwords field, just in case */
	if (__ETHTOOL_LINK_MODE_MASK_NU32
	    != link_ksettings.base.link_mode_masks_nwords)
		return -EINVAL;

	if (link_ksettings.base.master_slave_cfg ||
	    link_ksettings.base.master_slave_state)
		return -EINVAL;

	err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
	if (err >= 0) {
		ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
		ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
	}
	return err;
}

int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd,
				       u32 *dev_speed, u8 *dev_duplex)
{
	u32 speed;
	u8 duplex;

	speed = cmd->base.speed;
	duplex = cmd->base.duplex;
	/* don't allow custom speed and duplex */
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(duplex) ||
	    !ethtool_virtdev_validate_cmd(cmd))
		return -EINVAL;
	*dev_speed = speed;
	*dev_duplex = duplex;

	return 0;
}
EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings);
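/*
 * Usage sketch (illustrative): a purely virtual driver can back its
 * .set_link_ksettings op with the helper above to let the administrator
 * pin the reported speed/duplex.  The "foo" names are made up:
 *
 *	static int foo_set_link_ksettings(struct net_device *dev,
 *					  const struct ethtool_link_ksettings *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return ethtool_virtdev_set_link_ksettings(dev, cmd,
 *							  &priv->speed,
 *							  &priv->duplex);
 *	}
 */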
/* Query device for its ethtool_cmd settings.
 *
 * Backward compatibility note: for compatibility with legacy ethtool, this is
 * now implemented via get_link_ksettings. When driver reports higher link mode
 * bits, a kernel warning is logged once (with name of 1st driver/device) to
 * recommend user to upgrade ethtool, but the command is successful (only the
 * lower link mode bits reported back to user). Deprecated fields from
 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
 */
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_link_ksettings link_ksettings;
	struct ethtool_cmd cmd;
	int err;

	ASSERT_RTNL();
	if (!dev->ethtool_ops->get_link_ksettings)
		return -EOPNOTSUPP;

	if (dev->ethtool->module_fw_flash_in_progress)
		return -EBUSY;

	memset(&link_ksettings, 0, sizeof(link_ksettings));
	err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
	if (err < 0)
		return err;
	convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);

	/* send a sensible cmd tag back to user */
	cmd.cmd = ETHTOOL_GSET;

	if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
		return -EFAULT;

	return 0;
}

/* Update device link settings with given ethtool_cmd.
 *
 * Backward compatibility note: for compatibility with legacy ethtool, this is
 * now always implemented via set_link_settings. When user's request updates
 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
 * warning is logged once (with name of 1st driver/device) to recommend user to
 * upgrade ethtool, and the request is rejected.
 */
static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_link_ksettings link_ksettings;
	struct ethtool_cmd cmd;
	int ret;

	ASSERT_RTNL();

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;
	if (!dev->ethtool_ops->set_link_ksettings)
		return -EOPNOTSUPP;

	if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
		return -EINVAL;
	link_ksettings.base.link_mode_masks_nwords =
		__ETHTOOL_LINK_MODE_MASK_NU32;
	ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
	if (ret >= 0) {
		ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
		ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
	}
	return ret;
}

static int
ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct device *parent = dev->dev.parent;

	rsp->info.cmd = ETHTOOL_GDRVINFO;
	strscpy(rsp->info.version, init_uts_ns.name.release,
		sizeof(rsp->info.version));
	if (ops->get_drvinfo) {
		ops->get_drvinfo(dev, &rsp->info);
		if (!rsp->info.bus_info[0] && parent)
			strscpy(rsp->info.bus_info, dev_name(parent),
				sizeof(rsp->info.bus_info));
		if (!rsp->info.driver[0] && parent && parent->driver)
			strscpy(rsp->info.driver, parent->driver->name,
				sizeof(rsp->info.driver));
	} else if (parent && parent->driver) {
		strscpy(rsp->info.bus_info, dev_name(parent),
			sizeof(rsp->info.bus_info));
		strscpy(rsp->info.driver, parent->driver->name,
			sizeof(rsp->info.driver));
	} else if (dev->rtnl_link_ops) {
		strscpy(rsp->info.driver, dev->rtnl_link_ops->kind,
			sizeof(rsp->info.driver));
	} else {
		return -EOPNOTSUPP;
	}

	/*
	 * this method of obtaining string set info is deprecated;
	 * Use ETHTOOL_GSSET_INFO instead.
749 */ 750 if (ops->get_sset_count) { 751 int rc; 752 753 rc = ops->get_sset_count(dev, ETH_SS_TEST); 754 if (rc >= 0) 755 rsp->info.testinfo_len = rc; 756 rc = ops->get_sset_count(dev, ETH_SS_STATS); 757 if (rc >= 0) 758 rsp->info.n_stats = rc; 759 rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); 760 if (rc >= 0) 761 rsp->info.n_priv_flags = rc; 762 } 763 if (ops->get_regs_len) { 764 int ret = ops->get_regs_len(dev); 765 766 if (ret > 0) 767 rsp->info.regdump_len = ret; 768 } 769 770 if (ops->get_eeprom_len) 771 rsp->info.eedump_len = ops->get_eeprom_len(dev); 772 773 if (!rsp->info.fw_version[0]) 774 rsp->devlink = netdev_to_devlink_get(dev); 775 776 return 0; 777 } 778 779 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, 780 void __user *useraddr) 781 { 782 struct ethtool_sset_info info; 783 u64 sset_mask; 784 int i, idx = 0, n_bits = 0, ret, rc; 785 u32 *info_buf = NULL; 786 787 if (copy_from_user(&info, useraddr, sizeof(info))) 788 return -EFAULT; 789 790 /* store copy of mask, because we zero struct later on */ 791 sset_mask = info.sset_mask; 792 if (!sset_mask) 793 return 0; 794 795 /* calculate size of return buffer */ 796 n_bits = hweight64(sset_mask); 797 798 memset(&info, 0, sizeof(info)); 799 info.cmd = ETHTOOL_GSSET_INFO; 800 801 info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER); 802 if (!info_buf) 803 return -ENOMEM; 804 805 /* 806 * fill return buffer based on input bitmask and successful 807 * get_sset_count return 808 */ 809 for (i = 0; i < 64; i++) { 810 if (!(sset_mask & (1ULL << i))) 811 continue; 812 813 rc = __ethtool_get_sset_count(dev, i); 814 if (rc >= 0) { 815 info.sset_mask |= (1ULL << i); 816 info_buf[idx++] = rc; 817 } 818 } 819 820 ret = -EFAULT; 821 if (copy_to_user(useraddr, &info, sizeof(info))) 822 goto out; 823 824 useraddr += offsetof(struct ethtool_sset_info, data); 825 if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32)))) 826 goto out; 827 828 ret = 0; 829 830 out: 831 kfree(info_buf); 832 return ret; 833 } 834 835 static noinline_for_stack int 836 ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc, 837 const struct compat_ethtool_rxnfc __user *useraddr, 838 size_t size) 839 { 840 struct compat_ethtool_rxnfc crxnfc = {}; 841 842 /* We expect there to be holes between fs.m_ext and 843 * fs.ring_cookie and at the end of fs, but nowhere else. 844 * On non-x86, no conversion should be needed. 
845 */ 846 BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) && 847 sizeof(struct compat_ethtool_rxnfc) != 848 sizeof(struct ethtool_rxnfc)); 849 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + 850 sizeof(useraddr->fs.m_ext) != 851 offsetof(struct ethtool_rxnfc, fs.m_ext) + 852 sizeof(rxnfc->fs.m_ext)); 853 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) - 854 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 855 offsetof(struct ethtool_rxnfc, fs.location) - 856 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 857 858 if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc)))) 859 return -EFAULT; 860 861 *rxnfc = (struct ethtool_rxnfc) { 862 .cmd = crxnfc.cmd, 863 .flow_type = crxnfc.flow_type, 864 .data = crxnfc.data, 865 .fs = { 866 .flow_type = crxnfc.fs.flow_type, 867 .h_u = crxnfc.fs.h_u, 868 .h_ext = crxnfc.fs.h_ext, 869 .m_u = crxnfc.fs.m_u, 870 .m_ext = crxnfc.fs.m_ext, 871 .ring_cookie = crxnfc.fs.ring_cookie, 872 .location = crxnfc.fs.location, 873 }, 874 .rule_cnt = crxnfc.rule_cnt, 875 }; 876 877 return 0; 878 } 879 880 static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc, 881 const void __user *useraddr, 882 size_t size) 883 { 884 if (compat_need_64bit_alignment_fixup()) 885 return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size); 886 887 if (copy_from_user(rxnfc, useraddr, size)) 888 return -EFAULT; 889 890 return 0; 891 } 892 893 static int ethtool_rxnfc_copy_to_compat(void __user *useraddr, 894 const struct ethtool_rxnfc *rxnfc, 895 size_t size, const u32 *rule_buf) 896 { 897 struct compat_ethtool_rxnfc crxnfc; 898 899 memset(&crxnfc, 0, sizeof(crxnfc)); 900 crxnfc = (struct compat_ethtool_rxnfc) { 901 .cmd = rxnfc->cmd, 902 .flow_type = rxnfc->flow_type, 903 .data = rxnfc->data, 904 .fs = { 905 .flow_type = rxnfc->fs.flow_type, 906 .h_u = rxnfc->fs.h_u, 907 .h_ext = rxnfc->fs.h_ext, 908 .m_u = rxnfc->fs.m_u, 909 .m_ext = rxnfc->fs.m_ext, 910 .ring_cookie = rxnfc->fs.ring_cookie, 911 .location = rxnfc->fs.location, 912 }, 913 .rule_cnt = rxnfc->rule_cnt, 914 }; 915 916 if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc)))) 917 return -EFAULT; 918 919 return 0; 920 } 921 922 static int ethtool_rxnfc_copy_struct(u32 cmd, struct ethtool_rxnfc *info, 923 size_t *info_size, void __user *useraddr) 924 { 925 /* struct ethtool_rxnfc was originally defined for 926 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data 927 * members. User-space might still be using that 928 * definition. 929 */ 930 if (cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) 931 *info_size = (offsetof(struct ethtool_rxnfc, data) + 932 sizeof(info->data)); 933 934 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 935 return -EFAULT; 936 937 if ((cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) && info->flow_type & FLOW_RSS) { 938 *info_size = sizeof(*info); 939 if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size)) 940 return -EFAULT; 941 /* Since malicious users may modify the original data, 942 * we need to check whether FLOW_RSS is still requested. 
943 */ 944 if (!(info->flow_type & FLOW_RSS)) 945 return -EINVAL; 946 } 947 948 if (info->cmd != cmd) 949 return -EINVAL; 950 951 return 0; 952 } 953 954 static int ethtool_rxnfc_copy_to_user(void __user *useraddr, 955 const struct ethtool_rxnfc *rxnfc, 956 size_t size, const u32 *rule_buf) 957 { 958 int ret; 959 960 if (compat_need_64bit_alignment_fixup()) { 961 ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size, 962 rule_buf); 963 useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs); 964 } else { 965 ret = copy_to_user(useraddr, rxnfc, size); 966 useraddr += offsetof(struct ethtool_rxnfc, rule_locs); 967 } 968 969 if (ret) 970 return -EFAULT; 971 972 if (rule_buf) { 973 if (copy_to_user(useraddr, rule_buf, 974 rxnfc->rule_cnt * sizeof(u32))) 975 return -EFAULT; 976 } 977 978 return 0; 979 } 980 981 static bool flow_type_hashable(u32 flow_type) 982 { 983 switch (flow_type) { 984 case TCP_V4_FLOW: 985 case UDP_V4_FLOW: 986 case SCTP_V4_FLOW: 987 case AH_ESP_V4_FLOW: 988 case TCP_V6_FLOW: 989 case UDP_V6_FLOW: 990 case SCTP_V6_FLOW: 991 case AH_ESP_V6_FLOW: 992 case AH_V4_FLOW: 993 case ESP_V4_FLOW: 994 case AH_V6_FLOW: 995 case ESP_V6_FLOW: 996 case IPV4_FLOW: 997 case IPV6_FLOW: 998 case GTPU_V4_FLOW: 999 case GTPU_V6_FLOW: 1000 case GTPC_V4_FLOW: 1001 case GTPC_V6_FLOW: 1002 case GTPC_TEID_V4_FLOW: 1003 case GTPC_TEID_V6_FLOW: 1004 case GTPU_EH_V4_FLOW: 1005 case GTPU_EH_V6_FLOW: 1006 case GTPU_UL_V4_FLOW: 1007 case GTPU_UL_V6_FLOW: 1008 case GTPU_DL_V4_FLOW: 1009 case GTPU_DL_V6_FLOW: 1010 return true; 1011 } 1012 1013 return false; 1014 } 1015 1016 /* When adding a new type, update the assert and, if it's hashable, add it to 1017 * the flow_type_hashable switch case. 1018 */ 1019 static_assert(GTPU_DL_V6_FLOW + 1 == __FLOW_TYPE_COUNT); 1020 1021 static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh) 1022 { 1023 /* Sanity check: if symmetric-xor/symmetric-or-xor is set, then: 1024 * 1 - no other fields besides IP src/dst and/or L4 src/dst are set 1025 * 2 - If src is set, dst must also be set 1026 */ 1027 if ((input_xfrm != RXH_XFRM_NO_CHANGE && 1028 input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) && 1029 ((rxfh & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) || 1030 (!!(rxfh & RXH_IP_SRC) ^ !!(rxfh & RXH_IP_DST)) || 1031 (!!(rxfh & RXH_L4_B_0_1) ^ !!(rxfh & RXH_L4_B_2_3)))) 1032 return -EINVAL; 1033 1034 return 0; 1035 } 1036 1037 static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm) 1038 { 1039 const struct ethtool_ops *ops = dev->ethtool_ops; 1040 int err; 1041 u32 i; 1042 1043 for (i = 0; i < __FLOW_TYPE_COUNT; i++) { 1044 struct ethtool_rxfh_fields fields = { 1045 .flow_type = i, 1046 }; 1047 1048 if (!flow_type_hashable(i)) 1049 continue; 1050 1051 if (ops->get_rxfh_fields(dev, &fields)) 1052 continue; 1053 1054 err = ethtool_check_xfrm_rxfh(input_xfrm, fields.data); 1055 if (err) 1056 return err; 1057 } 1058 1059 return 0; 1060 } 1061 1062 static noinline_for_stack int 1063 ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) 1064 { 1065 const struct ethtool_ops *ops = dev->ethtool_ops; 1066 struct ethtool_rxfh_fields fields = {}; 1067 struct ethtool_rxnfc info; 1068 size_t info_size = sizeof(info); 1069 int rc; 1070 1071 if (!ops->set_rxfh_fields) 1072 return -EOPNOTSUPP; 1073 1074 rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1075 if (rc) 1076 return rc; 1077 1078 if (info.flow_type & FLOW_RSS && info.rss_context && 1079 !ops->rxfh_per_ctx_fields) 1080 return -EINVAL; 
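/*
 * Illustrative note: the ops->get_rxfh lookup that follows exists so that
 * a device already configured for symmetric hashing (RXH_XFRM_SYM_XOR or
 * RXH_XFRM_SYM_OR_XOR) cannot be given a field selection that breaks the
 * symmetry; ethtool_check_xfrm_rxfh() rejects selections where only one
 * half of an IP or L4 source/destination pair is hashed.
 */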
1081 1082 if (ops->get_rxfh) { 1083 struct ethtool_rxfh_param rxfh = {}; 1084 1085 rc = ops->get_rxfh(dev, &rxfh); 1086 if (rc) 1087 return rc; 1088 1089 rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data); 1090 if (rc) 1091 return rc; 1092 } 1093 1094 fields.data = info.data; 1095 fields.flow_type = info.flow_type & ~FLOW_RSS; 1096 if (info.flow_type & FLOW_RSS) 1097 fields.rss_context = info.rss_context; 1098 1099 return ops->set_rxfh_fields(dev, &fields, NULL); 1100 } 1101 1102 static noinline_for_stack int 1103 ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr) 1104 { 1105 struct ethtool_rxnfc info; 1106 size_t info_size = sizeof(info); 1107 const struct ethtool_ops *ops = dev->ethtool_ops; 1108 struct ethtool_rxfh_fields fields = {}; 1109 int ret; 1110 1111 if (!ops->get_rxfh_fields) 1112 return -EOPNOTSUPP; 1113 1114 ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1115 if (ret) 1116 return ret; 1117 1118 if (info.flow_type & FLOW_RSS && info.rss_context && 1119 !ops->rxfh_per_ctx_fields) 1120 return -EINVAL; 1121 1122 fields.flow_type = info.flow_type & ~FLOW_RSS; 1123 if (info.flow_type & FLOW_RSS) 1124 fields.rss_context = info.rss_context; 1125 1126 ret = ops->get_rxfh_fields(dev, &fields); 1127 if (ret < 0) 1128 return ret; 1129 1130 info.data = fields.data; 1131 1132 return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL); 1133 } 1134 1135 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 1136 u32 cmd, void __user *useraddr) 1137 { 1138 const struct ethtool_ops *ops = dev->ethtool_ops; 1139 struct ethtool_rxnfc info; 1140 size_t info_size = sizeof(info); 1141 int rc; 1142 1143 if (!ops->set_rxnfc) 1144 return -EOPNOTSUPP; 1145 1146 rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1147 if (rc) 1148 return rc; 1149 1150 if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { 1151 /* Nonzero ring with RSS only makes sense 1152 * if NIC adds them together 1153 */ 1154 if (!ops->cap_rss_rxnfc_adds && 1155 ethtool_get_flow_spec_ring(info.fs.ring_cookie)) 1156 return -EINVAL; 1157 1158 if (info.rss_context && 1159 !xa_load(&dev->ethtool->rss_ctx, info.rss_context)) 1160 return -EINVAL; 1161 } 1162 1163 rc = ops->set_rxnfc(dev, &info); 1164 if (rc) 1165 return rc; 1166 1167 if (cmd == ETHTOOL_SRXCLSRLINS && 1168 ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL)) 1169 return -EFAULT; 1170 1171 return 0; 1172 } 1173 1174 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, 1175 u32 cmd, void __user *useraddr) 1176 { 1177 struct ethtool_rxnfc info; 1178 size_t info_size = sizeof(info); 1179 const struct ethtool_ops *ops = dev->ethtool_ops; 1180 int ret; 1181 void *rule_buf = NULL; 1182 1183 if (!ops->get_rxnfc) 1184 return -EOPNOTSUPP; 1185 1186 ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); 1187 if (ret) 1188 return ret; 1189 1190 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 1191 if (info.rule_cnt > 0) { 1192 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) 1193 rule_buf = kcalloc(info.rule_cnt, sizeof(u32), 1194 GFP_USER); 1195 if (!rule_buf) 1196 return -ENOMEM; 1197 } 1198 } 1199 1200 ret = ops->get_rxnfc(dev, &info, rule_buf); 1201 if (ret < 0) 1202 goto err_out; 1203 1204 ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf); 1205 err_out: 1206 kfree(rule_buf); 1207 1208 return ret; 1209 } 1210 1211 static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, 1212 struct ethtool_rxnfc *rx_rings, 
1213 u32 size) 1214 { 1215 int i; 1216 1217 if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0])))) 1218 return -EFAULT; 1219 1220 /* Validate ring indices */ 1221 for (i = 0; i < size; i++) 1222 if (indir[i] >= rx_rings->data) 1223 return -EINVAL; 1224 1225 return 0; 1226 } 1227 1228 u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 1229 1230 void netdev_rss_key_fill(void *buffer, size_t len) 1231 { 1232 BUG_ON(len > sizeof(netdev_rss_key)); 1233 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); 1234 memcpy(buffer, netdev_rss_key, len); 1235 } 1236 EXPORT_SYMBOL(netdev_rss_key_fill); 1237 1238 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, 1239 void __user *useraddr) 1240 { 1241 struct ethtool_rxfh_param rxfh = {}; 1242 u32 user_size; 1243 int ret; 1244 1245 if (!dev->ethtool_ops->get_rxfh_indir_size || 1246 !dev->ethtool_ops->get_rxfh) 1247 return -EOPNOTSUPP; 1248 rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 1249 if (rxfh.indir_size == 0) 1250 return -EOPNOTSUPP; 1251 1252 if (copy_from_user(&user_size, 1253 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1254 sizeof(user_size))) 1255 return -EFAULT; 1256 1257 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), 1258 &rxfh.indir_size, sizeof(rxfh.indir_size))) 1259 return -EFAULT; 1260 1261 /* If the user buffer size is 0, this is just a query for the 1262 * device table size. Otherwise, if it's smaller than the 1263 * device table size it's an error. 1264 */ 1265 if (user_size < rxfh.indir_size) 1266 return user_size == 0 ? 0 : -EINVAL; 1267 1268 rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER); 1269 if (!rxfh.indir) 1270 return -ENOMEM; 1271 1272 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); 1273 if (ret) 1274 goto out; 1275 if (copy_to_user(useraddr + 1276 offsetof(struct ethtool_rxfh_indir, ring_index[0]), 1277 rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir))) 1278 ret = -EFAULT; 1279 1280 out: 1281 kfree(rxfh.indir); 1282 return ret; 1283 } 1284 1285 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, 1286 void __user *useraddr) 1287 { 1288 const struct ethtool_ops *ops = dev->ethtool_ops; 1289 struct ethtool_rxfh_param rxfh_dev = {}; 1290 struct netlink_ext_ack *extack = NULL; 1291 struct ethtool_rxnfc rx_rings; 1292 u32 user_size, i; 1293 int ret; 1294 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]); 1295 1296 if (!ops->get_rxfh_indir_size || !ops->set_rxfh || 1297 !ops->get_rxnfc) 1298 return -EOPNOTSUPP; 1299 1300 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1301 if (rxfh_dev.indir_size == 0) 1302 return -EOPNOTSUPP; 1303 1304 if (copy_from_user(&user_size, 1305 useraddr + offsetof(struct ethtool_rxfh_indir, size), 1306 sizeof(user_size))) 1307 return -EFAULT; 1308 1309 if (user_size != 0 && user_size != rxfh_dev.indir_size) 1310 return -EINVAL; 1311 1312 rxfh_dev.indir = kcalloc(rxfh_dev.indir_size, 1313 sizeof(rxfh_dev.indir[0]), GFP_USER); 1314 if (!rxfh_dev.indir) 1315 return -ENOMEM; 1316 1317 rx_rings.cmd = ETHTOOL_GRXRINGS; 1318 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1319 if (ret) 1320 goto out; 1321 1322 if (user_size == 0) { 1323 u32 *indir = rxfh_dev.indir; 1324 1325 for (i = 0; i < rxfh_dev.indir_size; i++) 1326 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1327 } else { 1328 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1329 useraddr + ringidx_offset, 1330 &rx_rings, 1331 rxfh_dev.indir_size); 1332 if (ret) 1333 goto 
out; 1334 } 1335 1336 rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE; 1337 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1338 if (ret) 1339 goto out; 1340 1341 /* indicate whether rxfh was set to default */ 1342 if (user_size == 0) 1343 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1344 else 1345 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1346 1347 out: 1348 kfree(rxfh_dev.indir); 1349 return ret; 1350 } 1351 1352 static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, 1353 void __user *useraddr) 1354 { 1355 const struct ethtool_ops *ops = dev->ethtool_ops; 1356 struct ethtool_rxfh_param rxfh_dev = {}; 1357 u32 user_indir_size, user_key_size; 1358 struct ethtool_rxfh_context *ctx; 1359 struct ethtool_rxfh rxfh; 1360 u32 indir_bytes; 1361 u8 *rss_config; 1362 u32 total_size; 1363 int ret; 1364 1365 if (!ops->get_rxfh) 1366 return -EOPNOTSUPP; 1367 1368 if (ops->get_rxfh_indir_size) 1369 rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); 1370 if (ops->get_rxfh_key_size) 1371 rxfh_dev.key_size = ops->get_rxfh_key_size(dev); 1372 1373 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1374 return -EFAULT; 1375 user_indir_size = rxfh.indir_size; 1376 user_key_size = rxfh.key_size; 1377 1378 /* Check that reserved fields are 0 for now */ 1379 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1380 return -EINVAL; 1381 /* Most drivers don't handle rss_context, check it's 0 as well */ 1382 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1383 ops->create_rxfh_context)) 1384 return -EOPNOTSUPP; 1385 1386 rxfh.indir_size = rxfh_dev.indir_size; 1387 rxfh.key_size = rxfh_dev.key_size; 1388 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) 1389 return -EFAULT; 1390 1391 if ((user_indir_size && user_indir_size != rxfh_dev.indir_size) || 1392 (user_key_size && user_key_size != rxfh_dev.key_size)) 1393 return -EINVAL; 1394 1395 indir_bytes = user_indir_size * sizeof(rxfh_dev.indir[0]); 1396 total_size = indir_bytes + user_key_size; 1397 rss_config = kzalloc(total_size, GFP_USER); 1398 if (!rss_config) 1399 return -ENOMEM; 1400 1401 if (user_indir_size) 1402 rxfh_dev.indir = (u32 *)rss_config; 1403 1404 if (user_key_size) 1405 rxfh_dev.key = rss_config + indir_bytes; 1406 1407 if (rxfh.rss_context) { 1408 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1409 if (!ctx) { 1410 ret = -ENOENT; 1411 goto out; 1412 } 1413 if (rxfh_dev.indir) 1414 memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx), 1415 indir_bytes); 1416 if (!ops->rxfh_per_ctx_key) { 1417 rxfh_dev.key_size = 0; 1418 } else { 1419 if (rxfh_dev.key) 1420 memcpy(rxfh_dev.key, 1421 ethtool_rxfh_context_key(ctx), 1422 user_key_size); 1423 rxfh_dev.hfunc = ctx->hfunc; 1424 } 1425 rxfh_dev.input_xfrm = ctx->input_xfrm; 1426 ret = 0; 1427 } else { 1428 ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); 1429 if (ret) 1430 goto out; 1431 } 1432 1433 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), 1434 &rxfh_dev.hfunc, sizeof(rxfh.hfunc))) { 1435 ret = -EFAULT; 1436 } else if (copy_to_user(useraddr + 1437 offsetof(struct ethtool_rxfh, input_xfrm), 1438 &rxfh_dev.input_xfrm, 1439 sizeof(rxfh.input_xfrm))) { 1440 ret = -EFAULT; 1441 } else if (copy_to_user(useraddr + 1442 offsetof(struct ethtool_rxfh, key_size), 1443 &rxfh_dev.key_size, 1444 sizeof(rxfh.key_size))) { 1445 ret = -EFAULT; 1446 } else if (copy_to_user(useraddr + 1447 offsetof(struct ethtool_rxfh, rss_config[0]), 1448 rss_config, total_size)) { 1449 ret = -EFAULT; 1450 } 1451 out: 1452 kfree(rss_config); 1453 1454 return ret; 1455 } 1456 1457 static struct 
ethtool_rxfh_context * 1458 ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, 1459 u32 indir_size, u32 key_size) 1460 { 1461 size_t indir_bytes, flex_len, key_off, size; 1462 struct ethtool_rxfh_context *ctx; 1463 u32 priv_bytes, indir_max; 1464 u16 key_max; 1465 1466 key_max = max(key_size, ops->rxfh_key_space); 1467 indir_max = max(indir_size, ops->rxfh_indir_space); 1468 1469 priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); 1470 indir_bytes = array_size(indir_max, sizeof(u32)); 1471 1472 key_off = size_add(priv_bytes, indir_bytes); 1473 flex_len = size_add(key_off, key_max); 1474 size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); 1475 1476 ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); 1477 if (!ctx) 1478 return NULL; 1479 1480 ctx->indir_size = indir_size; 1481 ctx->key_size = key_size; 1482 ctx->key_off = key_off; 1483 ctx->priv_size = ops->rxfh_priv_size; 1484 1485 ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; 1486 ctx->input_xfrm = RXH_XFRM_NO_CHANGE; 1487 1488 return ctx; 1489 } 1490 1491 static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, 1492 void __user *useraddr) 1493 { 1494 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); 1495 const struct ethtool_ops *ops = dev->ethtool_ops; 1496 u32 dev_indir_size = 0, dev_key_size = 0, i; 1497 u32 user_indir_len = 0, indir_bytes = 0; 1498 struct ethtool_rxfh_param rxfh_dev = {}; 1499 struct ethtool_rxfh_context *ctx = NULL; 1500 struct netlink_ext_ack *extack = NULL; 1501 struct ethtool_rxnfc rx_rings; 1502 struct ethtool_rxfh rxfh; 1503 bool locked = false; /* dev->ethtool->rss_lock taken */ 1504 bool create = false; 1505 u8 *rss_config; 1506 int ret; 1507 1508 if (!ops->get_rxnfc || !ops->get_rxfh_fields || !ops->set_rxfh) 1509 return -EOPNOTSUPP; 1510 1511 if (ops->get_rxfh_indir_size) 1512 dev_indir_size = ops->get_rxfh_indir_size(dev); 1513 if (ops->get_rxfh_key_size) 1514 dev_key_size = ops->get_rxfh_key_size(dev); 1515 1516 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) 1517 return -EFAULT; 1518 1519 /* Check that reserved fields are 0 for now */ 1520 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) 1521 return -EINVAL; 1522 /* Most drivers don't handle rss_context, check it's 0 as well */ 1523 if (rxfh.rss_context && !(ops->cap_rss_ctx_supported || 1524 ops->create_rxfh_context)) 1525 return -EOPNOTSUPP; 1526 /* Check input data transformation capabilities */ 1527 if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && 1528 rxfh.input_xfrm != RXH_XFRM_SYM_OR_XOR && 1529 rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) 1530 return -EINVAL; 1531 if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE && 1532 rxfh.input_xfrm & ~ops->supported_input_xfrm) 1533 return -EOPNOTSUPP; 1534 create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; 1535 1536 if ((rxfh.indir_size && 1537 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && 1538 rxfh.indir_size != dev_indir_size) || 1539 (rxfh.key_size && rxfh.key_size != dev_key_size)) 1540 return -EINVAL; 1541 1542 /* Must request at least one change: indir size, hash key, function 1543 * or input transformation. 1544 * There's no need for any of it in case of context creation. 
1545 */ 1546 if (!create && 1547 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && 1548 rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE && 1549 rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) 1550 return -EINVAL; 1551 1552 ret = ethtool_check_flow_types(dev, rxfh.input_xfrm); 1553 if (ret) 1554 return ret; 1555 1556 indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]); 1557 1558 /* Check settings which may be global rather than per RSS-context */ 1559 if (rxfh.rss_context && !ops->rxfh_per_ctx_key) 1560 if (rxfh.key_size || 1561 (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) || 1562 (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)) 1563 return -EOPNOTSUPP; 1564 1565 rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); 1566 if (!rss_config) 1567 return -ENOMEM; 1568 1569 rx_rings.cmd = ETHTOOL_GRXRINGS; 1570 ret = ops->get_rxnfc(dev, &rx_rings, NULL); 1571 if (ret) 1572 goto out; 1573 1574 /* rxfh.indir_size == 0 means reset the indir table to default (master 1575 * context) or delete the context (other RSS contexts). 1576 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. 1577 */ 1578 if (rxfh.indir_size && 1579 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { 1580 user_indir_len = indir_bytes; 1581 rxfh_dev.indir = (u32 *)rss_config; 1582 rxfh_dev.indir_size = dev_indir_size; 1583 ret = ethtool_copy_validate_indir(rxfh_dev.indir, 1584 useraddr + rss_cfg_offset, 1585 &rx_rings, 1586 rxfh.indir_size); 1587 if (ret) 1588 goto out; 1589 } else if (rxfh.indir_size == 0) { 1590 if (rxfh.rss_context == 0) { 1591 u32 *indir; 1592 1593 rxfh_dev.indir = (u32 *)rss_config; 1594 rxfh_dev.indir_size = dev_indir_size; 1595 indir = rxfh_dev.indir; 1596 for (i = 0; i < dev_indir_size; i++) 1597 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 1598 } else { 1599 rxfh_dev.rss_delete = true; 1600 } 1601 } 1602 1603 if (rxfh.key_size) { 1604 rxfh_dev.key_size = dev_key_size; 1605 rxfh_dev.key = rss_config + indir_bytes; 1606 if (copy_from_user(rxfh_dev.key, 1607 useraddr + rss_cfg_offset + user_indir_len, 1608 rxfh.key_size)) { 1609 ret = -EFAULT; 1610 goto out; 1611 } 1612 } 1613 1614 if (rxfh.rss_context) { 1615 mutex_lock(&dev->ethtool->rss_lock); 1616 locked = true; 1617 } 1618 1619 if (rxfh.rss_context && rxfh_dev.rss_delete) { 1620 ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); 1621 if (ret) 1622 goto out; 1623 } 1624 1625 if (create) { 1626 if (rxfh_dev.rss_delete) { 1627 ret = -EINVAL; 1628 goto out; 1629 } 1630 ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); 1631 if (!ctx) { 1632 ret = -ENOMEM; 1633 goto out; 1634 } 1635 1636 if (ops->create_rxfh_context) { 1637 u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX; 1638 u32 ctx_id; 1639 1640 /* driver uses new API, core allocates ID */ 1641 ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, 1642 XA_LIMIT(1, limit - 1), 1643 GFP_KERNEL_ACCOUNT); 1644 if (ret < 0) { 1645 kfree(ctx); 1646 goto out; 1647 } 1648 WARN_ON(!ctx_id); /* can't happen */ 1649 rxfh.rss_context = ctx_id; 1650 } 1651 } else if (rxfh.rss_context) { 1652 ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); 1653 if (!ctx) { 1654 ret = -ENOENT; 1655 goto out; 1656 } 1657 } 1658 rxfh_dev.hfunc = rxfh.hfunc; 1659 rxfh_dev.rss_context = rxfh.rss_context; 1660 rxfh_dev.input_xfrm = rxfh.input_xfrm; 1661 1662 if (rxfh.rss_context && ops->create_rxfh_context) { 1663 if (create) { 1664 ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, 1665 extack); 1666 /* Make sure driver populates defaults */ 1667 
WARN_ON_ONCE(!ret && !rxfh_dev.key && 1668 ops->rxfh_per_ctx_key && 1669 !memchr_inv(ethtool_rxfh_context_key(ctx), 1670 0, ctx->key_size)); 1671 } else if (rxfh_dev.rss_delete) { 1672 ret = ops->remove_rxfh_context(dev, ctx, 1673 rxfh.rss_context, 1674 extack); 1675 } else { 1676 ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, 1677 extack); 1678 } 1679 } else { 1680 ret = ops->set_rxfh(dev, &rxfh_dev, extack); 1681 } 1682 if (ret) { 1683 if (create) { 1684 /* failed to create, free our new tracking entry */ 1685 if (ops->create_rxfh_context) 1686 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1687 kfree(ctx); 1688 } 1689 goto out; 1690 } 1691 1692 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context), 1693 &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context))) 1694 ret = -EFAULT; 1695 1696 if (!rxfh_dev.rss_context) { 1697 /* indicate whether rxfh was set to default */ 1698 if (rxfh.indir_size == 0) 1699 dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 1700 else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) 1701 dev->priv_flags |= IFF_RXFH_CONFIGURED; 1702 } 1703 /* Update rss_ctx tracking */ 1704 if (create && !ops->create_rxfh_context) { 1705 /* driver uses old API, it chose context ID */ 1706 if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) { 1707 /* context ID reused, our tracking is screwed */ 1708 kfree(ctx); 1709 goto out; 1710 } 1711 /* Allocate the exact ID the driver gave us */ 1712 if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context, 1713 ctx, GFP_KERNEL))) { 1714 kfree(ctx); 1715 goto out; 1716 } 1717 1718 /* Fetch the defaults for the old API, in the new API drivers 1719 * should write defaults into ctx themselves. 1720 */ 1721 rxfh_dev.indir = (u32 *)rss_config; 1722 rxfh_dev.indir_size = dev_indir_size; 1723 1724 rxfh_dev.key = rss_config + indir_bytes; 1725 rxfh_dev.key_size = dev_key_size; 1726 1727 ret = ops->get_rxfh(dev, &rxfh_dev); 1728 if (WARN_ON(ret)) { 1729 xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); 1730 kfree(ctx); 1731 goto out; 1732 } 1733 } 1734 if (rxfh_dev.rss_delete) { 1735 WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); 1736 kfree(ctx); 1737 } else if (ctx) { 1738 if (rxfh_dev.indir) { 1739 for (i = 0; i < dev_indir_size; i++) 1740 ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; 1741 ctx->indir_configured = 1742 rxfh.indir_size && 1743 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE; 1744 } 1745 if (rxfh_dev.key) { 1746 memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, 1747 dev_key_size); 1748 ctx->key_configured = !!rxfh.key_size; 1749 } 1750 if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) 1751 ctx->hfunc = rxfh_dev.hfunc; 1752 if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE) 1753 ctx->input_xfrm = rxfh_dev.input_xfrm; 1754 } 1755 1756 out: 1757 if (locked) 1758 mutex_unlock(&dev->ethtool->rss_lock); 1759 kfree(rss_config); 1760 return ret; 1761 } 1762 1763 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 1764 { 1765 struct ethtool_regs regs; 1766 const struct ethtool_ops *ops = dev->ethtool_ops; 1767 void *regbuf; 1768 int reglen, ret; 1769 1770 if (!ops->get_regs || !ops->get_regs_len) 1771 return -EOPNOTSUPP; 1772 1773 if (copy_from_user(®s, useraddr, sizeof(regs))) 1774 return -EFAULT; 1775 1776 reglen = ops->get_regs_len(dev); 1777 if (reglen <= 0) 1778 return reglen; 1779 1780 if (regs.len > reglen) 1781 regs.len = reglen; 1782 1783 regbuf = vzalloc(reglen); 1784 if (!regbuf) 1785 return -ENOMEM; 1786 1787 if (regs.len < reglen) 1788 
reglen = regs.len; 1789 1790 ops->get_regs(dev, ®s, regbuf); 1791 1792 ret = -EFAULT; 1793 if (copy_to_user(useraddr, ®s, sizeof(regs))) 1794 goto out; 1795 useraddr += offsetof(struct ethtool_regs, data); 1796 if (copy_to_user(useraddr, regbuf, reglen)) 1797 goto out; 1798 ret = 0; 1799 1800 out: 1801 vfree(regbuf); 1802 return ret; 1803 } 1804 1805 static int ethtool_reset(struct net_device *dev, char __user *useraddr) 1806 { 1807 struct ethtool_value reset; 1808 int ret; 1809 1810 if (!dev->ethtool_ops->reset) 1811 return -EOPNOTSUPP; 1812 1813 if (dev->ethtool->module_fw_flash_in_progress) 1814 return -EBUSY; 1815 1816 if (copy_from_user(&reset, useraddr, sizeof(reset))) 1817 return -EFAULT; 1818 1819 ret = dev->ethtool_ops->reset(dev, &reset.data); 1820 if (ret) 1821 return ret; 1822 1823 if (copy_to_user(useraddr, &reset, sizeof(reset))) 1824 return -EFAULT; 1825 return 0; 1826 } 1827 1828 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 1829 { 1830 struct ethtool_wolinfo wol; 1831 1832 if (!dev->ethtool_ops->get_wol) 1833 return -EOPNOTSUPP; 1834 1835 memset(&wol, 0, sizeof(struct ethtool_wolinfo)); 1836 wol.cmd = ETHTOOL_GWOL; 1837 dev->ethtool_ops->get_wol(dev, &wol); 1838 1839 if (copy_to_user(useraddr, &wol, sizeof(wol))) 1840 return -EFAULT; 1841 return 0; 1842 } 1843 1844 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1845 { 1846 struct ethtool_wolinfo wol, cur_wol; 1847 int ret; 1848 1849 if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) 1850 return -EOPNOTSUPP; 1851 1852 memset(&cur_wol, 0, sizeof(struct ethtool_wolinfo)); 1853 cur_wol.cmd = ETHTOOL_GWOL; 1854 dev->ethtool_ops->get_wol(dev, &cur_wol); 1855 1856 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1857 return -EFAULT; 1858 1859 if (wol.wolopts & ~cur_wol.supported) 1860 return -EINVAL; 1861 1862 if (wol.wolopts == cur_wol.wolopts && 1863 !memcmp(wol.sopass, cur_wol.sopass, sizeof(wol.sopass))) 1864 return 0; 1865 1866 ret = dev->ethtool_ops->set_wol(dev, &wol); 1867 if (ret) 1868 return ret; 1869 1870 dev->ethtool->wol_enabled = !!wol.wolopts; 1871 ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL); 1872 1873 return 0; 1874 } 1875 1876 static void eee_to_keee(struct ethtool_keee *keee, 1877 const struct ethtool_eee *eee) 1878 { 1879 memset(keee, 0, sizeof(*keee)); 1880 1881 keee->eee_enabled = eee->eee_enabled; 1882 keee->tx_lpi_enabled = eee->tx_lpi_enabled; 1883 keee->tx_lpi_timer = eee->tx_lpi_timer; 1884 1885 ethtool_convert_legacy_u32_to_link_mode(keee->advertised, 1886 eee->advertised); 1887 } 1888 1889 static void keee_to_eee(struct ethtool_eee *eee, 1890 const struct ethtool_keee *keee) 1891 { 1892 bool overflow; 1893 1894 memset(eee, 0, sizeof(*eee)); 1895 1896 eee->eee_active = keee->eee_active; 1897 eee->eee_enabled = keee->eee_enabled; 1898 eee->tx_lpi_enabled = keee->tx_lpi_enabled; 1899 eee->tx_lpi_timer = keee->tx_lpi_timer; 1900 1901 overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported, 1902 keee->supported); 1903 ethtool_convert_link_mode_to_legacy_u32(&eee->advertised, 1904 keee->advertised); 1905 ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised, 1906 keee->lp_advertised); 1907 if (overflow) 1908 pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n"); 1909 } 1910 1911 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) 1912 { 1913 struct ethtool_keee keee; 1914 struct ethtool_eee eee; 1915 int rc; 1916 1917 if (!dev->ethtool_ops->get_eee) 1918 return 
-EOPNOTSUPP; 1919 1920 memset(&keee, 0, sizeof(keee)); 1921 rc = dev->ethtool_ops->get_eee(dev, &keee); 1922 if (rc) 1923 return rc; 1924 1925 keee_to_eee(&eee, &keee); 1926 if (copy_to_user(useraddr, &eee, sizeof(eee))) 1927 return -EFAULT; 1928 1929 return 0; 1930 } 1931 1932 static int ethtool_set_eee(struct net_device *dev, char __user *useraddr) 1933 { 1934 struct ethtool_keee keee; 1935 struct ethtool_eee eee; 1936 int ret; 1937 1938 if (!dev->ethtool_ops->set_eee) 1939 return -EOPNOTSUPP; 1940 1941 if (copy_from_user(&eee, useraddr, sizeof(eee))) 1942 return -EFAULT; 1943 1944 eee_to_keee(&keee, &eee); 1945 ret = dev->ethtool_ops->set_eee(dev, &keee); 1946 if (!ret) 1947 ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL); 1948 return ret; 1949 } 1950 1951 static int ethtool_nway_reset(struct net_device *dev) 1952 { 1953 if (!dev->ethtool_ops->nway_reset) 1954 return -EOPNOTSUPP; 1955 1956 return dev->ethtool_ops->nway_reset(dev); 1957 } 1958 1959 static int ethtool_get_link(struct net_device *dev, char __user *useraddr) 1960 { 1961 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; 1962 int link = __ethtool_get_link(dev); 1963 1964 if (link < 0) 1965 return link; 1966 1967 edata.data = link; 1968 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1969 return -EFAULT; 1970 return 0; 1971 } 1972 1973 static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, 1974 int (*getter)(struct net_device *, 1975 struct ethtool_eeprom *, u8 *), 1976 u32 total_len) 1977 { 1978 struct ethtool_eeprom eeprom; 1979 void __user *userbuf = useraddr + sizeof(eeprom); 1980 u32 bytes_remaining; 1981 u8 *data; 1982 int ret = 0; 1983 1984 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 1985 return -EFAULT; 1986 1987 /* Check for wrap and zero */ 1988 if (eeprom.offset + eeprom.len <= eeprom.offset) 1989 return -EINVAL; 1990 1991 /* Check for exceeding total eeprom len */ 1992 if (eeprom.offset + eeprom.len > total_len) 1993 return -EINVAL; 1994 1995 data = kzalloc(PAGE_SIZE, GFP_USER); 1996 if (!data) 1997 return -ENOMEM; 1998 1999 bytes_remaining = eeprom.len; 2000 while (bytes_remaining > 0) { 2001 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 2002 2003 ret = getter(dev, &eeprom, data); 2004 if (ret) 2005 break; 2006 if (!eeprom.len) { 2007 ret = -EIO; 2008 break; 2009 } 2010 if (copy_to_user(userbuf, data, eeprom.len)) { 2011 ret = -EFAULT; 2012 break; 2013 } 2014 userbuf += eeprom.len; 2015 eeprom.offset += eeprom.len; 2016 bytes_remaining -= eeprom.len; 2017 } 2018 2019 eeprom.len = userbuf - (useraddr + sizeof(eeprom)); 2020 eeprom.offset -= eeprom.len; 2021 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) 2022 ret = -EFAULT; 2023 2024 kfree(data); 2025 return ret; 2026 } 2027 2028 static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) 2029 { 2030 const struct ethtool_ops *ops = dev->ethtool_ops; 2031 2032 if (!ops->get_eeprom || !ops->get_eeprom_len || 2033 !ops->get_eeprom_len(dev)) 2034 return -EOPNOTSUPP; 2035 2036 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, 2037 ops->get_eeprom_len(dev)); 2038 } 2039 2040 static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) 2041 { 2042 struct ethtool_eeprom eeprom; 2043 const struct ethtool_ops *ops = dev->ethtool_ops; 2044 void __user *userbuf = useraddr + sizeof(eeprom); 2045 u32 bytes_remaining; 2046 u8 *data; 2047 int ret = 0; 2048 2049 if (!ops->set_eeprom || !ops->get_eeprom_len || 2050 !ops->get_eeprom_len(dev)) 2051 return -EOPNOTSUPP; 2052 2053 if 
(copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 2054 return -EFAULT; 2055 2056 /* Check for wrap and zero */ 2057 if (eeprom.offset + eeprom.len <= eeprom.offset) 2058 return -EINVAL; 2059 2060 /* Check for exceeding total eeprom len */ 2061 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 2062 return -EINVAL; 2063 2064 data = kzalloc(PAGE_SIZE, GFP_USER); 2065 if (!data) 2066 return -ENOMEM; 2067 2068 bytes_remaining = eeprom.len; 2069 while (bytes_remaining > 0) { 2070 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 2071 2072 if (copy_from_user(data, userbuf, eeprom.len)) { 2073 ret = -EFAULT; 2074 break; 2075 } 2076 ret = ops->set_eeprom(dev, &eeprom, data); 2077 if (ret) 2078 break; 2079 userbuf += eeprom.len; 2080 eeprom.offset += eeprom.len; 2081 bytes_remaining -= eeprom.len; 2082 } 2083 2084 kfree(data); 2085 return ret; 2086 } 2087 2088 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, 2089 void __user *useraddr) 2090 { 2091 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 2092 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2093 int ret; 2094 2095 if (!dev->ethtool_ops->get_coalesce) 2096 return -EOPNOTSUPP; 2097 2098 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2099 NULL); 2100 if (ret) 2101 return ret; 2102 2103 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 2104 return -EFAULT; 2105 return 0; 2106 } 2107 2108 static bool 2109 ethtool_set_coalesce_supported(struct net_device *dev, 2110 struct ethtool_coalesce *coalesce) 2111 { 2112 u32 supported_params = dev->ethtool_ops->supported_coalesce_params; 2113 u32 nonzero_params = 0; 2114 2115 if (coalesce->rx_coalesce_usecs) 2116 nonzero_params |= ETHTOOL_COALESCE_RX_USECS; 2117 if (coalesce->rx_max_coalesced_frames) 2118 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES; 2119 if (coalesce->rx_coalesce_usecs_irq) 2120 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ; 2121 if (coalesce->rx_max_coalesced_frames_irq) 2122 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ; 2123 if (coalesce->tx_coalesce_usecs) 2124 nonzero_params |= ETHTOOL_COALESCE_TX_USECS; 2125 if (coalesce->tx_max_coalesced_frames) 2126 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES; 2127 if (coalesce->tx_coalesce_usecs_irq) 2128 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ; 2129 if (coalesce->tx_max_coalesced_frames_irq) 2130 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ; 2131 if (coalesce->stats_block_coalesce_usecs) 2132 nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS; 2133 if (coalesce->use_adaptive_rx_coalesce) 2134 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX; 2135 if (coalesce->use_adaptive_tx_coalesce) 2136 nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX; 2137 if (coalesce->pkt_rate_low) 2138 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW; 2139 if (coalesce->rx_coalesce_usecs_low) 2140 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW; 2141 if (coalesce->rx_max_coalesced_frames_low) 2142 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW; 2143 if (coalesce->tx_coalesce_usecs_low) 2144 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW; 2145 if (coalesce->tx_max_coalesced_frames_low) 2146 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW; 2147 if (coalesce->pkt_rate_high) 2148 nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH; 2149 if (coalesce->rx_coalesce_usecs_high) 2150 nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH; 2151 if (coalesce->rx_max_coalesced_frames_high) 2152 nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH; 2153 
if (coalesce->tx_coalesce_usecs_high) 2154 nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH; 2155 if (coalesce->tx_max_coalesced_frames_high) 2156 nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH; 2157 if (coalesce->rate_sample_interval) 2158 nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL; 2159 2160 return (supported_params & nonzero_params) == nonzero_params; 2161 } 2162 2163 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, 2164 void __user *useraddr) 2165 { 2166 struct kernel_ethtool_coalesce kernel_coalesce = {}; 2167 struct ethtool_coalesce coalesce; 2168 int ret; 2169 2170 if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) 2171 return -EOPNOTSUPP; 2172 2173 ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, 2174 NULL); 2175 if (ret) 2176 return ret; 2177 2178 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) 2179 return -EFAULT; 2180 2181 if (!ethtool_set_coalesce_supported(dev, &coalesce)) 2182 return -EOPNOTSUPP; 2183 2184 ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, 2185 NULL); 2186 if (!ret) 2187 ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL); 2188 return ret; 2189 } 2190 2191 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 2192 { 2193 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; 2194 struct kernel_ethtool_ringparam kernel_ringparam = {}; 2195 2196 if (!dev->ethtool_ops->get_ringparam) 2197 return -EOPNOTSUPP; 2198 2199 dev->ethtool_ops->get_ringparam(dev, &ringparam, 2200 &kernel_ringparam, NULL); 2201 2202 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) 2203 return -EFAULT; 2204 return 0; 2205 } 2206 2207 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) 2208 { 2209 struct kernel_ethtool_ringparam kernel_ringparam; 2210 struct ethtool_ringparam ringparam, max; 2211 int ret; 2212 2213 if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) 2214 return -EOPNOTSUPP; 2215 2216 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) 2217 return -EFAULT; 2218 2219 ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL); 2220 2221 /* ensure new ring parameters are within the maximums */ 2222 if (ringparam.rx_pending > max.rx_max_pending || 2223 ringparam.rx_mini_pending > max.rx_mini_max_pending || 2224 ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending || 2225 ringparam.tx_pending > max.tx_max_pending) 2226 return -EINVAL; 2227 2228 ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, 2229 &kernel_ringparam, NULL); 2230 if (!ret) 2231 ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); 2232 return ret; 2233 } 2234 2235 static noinline_for_stack int ethtool_get_channels(struct net_device *dev, 2236 void __user *useraddr) 2237 { 2238 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; 2239 2240 if (!dev->ethtool_ops->get_channels) 2241 return -EOPNOTSUPP; 2242 2243 dev->ethtool_ops->get_channels(dev, &channels); 2244 2245 if (copy_to_user(useraddr, &channels, sizeof(channels))) 2246 return -EFAULT; 2247 return 0; 2248 } 2249 2250 static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 2251 void __user *useraddr) 2252 { 2253 struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; 2254 u16 from_channel, to_channel; 2255 unsigned int i; 2256 int ret; 2257 2258 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) 2259 return -EOPNOTSUPP; 2260 2261 if (copy_from_user(&channels, 
useraddr, sizeof(channels))) 2262 return -EFAULT; 2263 2264 dev->ethtool_ops->get_channels(dev, &curr); 2265 2266 if (channels.rx_count == curr.rx_count && 2267 channels.tx_count == curr.tx_count && 2268 channels.combined_count == curr.combined_count && 2269 channels.other_count == curr.other_count) 2270 return 0; 2271 2272 /* ensure new counts are within the maximums */ 2273 if (channels.rx_count > curr.max_rx || 2274 channels.tx_count > curr.max_tx || 2275 channels.combined_count > curr.max_combined || 2276 channels.other_count > curr.max_other) 2277 return -EINVAL; 2278 2279 /* ensure there is at least one RX and one TX channel */ 2280 if (!channels.combined_count && 2281 (!channels.rx_count || !channels.tx_count)) 2282 return -EINVAL; 2283 2284 ret = ethtool_check_max_channel(dev, channels, NULL); 2285 if (ret) 2286 return ret; 2287 2288 /* Disabling channels, query zero-copy AF_XDP sockets */ 2289 from_channel = channels.combined_count + 2290 min(channels.rx_count, channels.tx_count); 2291 to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); 2292 for (i = from_channel; i < to_channel; i++) 2293 if (xsk_get_pool_from_qid(dev, i)) 2294 return -EINVAL; 2295 2296 ret = dev->ethtool_ops->set_channels(dev, &channels); 2297 if (!ret) 2298 ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL); 2299 return ret; 2300 } 2301 2302 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 2303 { 2304 struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM }; 2305 2306 if (!dev->ethtool_ops->get_pauseparam) 2307 return -EOPNOTSUPP; 2308 2309 dev->ethtool_ops->get_pauseparam(dev, &pauseparam); 2310 2311 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) 2312 return -EFAULT; 2313 return 0; 2314 } 2315 2316 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) 2317 { 2318 struct ethtool_pauseparam pauseparam; 2319 int ret; 2320 2321 if (!dev->ethtool_ops->set_pauseparam) 2322 return -EOPNOTSUPP; 2323 2324 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 2325 return -EFAULT; 2326 2327 ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam); 2328 if (!ret) 2329 ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL); 2330 return ret; 2331 } 2332 2333 static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 2334 { 2335 struct ethtool_test test; 2336 const struct ethtool_ops *ops = dev->ethtool_ops; 2337 u64 *data; 2338 int ret, test_len; 2339 2340 if (!ops->self_test || !ops->get_sset_count) 2341 return -EOPNOTSUPP; 2342 2343 test_len = ops->get_sset_count(dev, ETH_SS_TEST); 2344 if (test_len < 0) 2345 return test_len; 2346 WARN_ON(test_len == 0); 2347 2348 if (copy_from_user(&test, useraddr, sizeof(test))) 2349 return -EFAULT; 2350 2351 test.len = test_len; 2352 data = kcalloc(test_len, sizeof(u64), GFP_USER); 2353 if (!data) 2354 return -ENOMEM; 2355 2356 netif_testing_on(dev); 2357 ops->self_test(dev, &test, data); 2358 netif_testing_off(dev); 2359 2360 ret = -EFAULT; 2361 if (copy_to_user(useraddr, &test, sizeof(test))) 2362 goto out; 2363 useraddr += sizeof(test); 2364 if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64)))) 2365 goto out; 2366 ret = 0; 2367 2368 out: 2369 kfree(data); 2370 return ret; 2371 } 2372 2373 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) 2374 { 2375 struct ethtool_gstrings gstrings; 2376 u8 *data; 2377 int ret; 2378 2379 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) 2380 return -EFAULT; 2381 2382 ret = 
__ethtool_get_sset_count(dev, gstrings.string_set); 2383 if (ret < 0) 2384 return ret; 2385 if (ret > S32_MAX / ETH_GSTRING_LEN) 2386 return -ENOMEM; 2387 WARN_ON_ONCE(!ret); 2388 2389 gstrings.len = ret; 2390 2391 if (gstrings.len) { 2392 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); 2393 if (!data) 2394 return -ENOMEM; 2395 2396 __ethtool_get_strings(dev, gstrings.string_set, data); 2397 } else { 2398 data = NULL; 2399 } 2400 2401 ret = -EFAULT; 2402 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 2403 goto out; 2404 useraddr += sizeof(gstrings); 2405 if (gstrings.len && 2406 copy_to_user(useraddr, data, 2407 array_size(gstrings.len, ETH_GSTRING_LEN))) 2408 goto out; 2409 ret = 0; 2410 2411 out: 2412 vfree(data); 2413 return ret; 2414 } 2415 2416 __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...) 2417 { 2418 va_list args; 2419 2420 va_start(args, fmt); 2421 vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); 2422 va_end(args); 2423 2424 *data += ETH_GSTRING_LEN; 2425 } 2426 EXPORT_SYMBOL(ethtool_sprintf); 2427 2428 void ethtool_puts(u8 **data, const char *str) 2429 { 2430 strscpy(*data, str, ETH_GSTRING_LEN); 2431 *data += ETH_GSTRING_LEN; 2432 } 2433 EXPORT_SYMBOL(ethtool_puts); 2434 2435 static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 2436 { 2437 struct ethtool_value id; 2438 static bool busy; 2439 const struct ethtool_ops *ops = dev->ethtool_ops; 2440 netdevice_tracker dev_tracker; 2441 int rc; 2442 2443 if (!ops->set_phys_id) 2444 return -EOPNOTSUPP; 2445 2446 if (busy) 2447 return -EBUSY; 2448 2449 if (copy_from_user(&id, useraddr, sizeof(id))) 2450 return -EFAULT; 2451 2452 rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); 2453 if (rc < 0) 2454 return rc; 2455 2456 /* Drop the RTNL lock while waiting, but prevent reentry or 2457 * removal of the device. 2458 */ 2459 busy = true; 2460 netdev_hold(dev, &dev_tracker, GFP_KERNEL); 2461 netdev_unlock_ops(dev); 2462 rtnl_unlock(); 2463 2464 if (rc == 0) { 2465 /* Driver will handle this itself */ 2466 schedule_timeout_interruptible( 2467 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT); 2468 } else { 2469 /* Driver expects to be called at twice the frequency in rc */ 2470 int n = rc * 2, interval = HZ / n; 2471 u64 count = mul_u32_u32(n, id.data); 2472 u64 i = 0; 2473 2474 do { 2475 rtnl_lock(); 2476 netdev_lock_ops(dev); 2477 rc = ops->set_phys_id(dev, 2478 (i++ & 1) ? 
ETHTOOL_ID_OFF : ETHTOOL_ID_ON); 2479 netdev_unlock_ops(dev); 2480 rtnl_unlock(); 2481 if (rc) 2482 break; 2483 schedule_timeout_interruptible(interval); 2484 } while (!signal_pending(current) && (!id.data || i < count)); 2485 } 2486 2487 rtnl_lock(); 2488 netdev_lock_ops(dev); 2489 netdev_put(dev, &dev_tracker); 2490 busy = false; 2491 2492 (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); 2493 return rc; 2494 } 2495 2496 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 2497 { 2498 struct ethtool_stats stats; 2499 const struct ethtool_ops *ops = dev->ethtool_ops; 2500 u64 *data; 2501 int ret, n_stats; 2502 2503 if (!ops->get_ethtool_stats || !ops->get_sset_count) 2504 return -EOPNOTSUPP; 2505 2506 n_stats = ops->get_sset_count(dev, ETH_SS_STATS); 2507 if (n_stats < 0) 2508 return n_stats; 2509 if (n_stats > S32_MAX / sizeof(u64)) 2510 return -ENOMEM; 2511 WARN_ON_ONCE(!n_stats); 2512 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2513 return -EFAULT; 2514 2515 stats.n_stats = n_stats; 2516 2517 if (n_stats) { 2518 data = vzalloc(array_size(n_stats, sizeof(u64))); 2519 if (!data) 2520 return -ENOMEM; 2521 ops->get_ethtool_stats(dev, &stats, data); 2522 } else { 2523 data = NULL; 2524 } 2525 2526 ret = -EFAULT; 2527 if (copy_to_user(useraddr, &stats, sizeof(stats))) 2528 goto out; 2529 useraddr += sizeof(stats); 2530 if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) 2531 goto out; 2532 ret = 0; 2533 2534 out: 2535 vfree(data); 2536 return ret; 2537 } 2538 2539 static int ethtool_vzalloc_stats_array(int n_stats, u64 **data) 2540 { 2541 if (n_stats < 0) 2542 return n_stats; 2543 if (n_stats > S32_MAX / sizeof(u64)) 2544 return -ENOMEM; 2545 if (WARN_ON_ONCE(!n_stats)) 2546 return -EOPNOTSUPP; 2547 2548 *data = vzalloc(array_size(n_stats, sizeof(u64))); 2549 if (!*data) 2550 return -ENOMEM; 2551 2552 return 0; 2553 } 2554 2555 static int ethtool_get_phy_stats_phydev(struct phy_device *phydev, 2556 struct ethtool_stats *stats, 2557 u64 **data) 2558 { 2559 const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; 2560 int n_stats, ret; 2561 2562 if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats) 2563 return -EOPNOTSUPP; 2564 2565 n_stats = phy_ops->get_sset_count(phydev); 2566 2567 ret = ethtool_vzalloc_stats_array(n_stats, data); 2568 if (ret) 2569 return ret; 2570 2571 stats->n_stats = n_stats; 2572 return phy_ops->get_stats(phydev, stats, *data); 2573 } 2574 2575 static int ethtool_get_phy_stats_ethtool(struct net_device *dev, 2576 struct ethtool_stats *stats, 2577 u64 **data) 2578 { 2579 const struct ethtool_ops *ops = dev->ethtool_ops; 2580 int n_stats, ret; 2581 2582 if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats) 2583 return -EOPNOTSUPP; 2584 2585 n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); 2586 2587 ret = ethtool_vzalloc_stats_array(n_stats, data); 2588 if (ret) 2589 return ret; 2590 2591 stats->n_stats = n_stats; 2592 ops->get_ethtool_phy_stats(dev, stats, *data); 2593 2594 return 0; 2595 } 2596 2597 static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) 2598 { 2599 struct phy_device *phydev = dev->phydev; 2600 struct ethtool_stats stats; 2601 u64 *data = NULL; 2602 int ret = -EOPNOTSUPP; 2603 2604 if (copy_from_user(&stats, useraddr, sizeof(stats))) 2605 return -EFAULT; 2606 2607 if (phydev) 2608 ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data); 2609 2610 if (ret == -EOPNOTSUPP) 2611 ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data); 2612 2613 
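/* At this point either the phylib path or the ethtool_ops fallback has
 * filled stats/data; any remaining error means neither source is
 * available (or the driver failed), so give up.
 */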
if (ret) 2614 goto out; 2615 2616 if (copy_to_user(useraddr, &stats, sizeof(stats))) { 2617 ret = -EFAULT; 2618 goto out; 2619 } 2620 2621 useraddr += sizeof(stats); 2622 if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64)))) 2623 ret = -EFAULT; 2624 2625 out: 2626 vfree(data); 2627 return ret; 2628 } 2629 2630 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) 2631 { 2632 struct ethtool_perm_addr epaddr; 2633 2634 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) 2635 return -EFAULT; 2636 2637 if (epaddr.size < dev->addr_len) 2638 return -ETOOSMALL; 2639 epaddr.size = dev->addr_len; 2640 2641 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) 2642 return -EFAULT; 2643 useraddr += sizeof(epaddr); 2644 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) 2645 return -EFAULT; 2646 return 0; 2647 } 2648 2649 static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 2650 u32 cmd, u32 (*actor)(struct net_device *)) 2651 { 2652 struct ethtool_value edata = { .cmd = cmd }; 2653 2654 if (!actor) 2655 return -EOPNOTSUPP; 2656 2657 edata.data = actor(dev); 2658 2659 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2660 return -EFAULT; 2661 return 0; 2662 } 2663 2664 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, 2665 void (*actor)(struct net_device *, u32)) 2666 { 2667 struct ethtool_value edata; 2668 2669 if (!actor) 2670 return -EOPNOTSUPP; 2671 2672 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2673 return -EFAULT; 2674 2675 actor(dev, edata.data); 2676 return 0; 2677 } 2678 2679 static int ethtool_set_value(struct net_device *dev, char __user *useraddr, 2680 int (*actor)(struct net_device *, u32)) 2681 { 2682 struct ethtool_value edata; 2683 2684 if (!actor) 2685 return -EOPNOTSUPP; 2686 2687 if (copy_from_user(&edata, useraddr, sizeof(edata))) 2688 return -EFAULT; 2689 2690 return actor(dev, edata.data); 2691 } 2692 2693 static int 2694 ethtool_flash_device(struct net_device *dev, struct ethtool_devlink_compat *req) 2695 { 2696 if (!dev->ethtool_ops->flash_device) { 2697 req->devlink = netdev_to_devlink_get(dev); 2698 return 0; 2699 } 2700 2701 return dev->ethtool_ops->flash_device(dev, &req->efl); 2702 } 2703 2704 static int ethtool_set_dump(struct net_device *dev, 2705 void __user *useraddr) 2706 { 2707 struct ethtool_dump dump; 2708 2709 if (!dev->ethtool_ops->set_dump) 2710 return -EOPNOTSUPP; 2711 2712 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2713 return -EFAULT; 2714 2715 return dev->ethtool_ops->set_dump(dev, &dump); 2716 } 2717 2718 static int ethtool_get_dump_flag(struct net_device *dev, 2719 void __user *useraddr) 2720 { 2721 int ret; 2722 struct ethtool_dump dump; 2723 const struct ethtool_ops *ops = dev->ethtool_ops; 2724 2725 if (!ops->get_dump_flag) 2726 return -EOPNOTSUPP; 2727 2728 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2729 return -EFAULT; 2730 2731 ret = ops->get_dump_flag(dev, &dump); 2732 if (ret) 2733 return ret; 2734 2735 if (copy_to_user(useraddr, &dump, sizeof(dump))) 2736 return -EFAULT; 2737 return 0; 2738 } 2739 2740 static int ethtool_get_dump_data(struct net_device *dev, 2741 void __user *useraddr) 2742 { 2743 int ret; 2744 __u32 len; 2745 struct ethtool_dump dump, tmp; 2746 const struct ethtool_ops *ops = dev->ethtool_ops; 2747 void *data = NULL; 2748 2749 if (!ops->get_dump_data || !ops->get_dump_flag) 2750 return -EOPNOTSUPP; 2751 2752 if (copy_from_user(&dump, useraddr, sizeof(dump))) 2753 return -EFAULT; 2754 2755 
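/* Query the driver's current dump flag/length first so the
 * user-requested length can be clamped to what the driver will
 * actually produce.
 */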
memset(&tmp, 0, sizeof(tmp)); 2756 tmp.cmd = ETHTOOL_GET_DUMP_FLAG; 2757 ret = ops->get_dump_flag(dev, &tmp); 2758 if (ret) 2759 return ret; 2760 2761 len = min(tmp.len, dump.len); 2762 if (!len) 2763 return -EFAULT; 2764 2765 /* Don't ever let the driver think there's more space available 2766 * than it requested with .get_dump_flag(). 2767 */ 2768 dump.len = len; 2769 2770 /* Always allocate enough space to hold the whole thing so that the 2771 * driver does not need to check the length and bother with partial 2772 * dumping. 2773 */ 2774 data = vzalloc(tmp.len); 2775 if (!data) 2776 return -ENOMEM; 2777 ret = ops->get_dump_data(dev, &dump, data); 2778 if (ret) 2779 goto out; 2780 2781 /* There are two sane possibilities: 2782 * 1. The driver's .get_dump_data() does not touch dump.len. 2783 * 2. Or it may set dump.len to how much it really writes, which 2784 * should be tmp.len (or len if it can do a partial dump). 2785 * In any case respond to userspace with the actual length of data 2786 * it's receiving. 2787 */ 2788 WARN_ON(dump.len != len && dump.len != tmp.len); 2789 dump.len = len; 2790 2791 if (copy_to_user(useraddr, &dump, sizeof(dump))) { 2792 ret = -EFAULT; 2793 goto out; 2794 } 2795 useraddr += offsetof(struct ethtool_dump, data); 2796 if (copy_to_user(useraddr, data, len)) 2797 ret = -EFAULT; 2798 out: 2799 vfree(data); 2800 return ret; 2801 } 2802 2803 static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) 2804 { 2805 struct kernel_ethtool_ts_info kernel_info; 2806 struct ethtool_ts_info info = {}; 2807 int err; 2808 2809 err = __ethtool_get_ts_info(dev, &kernel_info); 2810 if (err) 2811 return err; 2812 2813 info.cmd = kernel_info.cmd; 2814 info.so_timestamping = kernel_info.so_timestamping; 2815 info.phc_index = kernel_info.phc_index; 2816 info.tx_types = kernel_info.tx_types; 2817 info.rx_filters = kernel_info.rx_filters; 2818 2819 if (copy_to_user(useraddr, &info, sizeof(info))) 2820 return -EFAULT; 2821 2822 return 0; 2823 } 2824 2825 int ethtool_get_module_info_call(struct net_device *dev, 2826 struct ethtool_modinfo *modinfo) 2827 { 2828 const struct ethtool_ops *ops = dev->ethtool_ops; 2829 struct phy_device *phydev = dev->phydev; 2830 2831 if (dev->ethtool->module_fw_flash_in_progress) 2832 return -EBUSY; 2833 2834 if (dev->sfp_bus) 2835 return sfp_get_module_info(dev->sfp_bus, modinfo); 2836 2837 if (phydev && phydev->drv && phydev->drv->module_info) 2838 return phydev->drv->module_info(phydev, modinfo); 2839 2840 if (ops->get_module_info) 2841 return ops->get_module_info(dev, modinfo); 2842 2843 return -EOPNOTSUPP; 2844 } 2845 2846 static int ethtool_get_module_info(struct net_device *dev, 2847 void __user *useraddr) 2848 { 2849 int ret; 2850 struct ethtool_modinfo modinfo; 2851 2852 if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) 2853 return -EFAULT; 2854 2855 ret = ethtool_get_module_info_call(dev, &modinfo); 2856 if (ret) 2857 return ret; 2858 2859 if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) 2860 return -EFAULT; 2861 2862 return 0; 2863 } 2864 2865 int ethtool_get_module_eeprom_call(struct net_device *dev, 2866 struct ethtool_eeprom *ee, u8 *data) 2867 { 2868 const struct ethtool_ops *ops = dev->ethtool_ops; 2869 struct phy_device *phydev = dev->phydev; 2870 2871 if (dev->ethtool->module_fw_flash_in_progress) 2872 return -EBUSY; 2873 2874 if (dev->sfp_bus) 2875 return sfp_get_module_eeprom(dev->sfp_bus, ee, data); 2876 2877 if (phydev && phydev->drv && phydev->drv->module_eeprom) 2878 return 
phydev->drv->module_eeprom(phydev, ee, data); 2879 2880 if (ops->get_module_eeprom) 2881 return ops->get_module_eeprom(dev, ee, data); 2882 2883 return -EOPNOTSUPP; 2884 } 2885 2886 static int ethtool_get_module_eeprom(struct net_device *dev, 2887 void __user *useraddr) 2888 { 2889 int ret; 2890 struct ethtool_modinfo modinfo; 2891 2892 ret = ethtool_get_module_info_call(dev, &modinfo); 2893 if (ret) 2894 return ret; 2895 2896 return ethtool_get_any_eeprom(dev, useraddr, 2897 ethtool_get_module_eeprom_call, 2898 modinfo.eeprom_len); 2899 } 2900 2901 static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) 2902 { 2903 switch (tuna->id) { 2904 case ETHTOOL_RX_COPYBREAK: 2905 case ETHTOOL_TX_COPYBREAK: 2906 case ETHTOOL_TX_COPYBREAK_BUF_SIZE: 2907 if (tuna->len != sizeof(u32) || 2908 tuna->type_id != ETHTOOL_TUNABLE_U32) 2909 return -EINVAL; 2910 break; 2911 case ETHTOOL_PFC_PREVENTION_TOUT: 2912 if (tuna->len != sizeof(u16) || 2913 tuna->type_id != ETHTOOL_TUNABLE_U16) 2914 return -EINVAL; 2915 break; 2916 default: 2917 return -EINVAL; 2918 } 2919 2920 return 0; 2921 } 2922 2923 static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) 2924 { 2925 int ret; 2926 struct ethtool_tunable tuna; 2927 const struct ethtool_ops *ops = dev->ethtool_ops; 2928 void *data; 2929 2930 if (!ops->get_tunable) 2931 return -EOPNOTSUPP; 2932 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2933 return -EFAULT; 2934 ret = ethtool_tunable_valid(&tuna); 2935 if (ret) 2936 return ret; 2937 data = kzalloc(tuna.len, GFP_USER); 2938 if (!data) 2939 return -ENOMEM; 2940 ret = ops->get_tunable(dev, &tuna, data); 2941 if (ret) 2942 goto out; 2943 useraddr += sizeof(tuna); 2944 ret = -EFAULT; 2945 if (copy_to_user(useraddr, data, tuna.len)) 2946 goto out; 2947 ret = 0; 2948 2949 out: 2950 kfree(data); 2951 return ret; 2952 } 2953 2954 static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) 2955 { 2956 int ret; 2957 struct ethtool_tunable tuna; 2958 const struct ethtool_ops *ops = dev->ethtool_ops; 2959 void *data; 2960 2961 if (!ops->set_tunable) 2962 return -EOPNOTSUPP; 2963 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 2964 return -EFAULT; 2965 ret = ethtool_tunable_valid(&tuna); 2966 if (ret) 2967 return ret; 2968 useraddr += sizeof(tuna); 2969 data = memdup_user(useraddr, tuna.len); 2970 if (IS_ERR(data)) 2971 return PTR_ERR(data); 2972 ret = ops->set_tunable(dev, &tuna, data); 2973 2974 kfree(data); 2975 return ret; 2976 } 2977 2978 static noinline_for_stack int 2979 ethtool_get_per_queue_coalesce(struct net_device *dev, 2980 void __user *useraddr, 2981 struct ethtool_per_queue_op *per_queue_opt) 2982 { 2983 u32 bit; 2984 int ret; 2985 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 2986 2987 if (!dev->ethtool_ops->get_per_queue_coalesce) 2988 return -EOPNOTSUPP; 2989 2990 useraddr += sizeof(*per_queue_opt); 2991 2992 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, 2993 MAX_NUM_QUEUE); 2994 2995 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 2996 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 2997 2998 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce); 2999 if (ret != 0) 3000 return ret; 3001 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 3002 return -EFAULT; 3003 useraddr += sizeof(coalesce); 3004 } 3005 3006 return 0; 3007 } 3008 3009 static noinline_for_stack int 3010 ethtool_set_per_queue_coalesce(struct net_device *dev, 3011 void __user *useraddr, 3012 struct ethtool_per_queue_op *per_queue_opt) 
3013 { 3014 u32 bit; 3015 int i, ret = 0; 3016 int n_queue; 3017 struct ethtool_coalesce *backup = NULL, *tmp = NULL; 3018 DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE); 3019 3020 if ((!dev->ethtool_ops->set_per_queue_coalesce) || 3021 (!dev->ethtool_ops->get_per_queue_coalesce)) 3022 return -EOPNOTSUPP; 3023 3024 useraddr += sizeof(*per_queue_opt); 3025 3026 bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); 3027 n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); 3028 tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); 3029 if (!backup) 3030 return -ENOMEM; 3031 3032 for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { 3033 struct ethtool_coalesce coalesce; 3034 3035 ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp); 3036 if (ret != 0) 3037 goto roll_back; 3038 3039 tmp++; 3040 3041 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) { 3042 ret = -EFAULT; 3043 goto roll_back; 3044 } 3045 3046 if (!ethtool_set_coalesce_supported(dev, &coalesce)) { 3047 ret = -EOPNOTSUPP; 3048 goto roll_back; 3049 } 3050 3051 ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce); 3052 if (ret != 0) 3053 goto roll_back; 3054 3055 useraddr += sizeof(coalesce); 3056 } 3057 3058 roll_back: 3059 if (ret != 0) { 3060 tmp = backup; 3061 for_each_set_bit(i, queue_mask, bit) { 3062 dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp); 3063 tmp++; 3064 } 3065 } 3066 kfree(backup); 3067 3068 return ret; 3069 } 3070 3071 static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev, 3072 void __user *useraddr, u32 sub_cmd) 3073 { 3074 struct ethtool_per_queue_op per_queue_opt; 3075 3076 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) 3077 return -EFAULT; 3078 3079 if (per_queue_opt.sub_command != sub_cmd) 3080 return -EINVAL; 3081 3082 switch (per_queue_opt.sub_command) { 3083 case ETHTOOL_GCOALESCE: 3084 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3085 case ETHTOOL_SCOALESCE: 3086 return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt); 3087 default: 3088 return -EOPNOTSUPP; 3089 } 3090 } 3091 3092 static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna) 3093 { 3094 switch (tuna->id) { 3095 case ETHTOOL_PHY_DOWNSHIFT: 3096 case ETHTOOL_PHY_FAST_LINK_DOWN: 3097 if (tuna->len != sizeof(u8) || 3098 tuna->type_id != ETHTOOL_TUNABLE_U8) 3099 return -EINVAL; 3100 break; 3101 case ETHTOOL_PHY_EDPD: 3102 if (tuna->len != sizeof(u16) || 3103 tuna->type_id != ETHTOOL_TUNABLE_U16) 3104 return -EINVAL; 3105 break; 3106 default: 3107 return -EINVAL; 3108 } 3109 3110 return 0; 3111 } 3112 3113 static int get_phy_tunable(struct net_device *dev, void __user *useraddr) 3114 { 3115 struct phy_device *phydev = dev->phydev; 3116 struct ethtool_tunable tuna; 3117 bool phy_drv_tunable; 3118 void *data; 3119 int ret; 3120 3121 phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; 3122 if (!phy_drv_tunable && !dev->ethtool_ops->get_phy_tunable) 3123 return -EOPNOTSUPP; 3124 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3125 return -EFAULT; 3126 ret = ethtool_phy_tunable_valid(&tuna); 3127 if (ret) 3128 return ret; 3129 data = kzalloc(tuna.len, GFP_USER); 3130 if (!data) 3131 return -ENOMEM; 3132 if (phy_drv_tunable) { 3133 mutex_lock(&phydev->lock); 3134 ret = phydev->drv->get_tunable(phydev, &tuna, data); 3135 mutex_unlock(&phydev->lock); 3136 } else { 3137 ret = dev->ethtool_ops->get_phy_tunable(dev, &tuna, data); 3138 } 3139 if (ret) 3140 goto out; 3141 useraddr += 
sizeof(tuna); 3142 ret = -EFAULT; 3143 if (copy_to_user(useraddr, data, tuna.len)) 3144 goto out; 3145 ret = 0; 3146 3147 out: 3148 kfree(data); 3149 return ret; 3150 } 3151 3152 static int set_phy_tunable(struct net_device *dev, void __user *useraddr) 3153 { 3154 struct phy_device *phydev = dev->phydev; 3155 struct ethtool_tunable tuna; 3156 bool phy_drv_tunable; 3157 void *data; 3158 int ret; 3159 3160 phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable; 3161 if (!phy_drv_tunable && !dev->ethtool_ops->set_phy_tunable) 3162 return -EOPNOTSUPP; 3163 if (copy_from_user(&tuna, useraddr, sizeof(tuna))) 3164 return -EFAULT; 3165 ret = ethtool_phy_tunable_valid(&tuna); 3166 if (ret) 3167 return ret; 3168 useraddr += sizeof(tuna); 3169 data = memdup_user(useraddr, tuna.len); 3170 if (IS_ERR(data)) 3171 return PTR_ERR(data); 3172 if (phy_drv_tunable) { 3173 mutex_lock(&phydev->lock); 3174 ret = phydev->drv->set_tunable(phydev, &tuna, data); 3175 mutex_unlock(&phydev->lock); 3176 } else { 3177 ret = dev->ethtool_ops->set_phy_tunable(dev, &tuna, data); 3178 } 3179 3180 kfree(data); 3181 return ret; 3182 } 3183 3184 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) 3185 { 3186 struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM }; 3187 int rc; 3188 3189 if (!dev->ethtool_ops->get_fecparam) 3190 return -EOPNOTSUPP; 3191 3192 rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); 3193 if (rc) 3194 return rc; 3195 3196 if (WARN_ON_ONCE(fecparam.reserved)) 3197 fecparam.reserved = 0; 3198 3199 if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) 3200 return -EFAULT; 3201 return 0; 3202 } 3203 3204 static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) 3205 { 3206 struct ethtool_fecparam fecparam; 3207 3208 if (!dev->ethtool_ops->set_fecparam) 3209 return -EOPNOTSUPP; 3210 3211 if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) 3212 return -EFAULT; 3213 3214 if (!fecparam.fec || fecparam.fec & ETHTOOL_FEC_NONE) 3215 return -EINVAL; 3216 3217 fecparam.active_fec = 0; 3218 fecparam.reserved = 0; 3219 3220 return dev->ethtool_ops->set_fecparam(dev, &fecparam); 3221 } 3222 3223 /* The main entry point in this file. 
Called from net/core/dev_ioctl.c */ 3224 3225 static int 3226 __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, 3227 u32 ethcmd, struct ethtool_devlink_compat *devlink_state) 3228 { 3229 struct net_device *dev; 3230 u32 sub_cmd; 3231 int rc; 3232 netdev_features_t old_features; 3233 3234 dev = __dev_get_by_name(net, ifr->ifr_name); 3235 if (!dev) 3236 return -ENODEV; 3237 3238 if (ethcmd == ETHTOOL_PERQUEUE) { 3239 if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd))) 3240 return -EFAULT; 3241 } else { 3242 sub_cmd = ethcmd; 3243 } 3244 /* Allow some commands to be done by anyone */ 3245 switch (sub_cmd) { 3246 case ETHTOOL_GSET: 3247 case ETHTOOL_GDRVINFO: 3248 case ETHTOOL_GMSGLVL: 3249 case ETHTOOL_GLINK: 3250 case ETHTOOL_GCOALESCE: 3251 case ETHTOOL_GRINGPARAM: 3252 case ETHTOOL_GPAUSEPARAM: 3253 case ETHTOOL_GRXCSUM: 3254 case ETHTOOL_GTXCSUM: 3255 case ETHTOOL_GSG: 3256 case ETHTOOL_GSSET_INFO: 3257 case ETHTOOL_GSTRINGS: 3258 case ETHTOOL_GSTATS: 3259 case ETHTOOL_GPHYSTATS: 3260 case ETHTOOL_GTSO: 3261 case ETHTOOL_GPERMADDR: 3262 case ETHTOOL_GUFO: 3263 case ETHTOOL_GGSO: 3264 case ETHTOOL_GGRO: 3265 case ETHTOOL_GFLAGS: 3266 case ETHTOOL_GPFLAGS: 3267 case ETHTOOL_GRXFH: 3268 case ETHTOOL_GRXRINGS: 3269 case ETHTOOL_GRXCLSRLCNT: 3270 case ETHTOOL_GRXCLSRULE: 3271 case ETHTOOL_GRXCLSRLALL: 3272 case ETHTOOL_GRXFHINDIR: 3273 case ETHTOOL_GRSSH: 3274 case ETHTOOL_GFEATURES: 3275 case ETHTOOL_GCHANNELS: 3276 case ETHTOOL_GET_TS_INFO: 3277 case ETHTOOL_GEEE: 3278 case ETHTOOL_GTUNABLE: 3279 case ETHTOOL_PHY_GTUNABLE: 3280 case ETHTOOL_GLINKSETTINGS: 3281 case ETHTOOL_GFECPARAM: 3282 break; 3283 default: 3284 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3285 return -EPERM; 3286 } 3287 3288 netdev_lock_ops(dev); 3289 if (dev->dev.parent) 3290 pm_runtime_get_sync(dev->dev.parent); 3291 3292 if (!netif_device_present(dev)) { 3293 rc = -ENODEV; 3294 goto out; 3295 } 3296 3297 if (dev->ethtool_ops->begin) { 3298 rc = dev->ethtool_ops->begin(dev); 3299 if (rc < 0) 3300 goto out; 3301 } 3302 old_features = dev->features; 3303 3304 switch (ethcmd) { 3305 case ETHTOOL_GSET: 3306 rc = ethtool_get_settings(dev, useraddr); 3307 break; 3308 case ETHTOOL_SSET: 3309 rc = ethtool_set_settings(dev, useraddr); 3310 break; 3311 case ETHTOOL_GDRVINFO: 3312 rc = ethtool_get_drvinfo(dev, devlink_state); 3313 break; 3314 case ETHTOOL_GREGS: 3315 rc = ethtool_get_regs(dev, useraddr); 3316 break; 3317 case ETHTOOL_GWOL: 3318 rc = ethtool_get_wol(dev, useraddr); 3319 break; 3320 case ETHTOOL_SWOL: 3321 rc = ethtool_set_wol(dev, useraddr); 3322 break; 3323 case ETHTOOL_GMSGLVL: 3324 rc = ethtool_get_value(dev, useraddr, ethcmd, 3325 dev->ethtool_ops->get_msglevel); 3326 break; 3327 case ETHTOOL_SMSGLVL: 3328 rc = ethtool_set_value_void(dev, useraddr, 3329 dev->ethtool_ops->set_msglevel); 3330 if (!rc) 3331 ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL); 3332 break; 3333 case ETHTOOL_GEEE: 3334 rc = ethtool_get_eee(dev, useraddr); 3335 break; 3336 case ETHTOOL_SEEE: 3337 rc = ethtool_set_eee(dev, useraddr); 3338 break; 3339 case ETHTOOL_NWAY_RST: 3340 rc = ethtool_nway_reset(dev); 3341 break; 3342 case ETHTOOL_GLINK: 3343 rc = ethtool_get_link(dev, useraddr); 3344 break; 3345 case ETHTOOL_GEEPROM: 3346 rc = ethtool_get_eeprom(dev, useraddr); 3347 break; 3348 case ETHTOOL_SEEPROM: 3349 rc = ethtool_set_eeprom(dev, useraddr); 3350 break; 3351 case ETHTOOL_GCOALESCE: 3352 rc = ethtool_get_coalesce(dev, useraddr); 3353 break; 3354 case ETHTOOL_SCOALESCE: 3355 rc = 
ethtool_set_coalesce(dev, useraddr); 3356 break; 3357 case ETHTOOL_GRINGPARAM: 3358 rc = ethtool_get_ringparam(dev, useraddr); 3359 break; 3360 case ETHTOOL_SRINGPARAM: 3361 rc = ethtool_set_ringparam(dev, useraddr); 3362 break; 3363 case ETHTOOL_GPAUSEPARAM: 3364 rc = ethtool_get_pauseparam(dev, useraddr); 3365 break; 3366 case ETHTOOL_SPAUSEPARAM: 3367 rc = ethtool_set_pauseparam(dev, useraddr); 3368 break; 3369 case ETHTOOL_TEST: 3370 rc = ethtool_self_test(dev, useraddr); 3371 break; 3372 case ETHTOOL_GSTRINGS: 3373 rc = ethtool_get_strings(dev, useraddr); 3374 break; 3375 case ETHTOOL_PHYS_ID: 3376 rc = ethtool_phys_id(dev, useraddr); 3377 break; 3378 case ETHTOOL_GSTATS: 3379 rc = ethtool_get_stats(dev, useraddr); 3380 break; 3381 case ETHTOOL_GPERMADDR: 3382 rc = ethtool_get_perm_addr(dev, useraddr); 3383 break; 3384 case ETHTOOL_GFLAGS: 3385 rc = ethtool_get_value(dev, useraddr, ethcmd, 3386 __ethtool_get_flags); 3387 break; 3388 case ETHTOOL_SFLAGS: 3389 rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); 3390 break; 3391 case ETHTOOL_GPFLAGS: 3392 rc = ethtool_get_value(dev, useraddr, ethcmd, 3393 dev->ethtool_ops->get_priv_flags); 3394 if (!rc) 3395 ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL); 3396 break; 3397 case ETHTOOL_SPFLAGS: 3398 rc = ethtool_set_value(dev, useraddr, 3399 dev->ethtool_ops->set_priv_flags); 3400 break; 3401 case ETHTOOL_GRXFH: 3402 rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr); 3403 break; 3404 case ETHTOOL_SRXFH: 3405 rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr); 3406 break; 3407 case ETHTOOL_GRXRINGS: 3408 case ETHTOOL_GRXCLSRLCNT: 3409 case ETHTOOL_GRXCLSRULE: 3410 case ETHTOOL_GRXCLSRLALL: 3411 rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); 3412 break; 3413 case ETHTOOL_SRXCLSRLDEL: 3414 case ETHTOOL_SRXCLSRLINS: 3415 rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); 3416 break; 3417 case ETHTOOL_FLASHDEV: 3418 rc = ethtool_flash_device(dev, devlink_state); 3419 break; 3420 case ETHTOOL_RESET: 3421 rc = ethtool_reset(dev, useraddr); 3422 break; 3423 case ETHTOOL_GSSET_INFO: 3424 rc = ethtool_get_sset_info(dev, useraddr); 3425 break; 3426 case ETHTOOL_GRXFHINDIR: 3427 rc = ethtool_get_rxfh_indir(dev, useraddr); 3428 break; 3429 case ETHTOOL_SRXFHINDIR: 3430 rc = ethtool_set_rxfh_indir(dev, useraddr); 3431 break; 3432 case ETHTOOL_GRSSH: 3433 rc = ethtool_get_rxfh(dev, useraddr); 3434 break; 3435 case ETHTOOL_SRSSH: 3436 rc = ethtool_set_rxfh(dev, useraddr); 3437 break; 3438 case ETHTOOL_GFEATURES: 3439 rc = ethtool_get_features(dev, useraddr); 3440 break; 3441 case ETHTOOL_SFEATURES: 3442 rc = ethtool_set_features(dev, useraddr); 3443 break; 3444 case ETHTOOL_GTXCSUM: 3445 case ETHTOOL_GRXCSUM: 3446 case ETHTOOL_GSG: 3447 case ETHTOOL_GTSO: 3448 case ETHTOOL_GGSO: 3449 case ETHTOOL_GGRO: 3450 rc = ethtool_get_one_feature(dev, useraddr, ethcmd); 3451 break; 3452 case ETHTOOL_STXCSUM: 3453 case ETHTOOL_SRXCSUM: 3454 case ETHTOOL_SSG: 3455 case ETHTOOL_STSO: 3456 case ETHTOOL_SGSO: 3457 case ETHTOOL_SGRO: 3458 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 3459 break; 3460 case ETHTOOL_GCHANNELS: 3461 rc = ethtool_get_channels(dev, useraddr); 3462 break; 3463 case ETHTOOL_SCHANNELS: 3464 rc = ethtool_set_channels(dev, useraddr); 3465 break; 3466 case ETHTOOL_SET_DUMP: 3467 rc = ethtool_set_dump(dev, useraddr); 3468 break; 3469 case ETHTOOL_GET_DUMP_FLAG: 3470 rc = ethtool_get_dump_flag(dev, useraddr); 3471 break; 3472 case ETHTOOL_GET_DUMP_DATA: 3473 rc = ethtool_get_dump_data(dev, useraddr); 3474 break; 3475 case 
ETHTOOL_GET_TS_INFO: 3476 rc = ethtool_get_ts_info(dev, useraddr); 3477 break; 3478 case ETHTOOL_GMODULEINFO: 3479 rc = ethtool_get_module_info(dev, useraddr); 3480 break; 3481 case ETHTOOL_GMODULEEEPROM: 3482 rc = ethtool_get_module_eeprom(dev, useraddr); 3483 break; 3484 case ETHTOOL_GTUNABLE: 3485 rc = ethtool_get_tunable(dev, useraddr); 3486 break; 3487 case ETHTOOL_STUNABLE: 3488 rc = ethtool_set_tunable(dev, useraddr); 3489 break; 3490 case ETHTOOL_GPHYSTATS: 3491 rc = ethtool_get_phy_stats(dev, useraddr); 3492 break; 3493 case ETHTOOL_PERQUEUE: 3494 rc = ethtool_set_per_queue(dev, useraddr, sub_cmd); 3495 break; 3496 case ETHTOOL_GLINKSETTINGS: 3497 rc = ethtool_get_link_ksettings(dev, useraddr); 3498 break; 3499 case ETHTOOL_SLINKSETTINGS: 3500 rc = ethtool_set_link_ksettings(dev, useraddr); 3501 break; 3502 case ETHTOOL_PHY_GTUNABLE: 3503 rc = get_phy_tunable(dev, useraddr); 3504 break; 3505 case ETHTOOL_PHY_STUNABLE: 3506 rc = set_phy_tunable(dev, useraddr); 3507 break; 3508 case ETHTOOL_GFECPARAM: 3509 rc = ethtool_get_fecparam(dev, useraddr); 3510 break; 3511 case ETHTOOL_SFECPARAM: 3512 rc = ethtool_set_fecparam(dev, useraddr); 3513 break; 3514 default: 3515 rc = -EOPNOTSUPP; 3516 } 3517 3518 if (dev->ethtool_ops->complete) 3519 dev->ethtool_ops->complete(dev); 3520 3521 if (old_features != dev->features) 3522 netdev_features_change(dev); 3523 out: 3524 if (dev->dev.parent) 3525 pm_runtime_put(dev->dev.parent); 3526 netdev_unlock_ops(dev); 3527 3528 return rc; 3529 } 3530 3531 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) 3532 { 3533 struct ethtool_devlink_compat *state; 3534 u32 ethcmd; 3535 int rc; 3536 3537 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 3538 return -EFAULT; 3539 3540 state = kzalloc(sizeof(*state), GFP_KERNEL); 3541 if (!state) 3542 return -ENOMEM; 3543 3544 switch (ethcmd) { 3545 case ETHTOOL_FLASHDEV: 3546 if (copy_from_user(&state->efl, useraddr, sizeof(state->efl))) { 3547 rc = -EFAULT; 3548 goto exit_free; 3549 } 3550 state->efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; 3551 break; 3552 } 3553 3554 rtnl_lock(); 3555 rc = __dev_ethtool(net, ifr, useraddr, ethcmd, state); 3556 rtnl_unlock(); 3557 if (rc) 3558 goto exit_free; 3559 3560 switch (ethcmd) { 3561 case ETHTOOL_FLASHDEV: 3562 if (state->devlink) 3563 rc = devlink_compat_flash_update(state->devlink, 3564 state->efl.data); 3565 break; 3566 case ETHTOOL_GDRVINFO: 3567 if (state->devlink) 3568 devlink_compat_running_version(state->devlink, 3569 state->info.fw_version, 3570 sizeof(state->info.fw_version)); 3571 if (copy_to_user(useraddr, &state->info, sizeof(state->info))) { 3572 rc = -EFAULT; 3573 goto exit_free; 3574 } 3575 break; 3576 } 3577 3578 exit_free: 3579 if (state->devlink) 3580 devlink_put(state->devlink); 3581 kfree(state); 3582 return rc; 3583 } 3584 3585 struct ethtool_rx_flow_key { 3586 struct flow_dissector_key_basic basic; 3587 union { 3588 struct flow_dissector_key_ipv4_addrs ipv4; 3589 struct flow_dissector_key_ipv6_addrs ipv6; 3590 }; 3591 struct flow_dissector_key_ports tp; 3592 struct flow_dissector_key_ip ip; 3593 struct flow_dissector_key_vlan vlan; 3594 struct flow_dissector_key_eth_addrs eth_addrs; 3595 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs.
*/ 3596 3597 struct ethtool_rx_flow_match { 3598 struct flow_dissector dissector; 3599 struct ethtool_rx_flow_key key; 3600 struct ethtool_rx_flow_key mask; 3601 }; 3602 3603 struct ethtool_rx_flow_rule * 3604 ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) 3605 { 3606 const struct ethtool_rx_flow_spec *fs = input->fs; 3607 struct ethtool_rx_flow_match *match; 3608 struct ethtool_rx_flow_rule *flow; 3609 struct flow_action_entry *act; 3610 3611 flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) + 3612 sizeof(struct ethtool_rx_flow_match), GFP_KERNEL); 3613 if (!flow) 3614 return ERR_PTR(-ENOMEM); 3615 3616 /* ethtool_rx supports only one single action per rule. */ 3617 flow->rule = flow_rule_alloc(1); 3618 if (!flow->rule) { 3619 kfree(flow); 3620 return ERR_PTR(-ENOMEM); 3621 } 3622 3623 match = (struct ethtool_rx_flow_match *)flow->priv; 3624 flow->rule->match.dissector = &match->dissector; 3625 flow->rule->match.mask = &match->mask; 3626 flow->rule->match.key = &match->key; 3627 3628 match->mask.basic.n_proto = htons(0xffff); 3629 3630 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3631 case ETHER_FLOW: { 3632 const struct ethhdr *ether_spec, *ether_m_spec; 3633 3634 ether_spec = &fs->h_u.ether_spec; 3635 ether_m_spec = &fs->m_u.ether_spec; 3636 3637 if (!is_zero_ether_addr(ether_m_spec->h_source)) { 3638 ether_addr_copy(match->key.eth_addrs.src, 3639 ether_spec->h_source); 3640 ether_addr_copy(match->mask.eth_addrs.src, 3641 ether_m_spec->h_source); 3642 } 3643 if (!is_zero_ether_addr(ether_m_spec->h_dest)) { 3644 ether_addr_copy(match->key.eth_addrs.dst, 3645 ether_spec->h_dest); 3646 ether_addr_copy(match->mask.eth_addrs.dst, 3647 ether_m_spec->h_dest); 3648 } 3649 if (ether_m_spec->h_proto) { 3650 match->key.basic.n_proto = ether_spec->h_proto; 3651 match->mask.basic.n_proto = ether_m_spec->h_proto; 3652 } 3653 } 3654 break; 3655 case TCP_V4_FLOW: 3656 case UDP_V4_FLOW: { 3657 const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; 3658 3659 match->key.basic.n_proto = htons(ETH_P_IP); 3660 3661 v4_spec = &fs->h_u.tcp_ip4_spec; 3662 v4_m_spec = &fs->m_u.tcp_ip4_spec; 3663 3664 if (v4_m_spec->ip4src) { 3665 match->key.ipv4.src = v4_spec->ip4src; 3666 match->mask.ipv4.src = v4_m_spec->ip4src; 3667 } 3668 if (v4_m_spec->ip4dst) { 3669 match->key.ipv4.dst = v4_spec->ip4dst; 3670 match->mask.ipv4.dst = v4_m_spec->ip4dst; 3671 } 3672 if (v4_m_spec->ip4src || 3673 v4_m_spec->ip4dst) { 3674 match->dissector.used_keys |= 3675 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS); 3676 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 3677 offsetof(struct ethtool_rx_flow_key, ipv4); 3678 } 3679 if (v4_m_spec->psrc) { 3680 match->key.tp.src = v4_spec->psrc; 3681 match->mask.tp.src = v4_m_spec->psrc; 3682 } 3683 if (v4_m_spec->pdst) { 3684 match->key.tp.dst = v4_spec->pdst; 3685 match->mask.tp.dst = v4_m_spec->pdst; 3686 } 3687 if (v4_m_spec->psrc || 3688 v4_m_spec->pdst) { 3689 match->dissector.used_keys |= 3690 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3691 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3692 offsetof(struct ethtool_rx_flow_key, tp); 3693 } 3694 if (v4_m_spec->tos) { 3695 match->key.ip.tos = v4_spec->tos; 3696 match->mask.ip.tos = v4_m_spec->tos; 3697 match->dissector.used_keys |= 3698 BIT(FLOW_DISSECTOR_KEY_IP); 3699 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3700 offsetof(struct ethtool_rx_flow_key, ip); 3701 } 3702 } 3703 break; 3704 case TCP_V6_FLOW: 3705 case UDP_V6_FLOW: { 3706 const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; 
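/* IPv6 TCP/UDP: mirrors the IPv4 handling above, using the 128-bit
 * address keys and the traffic class byte instead of TOS.
 */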
3707 3708 match->key.basic.n_proto = htons(ETH_P_IPV6); 3709 3710 v6_spec = &fs->h_u.tcp_ip6_spec; 3711 v6_m_spec = &fs->m_u.tcp_ip6_spec; 3712 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) { 3713 memcpy(&match->key.ipv6.src, v6_spec->ip6src, 3714 sizeof(match->key.ipv6.src)); 3715 memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src, 3716 sizeof(match->mask.ipv6.src)); 3717 } 3718 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3719 memcpy(&match->key.ipv6.dst, v6_spec->ip6dst, 3720 sizeof(match->key.ipv6.dst)); 3721 memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst, 3722 sizeof(match->mask.ipv6.dst)); 3723 } 3724 if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) || 3725 !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) { 3726 match->dissector.used_keys |= 3727 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS); 3728 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = 3729 offsetof(struct ethtool_rx_flow_key, ipv6); 3730 } 3731 if (v6_m_spec->psrc) { 3732 match->key.tp.src = v6_spec->psrc; 3733 match->mask.tp.src = v6_m_spec->psrc; 3734 } 3735 if (v6_m_spec->pdst) { 3736 match->key.tp.dst = v6_spec->pdst; 3737 match->mask.tp.dst = v6_m_spec->pdst; 3738 } 3739 if (v6_m_spec->psrc || 3740 v6_m_spec->pdst) { 3741 match->dissector.used_keys |= 3742 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); 3743 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] = 3744 offsetof(struct ethtool_rx_flow_key, tp); 3745 } 3746 if (v6_m_spec->tclass) { 3747 match->key.ip.tos = v6_spec->tclass; 3748 match->mask.ip.tos = v6_m_spec->tclass; 3749 match->dissector.used_keys |= 3750 BIT_ULL(FLOW_DISSECTOR_KEY_IP); 3751 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] = 3752 offsetof(struct ethtool_rx_flow_key, ip); 3753 } 3754 } 3755 break; 3756 default: 3757 ethtool_rx_flow_rule_destroy(flow); 3758 return ERR_PTR(-EINVAL); 3759 } 3760 3761 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 3762 case TCP_V4_FLOW: 3763 case TCP_V6_FLOW: 3764 match->key.basic.ip_proto = IPPROTO_TCP; 3765 match->mask.basic.ip_proto = 0xff; 3766 break; 3767 case UDP_V4_FLOW: 3768 case UDP_V6_FLOW: 3769 match->key.basic.ip_proto = IPPROTO_UDP; 3770 match->mask.basic.ip_proto = 0xff; 3771 break; 3772 } 3773 3774 match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); 3775 match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] = 3776 offsetof(struct ethtool_rx_flow_key, basic); 3777 3778 if (fs->flow_type & FLOW_EXT) { 3779 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3780 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 3781 3782 if (ext_m_spec->vlan_etype) { 3783 match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype; 3784 match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype; 3785 } 3786 3787 if (ext_m_spec->vlan_tci) { 3788 match->key.vlan.vlan_id = 3789 ntohs(ext_h_spec->vlan_tci) & 0x0fff; 3790 match->mask.vlan.vlan_id = 3791 ntohs(ext_m_spec->vlan_tci) & 0x0fff; 3792 3793 match->key.vlan.vlan_dei = 3794 !!(ext_h_spec->vlan_tci & htons(0x1000)); 3795 match->mask.vlan.vlan_dei = 3796 !!(ext_m_spec->vlan_tci & htons(0x1000)); 3797 3798 match->key.vlan.vlan_priority = 3799 (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13; 3800 match->mask.vlan.vlan_priority = 3801 (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13; 3802 } 3803 3804 if (ext_m_spec->vlan_etype || 3805 ext_m_spec->vlan_tci) { 3806 match->dissector.used_keys |= 3807 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); 3808 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] = 3809 offsetof(struct ethtool_rx_flow_key, vlan); 3810 } 3811 } 3812 if (fs->flow_type & 
FLOW_MAC_EXT) { 3813 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext; 3814 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext; 3815 3816 memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest, 3817 ETH_ALEN); 3818 memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest, 3819 ETH_ALEN); 3820 3821 match->dissector.used_keys |= 3822 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS); 3823 match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] = 3824 offsetof(struct ethtool_rx_flow_key, eth_addrs); 3825 } 3826 3827 act = &flow->rule->action.entries[0]; 3828 switch (fs->ring_cookie) { 3829 case RX_CLS_FLOW_DISC: 3830 act->id = FLOW_ACTION_DROP; 3831 break; 3832 case RX_CLS_FLOW_WAKE: 3833 act->id = FLOW_ACTION_WAKE; 3834 break; 3835 default: 3836 act->id = FLOW_ACTION_QUEUE; 3837 if (fs->flow_type & FLOW_RSS) 3838 act->queue.ctx = input->rss_ctx; 3839 3840 act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 3841 act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie); 3842 break; 3843 } 3844 3845 return flow; 3846 } 3847 EXPORT_SYMBOL(ethtool_rx_flow_rule_create); 3848 3849 void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow) 3850 { 3851 kfree(flow->rule); 3852 kfree(flow); 3853 } 3854 EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy); 3855