1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 #include "idpf_ptp.h" 6 7 /** 8 * idpf_get_rxnfc - command to get RX flow classification rules 9 * @netdev: network interface device structure 10 * @cmd: ethtool rxnfc command 11 * @rule_locs: pointer to store rule locations 12 * 13 * Returns Success if the command is supported. 14 */ 15 static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 16 u32 __always_unused *rule_locs) 17 { 18 struct idpf_vport *vport; 19 20 idpf_vport_ctrl_lock(netdev); 21 vport = idpf_netdev_to_vport(netdev); 22 23 switch (cmd->cmd) { 24 case ETHTOOL_GRXRINGS: 25 cmd->data = vport->num_rxq; 26 idpf_vport_ctrl_unlock(netdev); 27 28 return 0; 29 default: 30 break; 31 } 32 33 idpf_vport_ctrl_unlock(netdev); 34 35 return -EOPNOTSUPP; 36 } 37 38 /** 39 * idpf_get_rxfh_key_size - get the RSS hash key size 40 * @netdev: network interface device structure 41 * 42 * Returns the key size on success, error value on failure. 43 */ 44 static u32 idpf_get_rxfh_key_size(struct net_device *netdev) 45 { 46 struct idpf_netdev_priv *np = netdev_priv(netdev); 47 struct idpf_vport_user_config_data *user_config; 48 49 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 50 return -EOPNOTSUPP; 51 52 user_config = &np->adapter->vport_config[np->vport_idx]->user_config; 53 54 return user_config->rss_data.rss_key_size; 55 } 56 57 /** 58 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size 59 * @netdev: network interface device structure 60 * 61 * Returns the table size on success, error value on failure. 62 */ 63 static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) 64 { 65 struct idpf_netdev_priv *np = netdev_priv(netdev); 66 struct idpf_vport_user_config_data *user_config; 67 68 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 69 return -EOPNOTSUPP; 70 71 user_config = &np->adapter->vport_config[np->vport_idx]->user_config; 72 73 return user_config->rss_data.rss_lut_size; 74 } 75 76 /** 77 * idpf_get_rxfh - get the rx flow hash indirection table 78 * @netdev: network interface device structure 79 * @rxfh: pointer to param struct (indir, key, hfunc) 80 * 81 * Reads the indirection table directly from the hardware. Always returns 0. 82 */ 83 static int idpf_get_rxfh(struct net_device *netdev, 84 struct ethtool_rxfh_param *rxfh) 85 { 86 struct idpf_netdev_priv *np = netdev_priv(netdev); 87 struct idpf_rss_data *rss_data; 88 struct idpf_adapter *adapter; 89 int err = 0; 90 u16 i; 91 92 idpf_vport_ctrl_lock(netdev); 93 94 adapter = np->adapter; 95 96 if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { 97 err = -EOPNOTSUPP; 98 goto unlock_mutex; 99 } 100 101 rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; 102 if (np->state != __IDPF_VPORT_UP) 103 goto unlock_mutex; 104 105 rxfh->hfunc = ETH_RSS_HASH_TOP; 106 107 if (rxfh->key) 108 memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size); 109 110 if (rxfh->indir) { 111 for (i = 0; i < rss_data->rss_lut_size; i++) 112 rxfh->indir[i] = rss_data->rss_lut[i]; 113 } 114 115 unlock_mutex: 116 idpf_vport_ctrl_unlock(netdev); 117 118 return err; 119 } 120 121 /** 122 * idpf_set_rxfh - set the rx flow hash indirection table 123 * @netdev: network interface device structure 124 * @rxfh: pointer to param struct (indir, key, hfunc) 125 * @extack: extended ACK from the Netlink message 126 * 127 * Returns -EINVAL if the table specifies an invalid queue id, otherwise 128 * returns 0 after programming the table. 129 */ 130 static int idpf_set_rxfh(struct net_device *netdev, 131 struct ethtool_rxfh_param *rxfh, 132 struct netlink_ext_ack *extack) 133 { 134 struct idpf_netdev_priv *np = netdev_priv(netdev); 135 struct idpf_rss_data *rss_data; 136 struct idpf_adapter *adapter; 137 struct idpf_vport *vport; 138 int err = 0; 139 u16 lut; 140 141 idpf_vport_ctrl_lock(netdev); 142 vport = idpf_netdev_to_vport(netdev); 143 144 adapter = vport->adapter; 145 146 if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { 147 err = -EOPNOTSUPP; 148 goto unlock_mutex; 149 } 150 151 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; 152 if (np->state != __IDPF_VPORT_UP) 153 goto unlock_mutex; 154 155 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 156 rxfh->hfunc != ETH_RSS_HASH_TOP) { 157 err = -EOPNOTSUPP; 158 goto unlock_mutex; 159 } 160 161 if (rxfh->key) 162 memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size); 163 164 if (rxfh->indir) { 165 for (lut = 0; lut < rss_data->rss_lut_size; lut++) 166 rss_data->rss_lut[lut] = rxfh->indir[lut]; 167 } 168 169 err = idpf_config_rss(vport); 170 171 unlock_mutex: 172 idpf_vport_ctrl_unlock(netdev); 173 174 return err; 175 } 176 177 /** 178 * idpf_get_channels: get the number of channels supported by the device 179 * @netdev: network interface device structure 180 * @ch: channel information structure 181 * 182 * Report maximum of TX and RX. Report one extra channel to match our MailBox 183 * Queue. 184 */ 185 static void idpf_get_channels(struct net_device *netdev, 186 struct ethtool_channels *ch) 187 { 188 struct idpf_netdev_priv *np = netdev_priv(netdev); 189 struct idpf_vport_config *vport_config; 190 u16 num_txq, num_rxq; 191 u16 combined; 192 193 vport_config = np->adapter->vport_config[np->vport_idx]; 194 195 num_txq = vport_config->user_config.num_req_tx_qs; 196 num_rxq = vport_config->user_config.num_req_rx_qs; 197 198 combined = min(num_txq, num_rxq); 199 200 /* Report maximum channels */ 201 ch->max_combined = min_t(u16, vport_config->max_q.max_txq, 202 vport_config->max_q.max_rxq); 203 ch->max_rx = vport_config->max_q.max_rxq; 204 ch->max_tx = vport_config->max_q.max_txq; 205 206 ch->max_other = IDPF_MAX_MBXQ; 207 ch->other_count = IDPF_MAX_MBXQ; 208 209 ch->combined_count = combined; 210 ch->rx_count = num_rxq - combined; 211 ch->tx_count = num_txq - combined; 212 } 213 214 /** 215 * idpf_set_channels: set the new channel count 216 * @netdev: network interface device structure 217 * @ch: channel information structure 218 * 219 * Negotiate a new number of channels with CP. Returns 0 on success, negative 220 * on failure. 221 */ 222 static int idpf_set_channels(struct net_device *netdev, 223 struct ethtool_channels *ch) 224 { 225 struct idpf_vport_config *vport_config; 226 unsigned int num_req_tx_q; 227 unsigned int num_req_rx_q; 228 struct idpf_vport *vport; 229 u16 num_txq, num_rxq; 230 struct device *dev; 231 int err = 0; 232 u16 idx; 233 234 if (ch->rx_count && ch->tx_count) { 235 netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n"); 236 return -EINVAL; 237 } 238 239 idpf_vport_ctrl_lock(netdev); 240 vport = idpf_netdev_to_vport(netdev); 241 242 idx = vport->idx; 243 vport_config = vport->adapter->vport_config[idx]; 244 245 num_txq = vport_config->user_config.num_req_tx_qs; 246 num_rxq = vport_config->user_config.num_req_rx_qs; 247 248 num_req_tx_q = ch->combined_count + ch->tx_count; 249 num_req_rx_q = ch->combined_count + ch->rx_count; 250 251 dev = &vport->adapter->pdev->dev; 252 /* It's possible to specify number of queues that exceeds max. 253 * Stack checks max combined_count and max [tx|rx]_count but not the 254 * max combined_count + [tx|rx]_count. These checks should catch that. 255 */ 256 if (num_req_tx_q > vport_config->max_q.max_txq) { 257 dev_info(dev, "Maximum TX queues is %d\n", 258 vport_config->max_q.max_txq); 259 err = -EINVAL; 260 goto unlock_mutex; 261 } 262 if (num_req_rx_q > vport_config->max_q.max_rxq) { 263 dev_info(dev, "Maximum RX queues is %d\n", 264 vport_config->max_q.max_rxq); 265 err = -EINVAL; 266 goto unlock_mutex; 267 } 268 269 if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) 270 goto unlock_mutex; 271 272 vport_config->user_config.num_req_tx_qs = num_req_tx_q; 273 vport_config->user_config.num_req_rx_qs = num_req_rx_q; 274 275 err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); 276 if (err) { 277 /* roll back queue change */ 278 vport_config->user_config.num_req_tx_qs = num_txq; 279 vport_config->user_config.num_req_rx_qs = num_rxq; 280 } 281 282 unlock_mutex: 283 idpf_vport_ctrl_unlock(netdev); 284 285 return err; 286 } 287 288 /** 289 * idpf_get_ringparam - Get ring parameters 290 * @netdev: network interface device structure 291 * @ring: ethtool ringparam structure 292 * @kring: unused 293 * @ext_ack: unused 294 * 295 * Returns current ring parameters. TX and RX rings are reported separately, 296 * but the number of rings is not reported. 297 */ 298 static void idpf_get_ringparam(struct net_device *netdev, 299 struct ethtool_ringparam *ring, 300 struct kernel_ethtool_ringparam *kring, 301 struct netlink_ext_ack *ext_ack) 302 { 303 struct idpf_vport *vport; 304 305 idpf_vport_ctrl_lock(netdev); 306 vport = idpf_netdev_to_vport(netdev); 307 308 ring->rx_max_pending = IDPF_MAX_RXQ_DESC; 309 ring->tx_max_pending = IDPF_MAX_TXQ_DESC; 310 ring->rx_pending = vport->rxq_desc_count; 311 ring->tx_pending = vport->txq_desc_count; 312 313 kring->tcp_data_split = idpf_vport_get_hsplit(vport); 314 315 idpf_vport_ctrl_unlock(netdev); 316 } 317 318 /** 319 * idpf_set_ringparam - Set ring parameters 320 * @netdev: network interface device structure 321 * @ring: ethtool ringparam structure 322 * @kring: unused 323 * @ext_ack: unused 324 * 325 * Sets ring parameters. TX and RX rings are controlled separately, but the 326 * number of rings is not specified, so all rings get the same settings. 327 */ 328 static int idpf_set_ringparam(struct net_device *netdev, 329 struct ethtool_ringparam *ring, 330 struct kernel_ethtool_ringparam *kring, 331 struct netlink_ext_ack *ext_ack) 332 { 333 struct idpf_vport_user_config_data *config_data; 334 u32 new_rx_count, new_tx_count; 335 struct idpf_vport *vport; 336 int i, err = 0; 337 u16 idx; 338 339 idpf_vport_ctrl_lock(netdev); 340 vport = idpf_netdev_to_vport(netdev); 341 342 idx = vport->idx; 343 344 if (ring->tx_pending < IDPF_MIN_TXQ_DESC) { 345 netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n", 346 ring->tx_pending, 347 IDPF_MIN_TXQ_DESC); 348 err = -EINVAL; 349 goto unlock_mutex; 350 } 351 352 if (ring->rx_pending < IDPF_MIN_RXQ_DESC) { 353 netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n", 354 ring->rx_pending, 355 IDPF_MIN_RXQ_DESC); 356 err = -EINVAL; 357 goto unlock_mutex; 358 } 359 360 new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE); 361 if (new_rx_count != ring->rx_pending) 362 netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n", 363 new_rx_count); 364 365 new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); 366 if (new_tx_count != ring->tx_pending) 367 netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", 368 new_tx_count); 369 370 if (new_tx_count == vport->txq_desc_count && 371 new_rx_count == vport->rxq_desc_count && 372 kring->tcp_data_split == idpf_vport_get_hsplit(vport)) 373 goto unlock_mutex; 374 375 if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { 376 NL_SET_ERR_MSG_MOD(ext_ack, 377 "setting TCP data split is not supported"); 378 err = -EOPNOTSUPP; 379 380 goto unlock_mutex; 381 } 382 383 config_data = &vport->adapter->vport_config[idx]->user_config; 384 config_data->num_req_txq_desc = new_tx_count; 385 config_data->num_req_rxq_desc = new_rx_count; 386 387 /* Since we adjusted the RX completion queue count, the RX buffer queue 388 * descriptor count needs to be adjusted as well 389 */ 390 for (i = 0; i < vport->num_bufqs_per_qgrp; i++) 391 vport->bufq_desc_count[i] = 392 IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, 393 vport->num_bufqs_per_qgrp); 394 395 err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); 396 397 unlock_mutex: 398 idpf_vport_ctrl_unlock(netdev); 399 400 return err; 401 } 402 403 /** 404 * struct idpf_stats - definition for an ethtool statistic 405 * @stat_string: statistic name to display in ethtool -S output 406 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) 407 * @stat_offset: offsetof() the stat from a base pointer 408 * 409 * This structure defines a statistic to be added to the ethtool stats buffer. 410 * It defines a statistic as offset from a common base pointer. Stats should 411 * be defined in constant arrays using the IDPF_STAT macro, with every element 412 * of the array using the same _type for calculating the sizeof_stat and 413 * stat_offset. 414 * 415 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or 416 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from 417 * the idpf_add_ethtool_stat() helper function. 418 * 419 * The @stat_string is interpreted as a format string, allowing formatted 420 * values to be inserted while looping over multiple structures for a given 421 * statistics array. Thus, every statistic string in an array should have the 422 * same type and number of format specifiers, to be formatted by variadic 423 * arguments to the idpf_add_stat_string() helper function. 424 */ 425 struct idpf_stats { 426 char stat_string[ETH_GSTRING_LEN]; 427 int sizeof_stat; 428 int stat_offset; 429 }; 430 431 /* Helper macro to define an idpf_stat structure with proper size and type. 432 * Use this when defining constant statistics arrays. Note that @_type expects 433 * only a type name and is used multiple times. 434 */ 435 #define IDPF_STAT(_type, _name, _stat) { \ 436 .stat_string = _name, \ 437 .sizeof_stat = sizeof_field(_type, _stat), \ 438 .stat_offset = offsetof(_type, _stat) \ 439 } 440 441 /* Helper macros for defining some statistics related to queues */ 442 #define IDPF_RX_QUEUE_STAT(_name, _stat) \ 443 IDPF_STAT(struct idpf_rx_queue, _name, _stat) 444 #define IDPF_TX_QUEUE_STAT(_name, _stat) \ 445 IDPF_STAT(struct idpf_tx_queue, _name, _stat) 446 447 /* Stats associated with a Tx queue */ 448 static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { 449 IDPF_TX_QUEUE_STAT("pkts", q_stats.packets), 450 IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes), 451 IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts), 452 }; 453 454 /* Stats associated with an Rx queue */ 455 static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { 456 IDPF_RX_QUEUE_STAT("pkts", q_stats.packets), 457 IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes), 458 IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts), 459 }; 460 461 #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) 462 #define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats) 463 464 #define IDPF_PORT_STAT(_name, _stat) \ 465 IDPF_STAT(struct idpf_vport, _name, _stat) 466 467 static const struct idpf_stats idpf_gstrings_port_stats[] = { 468 IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err), 469 IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit), 470 IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo), 471 IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs), 472 IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops), 473 IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs), 474 IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize), 475 IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy), 476 IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast), 477 IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast), 478 IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast), 479 IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol), 480 IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast), 481 IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast), 482 IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast), 483 }; 484 485 #define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats) 486 487 /** 488 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer 489 * @p: ethtool supplied buffer 490 * @stats: stat definitions array 491 * @size: size of the stats array 492 * @type: stat type 493 * @idx: stat index 494 * 495 * Format and copy the strings described by stats into the buffer pointed at 496 * by p. 497 */ 498 static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats, 499 const unsigned int size, const char *type, 500 unsigned int idx) 501 { 502 unsigned int i; 503 504 for (i = 0; i < size; i++) 505 ethtool_sprintf(p, "%s_q-%u_%s", 506 type, idx, stats[i].stat_string); 507 } 508 509 /** 510 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer 511 * @p: ethtool supplied buffer 512 * @stats: stat definitions array 513 * @type: stat type 514 * @idx: stat idx 515 * 516 * Format and copy the strings described by the const static stats value into 517 * the buffer pointed at by p. 518 * 519 * The parameter @stats is evaluated twice, so parameters with side effects 520 * should be avoided. Additionally, stats must be an array such that 521 * ARRAY_SIZE can be called on it. 522 */ 523 #define idpf_add_qstat_strings(p, stats, type, idx) \ 524 __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx) 525 526 /** 527 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer 528 * @p: ethtool buffer 529 * @stats: struct to copy from 530 * @size: size of stats array to copy from 531 */ 532 static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, 533 const unsigned int size) 534 { 535 unsigned int i; 536 537 for (i = 0; i < size; i++) 538 ethtool_puts(p, stats[i].stat_string); 539 } 540 541 /** 542 * idpf_get_stat_strings - Get stat strings 543 * @netdev: network interface device structure 544 * @data: buffer for string data 545 * 546 * Builds the statistics string table 547 */ 548 static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) 549 { 550 struct idpf_netdev_priv *np = netdev_priv(netdev); 551 struct idpf_vport_config *vport_config; 552 unsigned int i; 553 554 idpf_add_stat_strings(&data, idpf_gstrings_port_stats, 555 IDPF_PORT_STATS_LEN); 556 557 vport_config = np->adapter->vport_config[np->vport_idx]; 558 /* It's critical that we always report a constant number of strings and 559 * that the strings are reported in the same order regardless of how 560 * many queues are actually in use. 561 */ 562 for (i = 0; i < vport_config->max_q.max_txq; i++) 563 idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, 564 "tx", i); 565 566 for (i = 0; i < vport_config->max_q.max_rxq; i++) 567 idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, 568 "rx", i); 569 } 570 571 /** 572 * idpf_get_strings - Get string set 573 * @netdev: network interface device structure 574 * @sset: id of string set 575 * @data: buffer for string data 576 * 577 * Builds string tables for various string sets 578 */ 579 static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 580 { 581 switch (sset) { 582 case ETH_SS_STATS: 583 idpf_get_stat_strings(netdev, data); 584 break; 585 default: 586 break; 587 } 588 } 589 590 /** 591 * idpf_get_sset_count - Get length of string set 592 * @netdev: network interface device structure 593 * @sset: id of string set 594 * 595 * Reports size of various string tables. 596 */ 597 static int idpf_get_sset_count(struct net_device *netdev, int sset) 598 { 599 struct idpf_netdev_priv *np = netdev_priv(netdev); 600 struct idpf_vport_config *vport_config; 601 u16 max_txq, max_rxq; 602 603 if (sset != ETH_SS_STATS) 604 return -EINVAL; 605 606 vport_config = np->adapter->vport_config[np->vport_idx]; 607 /* This size reported back here *must* be constant throughout the 608 * lifecycle of the netdevice, i.e. we must report the maximum length 609 * even for queues that don't technically exist. This is due to the 610 * fact that this userspace API uses three separate ioctl calls to get 611 * stats data but has no way to communicate back to userspace when that 612 * size has changed, which can typically happen as a result of changing 613 * number of queues. If the number/order of stats change in the middle 614 * of this call chain it will lead to userspace crashing/accessing bad 615 * data through buffer under/overflow. 616 */ 617 max_txq = vport_config->max_q.max_txq; 618 max_rxq = vport_config->max_q.max_rxq; 619 620 return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + 621 (IDPF_RX_QUEUE_STATS_LEN * max_rxq); 622 } 623 624 /** 625 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer 626 * @data: location to store the stat value 627 * @pstat: old stat pointer to copy from 628 * @stat: the stat definition 629 * 630 * Copies the stat data defined by the pointer and stat structure pair into 631 * the memory supplied as data. If the pointer is null, data will be zero'd. 632 */ 633 static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat, 634 const struct idpf_stats *stat) 635 { 636 char *p; 637 638 if (!pstat) { 639 /* Ensure that the ethtool data buffer is zero'd for any stats 640 * which don't have a valid pointer. 641 */ 642 *data = 0; 643 return; 644 } 645 646 p = (char *)pstat + stat->stat_offset; 647 switch (stat->sizeof_stat) { 648 case sizeof(u64): 649 *data = *((u64 *)p); 650 break; 651 case sizeof(u32): 652 *data = *((u32 *)p); 653 break; 654 case sizeof(u16): 655 *data = *((u16 *)p); 656 break; 657 case sizeof(u8): 658 *data = *((u8 *)p); 659 break; 660 default: 661 WARN_ONCE(1, "unexpected stat size for %s", 662 stat->stat_string); 663 *data = 0; 664 } 665 } 666 667 /** 668 * idpf_add_queue_stats - copy queue statistics into supplied buffer 669 * @data: ethtool stats buffer 670 * @q: the queue to copy 671 * @type: type of the queue 672 * 673 * Queue statistics must be copied while protected by u64_stats_fetch_begin, 674 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats 675 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null, 676 * zero out the queue stat values and update the data pointer. Otherwise 677 * safely copy the stats from the queue into the supplied buffer and update 678 * the data pointer when finished. 679 * 680 * This function expects to be called while under rcu_read_lock(). 681 */ 682 static void idpf_add_queue_stats(u64 **data, const void *q, 683 enum virtchnl2_queue_type type) 684 { 685 const struct u64_stats_sync *stats_sync; 686 const struct idpf_stats *stats; 687 unsigned int start; 688 unsigned int size; 689 unsigned int i; 690 691 if (type == VIRTCHNL2_QUEUE_TYPE_RX) { 692 size = IDPF_RX_QUEUE_STATS_LEN; 693 stats = idpf_gstrings_rx_queue_stats; 694 stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync; 695 } else { 696 size = IDPF_TX_QUEUE_STATS_LEN; 697 stats = idpf_gstrings_tx_queue_stats; 698 stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync; 699 } 700 701 /* To avoid invalid statistics values, ensure that we keep retrying 702 * the copy until we get a consistent value according to 703 * u64_stats_fetch_retry. 704 */ 705 do { 706 start = u64_stats_fetch_begin(stats_sync); 707 for (i = 0; i < size; i++) 708 idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); 709 } while (u64_stats_fetch_retry(stats_sync, start)); 710 711 /* Once we successfully copy the stats in, update the data pointer */ 712 *data += size; 713 } 714 715 /** 716 * idpf_add_empty_queue_stats - Add stats for a non-existent queue 717 * @data: pointer to data buffer 718 * @qtype: type of data queue 719 * 720 * We must report a constant length of stats back to userspace regardless of 721 * how many queues are actually in use because stats collection happens over 722 * three separate ioctls and there's no way to notify userspace the size 723 * changed between those calls. This adds empty to data to the stats since we 724 * don't have a real queue to refer to for this stats slot. 725 */ 726 static void idpf_add_empty_queue_stats(u64 **data, u16 qtype) 727 { 728 unsigned int i; 729 int stats_len; 730 731 if (qtype == VIRTCHNL2_QUEUE_TYPE_RX) 732 stats_len = IDPF_RX_QUEUE_STATS_LEN; 733 else 734 stats_len = IDPF_TX_QUEUE_STATS_LEN; 735 736 for (i = 0; i < stats_len; i++) 737 (*data)[i] = 0; 738 *data += stats_len; 739 } 740 741 /** 742 * idpf_add_port_stats - Copy port stats into ethtool buffer 743 * @vport: virtual port struct 744 * @data: ethtool buffer to copy into 745 */ 746 static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data) 747 { 748 unsigned int size = IDPF_PORT_STATS_LEN; 749 unsigned int start; 750 unsigned int i; 751 752 do { 753 start = u64_stats_fetch_begin(&vport->port_stats.stats_sync); 754 for (i = 0; i < size; i++) 755 idpf_add_one_ethtool_stat(&(*data)[i], vport, 756 &idpf_gstrings_port_stats[i]); 757 } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start)); 758 759 *data += size; 760 } 761 762 /** 763 * idpf_collect_queue_stats - accumulate various per queue stats 764 * into port level stats 765 * @vport: pointer to vport struct 766 **/ 767 static void idpf_collect_queue_stats(struct idpf_vport *vport) 768 { 769 struct idpf_port_stats *pstats = &vport->port_stats; 770 int i, j; 771 772 /* zero out port stats since they're actually tracked in per 773 * queue stats; this is only for reporting 774 */ 775 u64_stats_update_begin(&pstats->stats_sync); 776 u64_stats_set(&pstats->rx_hw_csum_err, 0); 777 u64_stats_set(&pstats->rx_hsplit, 0); 778 u64_stats_set(&pstats->rx_hsplit_hbo, 0); 779 u64_stats_set(&pstats->rx_bad_descs, 0); 780 u64_stats_set(&pstats->tx_linearize, 0); 781 u64_stats_set(&pstats->tx_busy, 0); 782 u64_stats_set(&pstats->tx_drops, 0); 783 u64_stats_set(&pstats->tx_dma_map_errs, 0); 784 u64_stats_update_end(&pstats->stats_sync); 785 786 for (i = 0; i < vport->num_rxq_grp; i++) { 787 struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; 788 u16 num_rxq; 789 790 if (idpf_is_queue_model_split(vport->rxq_model)) 791 num_rxq = rxq_grp->splitq.num_rxq_sets; 792 else 793 num_rxq = rxq_grp->singleq.num_rxq; 794 795 for (j = 0; j < num_rxq; j++) { 796 u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; 797 struct idpf_rx_queue_stats *stats; 798 struct idpf_rx_queue *rxq; 799 unsigned int start; 800 801 if (idpf_is_queue_model_split(vport->rxq_model)) 802 rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; 803 else 804 rxq = rxq_grp->singleq.rxqs[j]; 805 806 if (!rxq) 807 continue; 808 809 do { 810 start = u64_stats_fetch_begin(&rxq->stats_sync); 811 812 stats = &rxq->q_stats; 813 hw_csum_err = u64_stats_read(&stats->hw_csum_err); 814 hsplit = u64_stats_read(&stats->hsplit_pkts); 815 hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); 816 bad_descs = u64_stats_read(&stats->bad_descs); 817 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); 818 819 u64_stats_update_begin(&pstats->stats_sync); 820 u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err); 821 u64_stats_add(&pstats->rx_hsplit, hsplit); 822 u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo); 823 u64_stats_add(&pstats->rx_bad_descs, bad_descs); 824 u64_stats_update_end(&pstats->stats_sync); 825 } 826 } 827 828 for (i = 0; i < vport->num_txq_grp; i++) { 829 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 830 831 for (j = 0; j < txq_grp->num_txq; j++) { 832 u64 linearize, qbusy, skb_drops, dma_map_errs; 833 struct idpf_tx_queue *txq = txq_grp->txqs[j]; 834 struct idpf_tx_queue_stats *stats; 835 unsigned int start; 836 837 if (!txq) 838 continue; 839 840 do { 841 start = u64_stats_fetch_begin(&txq->stats_sync); 842 843 stats = &txq->q_stats; 844 linearize = u64_stats_read(&stats->linearize); 845 qbusy = u64_stats_read(&stats->q_busy); 846 skb_drops = u64_stats_read(&stats->skb_drops); 847 dma_map_errs = u64_stats_read(&stats->dma_map_errs); 848 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); 849 850 u64_stats_update_begin(&pstats->stats_sync); 851 u64_stats_add(&pstats->tx_linearize, linearize); 852 u64_stats_add(&pstats->tx_busy, qbusy); 853 u64_stats_add(&pstats->tx_drops, skb_drops); 854 u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); 855 u64_stats_update_end(&pstats->stats_sync); 856 } 857 } 858 } 859 860 /** 861 * idpf_get_ethtool_stats - report device statistics 862 * @netdev: network interface device structure 863 * @stats: ethtool statistics structure 864 * @data: pointer to data buffer 865 * 866 * All statistics are added to the data buffer as an array of u64. 867 */ 868 static void idpf_get_ethtool_stats(struct net_device *netdev, 869 struct ethtool_stats __always_unused *stats, 870 u64 *data) 871 { 872 struct idpf_netdev_priv *np = netdev_priv(netdev); 873 struct idpf_vport_config *vport_config; 874 struct idpf_vport *vport; 875 unsigned int total = 0; 876 unsigned int i, j; 877 bool is_splitq; 878 u16 qtype; 879 880 idpf_vport_ctrl_lock(netdev); 881 vport = idpf_netdev_to_vport(netdev); 882 883 if (np->state != __IDPF_VPORT_UP) { 884 idpf_vport_ctrl_unlock(netdev); 885 886 return; 887 } 888 889 rcu_read_lock(); 890 891 idpf_collect_queue_stats(vport); 892 idpf_add_port_stats(vport, &data); 893 894 for (i = 0; i < vport->num_txq_grp; i++) { 895 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; 896 897 qtype = VIRTCHNL2_QUEUE_TYPE_TX; 898 899 for (j = 0; j < txq_grp->num_txq; j++, total++) { 900 struct idpf_tx_queue *txq = txq_grp->txqs[j]; 901 902 if (!txq) 903 idpf_add_empty_queue_stats(&data, qtype); 904 else 905 idpf_add_queue_stats(&data, txq, qtype); 906 } 907 } 908 909 vport_config = vport->adapter->vport_config[vport->idx]; 910 /* It is critical we provide a constant number of stats back to 911 * userspace regardless of how many queues are actually in use because 912 * there is no way to inform userspace the size has changed between 913 * ioctl calls. This will fill in any missing stats with zero. 914 */ 915 for (; total < vport_config->max_q.max_txq; total++) 916 idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX); 917 total = 0; 918 919 is_splitq = idpf_is_queue_model_split(vport->rxq_model); 920 921 for (i = 0; i < vport->num_rxq_grp; i++) { 922 struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; 923 u16 num_rxq; 924 925 qtype = VIRTCHNL2_QUEUE_TYPE_RX; 926 927 if (is_splitq) 928 num_rxq = rxq_grp->splitq.num_rxq_sets; 929 else 930 num_rxq = rxq_grp->singleq.num_rxq; 931 932 for (j = 0; j < num_rxq; j++, total++) { 933 struct idpf_rx_queue *rxq; 934 935 if (is_splitq) 936 rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; 937 else 938 rxq = rxq_grp->singleq.rxqs[j]; 939 if (!rxq) 940 idpf_add_empty_queue_stats(&data, qtype); 941 else 942 idpf_add_queue_stats(&data, rxq, qtype); 943 } 944 } 945 946 for (; total < vport_config->max_q.max_rxq; total++) 947 idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); 948 949 rcu_read_unlock(); 950 951 idpf_vport_ctrl_unlock(netdev); 952 } 953 954 /** 955 * idpf_find_rxq_vec - find rxq vector from q index 956 * @vport: virtual port associated to queue 957 * @q_num: q index used to find queue 958 * 959 * returns pointer to rx vector 960 */ 961 static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, 962 int q_num) 963 { 964 int q_grp, q_idx; 965 966 if (!idpf_is_queue_model_split(vport->rxq_model)) 967 return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; 968 969 q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; 970 q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; 971 972 return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; 973 } 974 975 /** 976 * idpf_find_txq_vec - find txq vector from q index 977 * @vport: virtual port associated to queue 978 * @q_num: q index used to find queue 979 * 980 * returns pointer to tx vector 981 */ 982 static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport, 983 int q_num) 984 { 985 int q_grp; 986 987 if (!idpf_is_queue_model_split(vport->txq_model)) 988 return vport->txqs[q_num]->q_vector; 989 990 q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; 991 992 return vport->txq_grps[q_grp].complq->q_vector; 993 } 994 995 /** 996 * __idpf_get_q_coalesce - get ITR values for specific queue 997 * @ec: ethtool structure to fill with driver's coalesce settings 998 * @q_vector: queue vector corresponding to this queue 999 * @type: queue type 1000 */ 1001 static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, 1002 const struct idpf_q_vector *q_vector, 1003 enum virtchnl2_queue_type type) 1004 { 1005 if (type == VIRTCHNL2_QUEUE_TYPE_RX) { 1006 ec->use_adaptive_rx_coalesce = 1007 IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode); 1008 ec->rx_coalesce_usecs = q_vector->rx_itr_value; 1009 } else { 1010 ec->use_adaptive_tx_coalesce = 1011 IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode); 1012 ec->tx_coalesce_usecs = q_vector->tx_itr_value; 1013 } 1014 } 1015 1016 /** 1017 * idpf_get_q_coalesce - get ITR values for specific queue 1018 * @netdev: pointer to the netdev associated with this query 1019 * @ec: coalesce settings to program the device with 1020 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 1021 * 1022 * Return 0 on success, and negative on failure 1023 */ 1024 static int idpf_get_q_coalesce(struct net_device *netdev, 1025 struct ethtool_coalesce *ec, 1026 u32 q_num) 1027 { 1028 const struct idpf_netdev_priv *np = netdev_priv(netdev); 1029 const struct idpf_vport *vport; 1030 int err = 0; 1031 1032 idpf_vport_ctrl_lock(netdev); 1033 vport = idpf_netdev_to_vport(netdev); 1034 1035 if (np->state != __IDPF_VPORT_UP) 1036 goto unlock_mutex; 1037 1038 if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { 1039 err = -EINVAL; 1040 goto unlock_mutex; 1041 } 1042 1043 if (q_num < vport->num_rxq) 1044 __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num), 1045 VIRTCHNL2_QUEUE_TYPE_RX); 1046 1047 if (q_num < vport->num_txq) 1048 __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num), 1049 VIRTCHNL2_QUEUE_TYPE_TX); 1050 1051 unlock_mutex: 1052 idpf_vport_ctrl_unlock(netdev); 1053 1054 return err; 1055 } 1056 1057 /** 1058 * idpf_get_coalesce - get ITR values as requested by user 1059 * @netdev: pointer to the netdev associated with this query 1060 * @ec: coalesce settings to be filled 1061 * @kec: unused 1062 * @extack: unused 1063 * 1064 * Return 0 on success, and negative on failure 1065 */ 1066 static int idpf_get_coalesce(struct net_device *netdev, 1067 struct ethtool_coalesce *ec, 1068 struct kernel_ethtool_coalesce *kec, 1069 struct netlink_ext_ack *extack) 1070 { 1071 /* Return coalesce based on queue number zero */ 1072 return idpf_get_q_coalesce(netdev, ec, 0); 1073 } 1074 1075 /** 1076 * idpf_get_per_q_coalesce - get ITR values as requested by user 1077 * @netdev: pointer to the netdev associated with this query 1078 * @q_num: queue for which the itr values has to retrieved 1079 * @ec: coalesce settings to be filled 1080 * 1081 * Return 0 on success, and negative on failure 1082 */ 1083 1084 static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, 1085 struct ethtool_coalesce *ec) 1086 { 1087 return idpf_get_q_coalesce(netdev, ec, q_num); 1088 } 1089 1090 /** 1091 * __idpf_set_q_coalesce - set ITR values for specific queue 1092 * @ec: ethtool structure from user to update ITR settings 1093 * @qv: queue vector for which itr values has to be set 1094 * @is_rxq: is queue type rx 1095 * 1096 * Returns 0 on success, negative otherwise. 1097 */ 1098 static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, 1099 struct idpf_q_vector *qv, bool is_rxq) 1100 { 1101 u32 use_adaptive_coalesce, coalesce_usecs; 1102 bool is_dim_ena = false; 1103 u16 itr_val; 1104 1105 if (is_rxq) { 1106 is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); 1107 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; 1108 coalesce_usecs = ec->rx_coalesce_usecs; 1109 itr_val = qv->rx_itr_value; 1110 } else { 1111 is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); 1112 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; 1113 coalesce_usecs = ec->tx_coalesce_usecs; 1114 itr_val = qv->tx_itr_value; 1115 } 1116 if (coalesce_usecs != itr_val && use_adaptive_coalesce) { 1117 netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); 1118 1119 return -EINVAL; 1120 } 1121 1122 if (is_dim_ena && use_adaptive_coalesce) 1123 return 0; 1124 1125 if (coalesce_usecs > IDPF_ITR_MAX) { 1126 netdev_err(qv->vport->netdev, 1127 "Invalid value, %d-usecs range is 0-%d\n", 1128 coalesce_usecs, IDPF_ITR_MAX); 1129 1130 return -EINVAL; 1131 } 1132 1133 if (coalesce_usecs % 2) { 1134 coalesce_usecs--; 1135 netdev_info(qv->vport->netdev, 1136 "HW only supports even ITR values, ITR rounded to %d\n", 1137 coalesce_usecs); 1138 } 1139 1140 if (is_rxq) { 1141 qv->rx_itr_value = coalesce_usecs; 1142 if (use_adaptive_coalesce) { 1143 qv->rx_intr_mode = IDPF_ITR_DYNAMIC; 1144 } else { 1145 qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; 1146 idpf_vport_intr_write_itr(qv, qv->rx_itr_value, 1147 false); 1148 } 1149 } else { 1150 qv->tx_itr_value = coalesce_usecs; 1151 if (use_adaptive_coalesce) { 1152 qv->tx_intr_mode = IDPF_ITR_DYNAMIC; 1153 } else { 1154 qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; 1155 idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); 1156 } 1157 } 1158 1159 /* Update of static/dynamic itr will be taken care when interrupt is 1160 * fired 1161 */ 1162 return 0; 1163 } 1164 1165 /** 1166 * idpf_set_q_coalesce - set ITR values for specific queue 1167 * @vport: vport associated to the queue that need updating 1168 * @ec: coalesce settings to program the device with 1169 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 1170 * @is_rxq: is queue type rx 1171 * 1172 * Return 0 on success, and negative on failure 1173 */ 1174 static int idpf_set_q_coalesce(const struct idpf_vport *vport, 1175 const struct ethtool_coalesce *ec, 1176 int q_num, bool is_rxq) 1177 { 1178 struct idpf_q_vector *qv; 1179 1180 qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) : 1181 idpf_find_txq_vec(vport, q_num); 1182 1183 if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) 1184 return -EINVAL; 1185 1186 return 0; 1187 } 1188 1189 /** 1190 * idpf_set_coalesce - set ITR values as requested by user 1191 * @netdev: pointer to the netdev associated with this query 1192 * @ec: coalesce settings to program the device with 1193 * @kec: unused 1194 * @extack: unused 1195 * 1196 * Return 0 on success, and negative on failure 1197 */ 1198 static int idpf_set_coalesce(struct net_device *netdev, 1199 struct ethtool_coalesce *ec, 1200 struct kernel_ethtool_coalesce *kec, 1201 struct netlink_ext_ack *extack) 1202 { 1203 struct idpf_netdev_priv *np = netdev_priv(netdev); 1204 struct idpf_vport *vport; 1205 int i, err = 0; 1206 1207 idpf_vport_ctrl_lock(netdev); 1208 vport = idpf_netdev_to_vport(netdev); 1209 1210 if (np->state != __IDPF_VPORT_UP) 1211 goto unlock_mutex; 1212 1213 for (i = 0; i < vport->num_txq; i++) { 1214 err = idpf_set_q_coalesce(vport, ec, i, false); 1215 if (err) 1216 goto unlock_mutex; 1217 } 1218 1219 for (i = 0; i < vport->num_rxq; i++) { 1220 err = idpf_set_q_coalesce(vport, ec, i, true); 1221 if (err) 1222 goto unlock_mutex; 1223 } 1224 1225 unlock_mutex: 1226 idpf_vport_ctrl_unlock(netdev); 1227 1228 return err; 1229 } 1230 1231 /** 1232 * idpf_set_per_q_coalesce - set ITR values as requested by user 1233 * @netdev: pointer to the netdev associated with this query 1234 * @q_num: queue for which the itr values has to be set 1235 * @ec: coalesce settings to program the device with 1236 * 1237 * Return 0 on success, and negative on failure 1238 */ 1239 static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, 1240 struct ethtool_coalesce *ec) 1241 { 1242 struct idpf_vport *vport; 1243 int err; 1244 1245 idpf_vport_ctrl_lock(netdev); 1246 vport = idpf_netdev_to_vport(netdev); 1247 1248 err = idpf_set_q_coalesce(vport, ec, q_num, false); 1249 if (err) { 1250 idpf_vport_ctrl_unlock(netdev); 1251 1252 return err; 1253 } 1254 1255 err = idpf_set_q_coalesce(vport, ec, q_num, true); 1256 1257 idpf_vport_ctrl_unlock(netdev); 1258 1259 return err; 1260 } 1261 1262 /** 1263 * idpf_get_msglevel - Get debug message level 1264 * @netdev: network interface device structure 1265 * 1266 * Returns current debug message level. 1267 */ 1268 static u32 idpf_get_msglevel(struct net_device *netdev) 1269 { 1270 struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); 1271 1272 return adapter->msg_enable; 1273 } 1274 1275 /** 1276 * idpf_set_msglevel - Set debug message level 1277 * @netdev: network interface device structure 1278 * @data: message level 1279 * 1280 * Set current debug message level. Higher values cause the driver to 1281 * be noisier. 1282 */ 1283 static void idpf_set_msglevel(struct net_device *netdev, u32 data) 1284 { 1285 struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); 1286 1287 adapter->msg_enable = data; 1288 } 1289 1290 /** 1291 * idpf_get_link_ksettings - Get Link Speed and Duplex settings 1292 * @netdev: network interface device structure 1293 * @cmd: ethtool command 1294 * 1295 * Reports speed/duplex settings. 1296 **/ 1297 static int idpf_get_link_ksettings(struct net_device *netdev, 1298 struct ethtool_link_ksettings *cmd) 1299 { 1300 struct idpf_netdev_priv *np = netdev_priv(netdev); 1301 1302 ethtool_link_ksettings_zero_link_mode(cmd, supported); 1303 cmd->base.autoneg = AUTONEG_DISABLE; 1304 cmd->base.port = PORT_NONE; 1305 if (netif_carrier_ok(netdev)) { 1306 cmd->base.duplex = DUPLEX_FULL; 1307 cmd->base.speed = np->link_speed_mbps; 1308 } else { 1309 cmd->base.duplex = DUPLEX_UNKNOWN; 1310 cmd->base.speed = SPEED_UNKNOWN; 1311 } 1312 1313 return 0; 1314 } 1315 1316 /** 1317 * idpf_get_timestamp_filters - Get the supported timestamping mode 1318 * @vport: Virtual port structure 1319 * @info: ethtool timestamping info structure 1320 * 1321 * Get the Tx/Rx timestamp filters. 1322 */ 1323 static void idpf_get_timestamp_filters(const struct idpf_vport *vport, 1324 struct kernel_ethtool_ts_info *info) 1325 { 1326 info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE | 1327 SOF_TIMESTAMPING_RAW_HARDWARE; 1328 1329 info->tx_types = BIT(HWTSTAMP_TX_OFF); 1330 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); 1331 1332 if (!vport->tx_tstamp_caps || 1333 vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE) 1334 return; 1335 1336 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | 1337 SOF_TIMESTAMPING_TX_HARDWARE; 1338 1339 info->tx_types |= BIT(HWTSTAMP_TX_ON); 1340 } 1341 1342 /** 1343 * idpf_get_ts_info - Get device PHC association 1344 * @netdev: network interface device structure 1345 * @info: ethtool timestamping info structure 1346 * 1347 * Return: 0 on success, -errno otherwise. 1348 */ 1349 static int idpf_get_ts_info(struct net_device *netdev, 1350 struct kernel_ethtool_ts_info *info) 1351 { 1352 struct idpf_netdev_priv *np = netdev_priv(netdev); 1353 struct idpf_vport *vport; 1354 int err = 0; 1355 1356 if (!mutex_trylock(&np->adapter->vport_ctrl_lock)) 1357 return -EBUSY; 1358 1359 vport = idpf_netdev_to_vport(netdev); 1360 1361 if (!vport->adapter->ptp) { 1362 err = -EOPNOTSUPP; 1363 goto unlock; 1364 } 1365 1366 if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) && 1367 vport->adapter->ptp->clock) { 1368 info->phc_index = ptp_clock_index(vport->adapter->ptp->clock); 1369 idpf_get_timestamp_filters(vport, info); 1370 } else { 1371 pci_dbg(vport->adapter->pdev, "PTP clock not detected\n"); 1372 err = ethtool_op_get_ts_info(netdev, info); 1373 } 1374 1375 unlock: 1376 mutex_unlock(&np->adapter->vport_ctrl_lock); 1377 1378 return err; 1379 } 1380 1381 static const struct ethtool_ops idpf_ethtool_ops = { 1382 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1383 ETHTOOL_COALESCE_USE_ADAPTIVE, 1384 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, 1385 .get_msglevel = idpf_get_msglevel, 1386 .set_msglevel = idpf_set_msglevel, 1387 .get_link = ethtool_op_get_link, 1388 .get_coalesce = idpf_get_coalesce, 1389 .set_coalesce = idpf_set_coalesce, 1390 .get_per_queue_coalesce = idpf_get_per_q_coalesce, 1391 .set_per_queue_coalesce = idpf_set_per_q_coalesce, 1392 .get_ethtool_stats = idpf_get_ethtool_stats, 1393 .get_strings = idpf_get_strings, 1394 .get_sset_count = idpf_get_sset_count, 1395 .get_channels = idpf_get_channels, 1396 .get_rxnfc = idpf_get_rxnfc, 1397 .get_rxfh_key_size = idpf_get_rxfh_key_size, 1398 .get_rxfh_indir_size = idpf_get_rxfh_indir_size, 1399 .get_rxfh = idpf_get_rxfh, 1400 .set_rxfh = idpf_set_rxfh, 1401 .set_channels = idpf_set_channels, 1402 .get_ringparam = idpf_get_ringparam, 1403 .set_ringparam = idpf_set_ringparam, 1404 .get_link_ksettings = idpf_get_link_ksettings, 1405 .get_ts_info = idpf_get_ts_info, 1406 }; 1407 1408 /** 1409 * idpf_set_ethtool_ops - Initialize ethtool ops struct 1410 * @netdev: network interface device structure 1411 * 1412 * Sets ethtool ops struct in our netdev so that ethtool can call 1413 * our functions. 1414 */ 1415 void idpf_set_ethtool_ops(struct net_device *netdev) 1416 { 1417 netdev->ethtool_ops = &idpf_ethtool_ops; 1418 } 1419