/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(suspend),
	ENA_STAT_GLOBAL_ENTRY(resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(napi_comp),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(llq_buffer_copy),
	ENA_STAT_TX_ENTRY(missed_tx),
	ENA_STAT_TX_ENTRY(unmask_interrupt),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
	ENA_STAT_RX_ENTRY(csum_good),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
	ENA_STAT_RX_ENTRY(empty_rx_ring),
	ENA_STAT_RX_ENTRY(csum_unchecked),
};
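
/* Each table above maps an ethtool stat name to its offsetof() within the
 * corresponding stats structure (ena_stats_dev, ena_stats_tx, ena_stats_rx),
 * and the table below does the same for struct ena_com_stats_admin. The same
 * tables drive both the string and the value callbacks, so the reported names
 * and counter values stay aligned by construction.
 */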

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}

static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	struct ena_ring *ring;

	u64 *ptr;
	int i, j;

	for (i = 0; i < adapter->num_io_queues; i++) {
		/* Tx stats */
		ring = &adapter->tx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->tx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}

		/* Rx stats */
		ring = &adapter->rx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->rx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}
	}
}

static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	u32 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
			(uintptr_t)ena_stats->stat_offset);

		*(*data)++ = *ptr;
	}
}

static void ena_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
			(uintptr_t)ena_stats->stat_offset);

		ena_safe_update_stat(ptr, data++, &adapter->syncp);
	}

	ena_queue_stats(adapter, &data);
	ena_dev_admin_queue_stats(adapter, &data);
}

int ena_get_sset_count(struct net_device *netdev, int sset)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
		+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
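
/* The per-queue name/value pairs built below are what userspace sees when it
 * dumps the extended statistics, for example:
 *	ethtool -S eth0
 * ("eth0" is only an illustrative interface name.)
 */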

static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
	const struct ena_stats *ena_stats;
	int i, j;

	for (i = 0; i < adapter->num_io_queues; i++) {
		/* Tx stats */
		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_tx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
		/* Rx stats */
		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_rx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
	}
}

static void ena_com_dev_strings(u8 **data)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		snprintf(*data, ETH_GSTRING_LEN,
			 "ena_admin_q_%s", ena_stats->name);
		(*data) += ETH_GSTRING_LEN;
	}
}

static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	ena_queue_strings(adapter, &data);
	ena_com_dev_strings(&data);
}

static int ena_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_admin_get_feature_link_desc *link;
	struct ena_admin_get_feat_resp feat_resp;
	int rc;

	rc = ena_com_get_link_params(ena_dev, &feat_resp);
	if (rc)
		return rc;

	link = &feat_resp.u.link;
	link_ksettings->base.speed = link->speed;

	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg =
		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	link_ksettings->base.duplex = DUPLEX_FULL;

	return 0;
}
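
/* Interrupt moderation is exposed through the standard coalescing interface,
 * e.g. (interface name illustrative):
 *	ethtool -c eth0
 *	ethtool -C eth0 rx-usecs 64 tx-usecs 64 adaptive-rx on
 */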

static int ena_get_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
		/* the device doesn't support interrupt moderation */
		return -EOPNOTSUPP;
	}

	coalesce->tx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->rx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->use_adaptive_rx_coalesce =
		ena_com_get_adaptive_moderation_enabled(ena_dev);

	return 0;
}

static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->tx_ring[i].smoothed_interval = val;
}

static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].smoothed_interval = val;
}

static int ena_set_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
		/* the device doesn't support interrupt moderation */
		return -EOPNOTSUPP;
	}

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
								coalesce->tx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_tx_rings_intr_moderation(adapter);

	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
								coalesce->rx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_rx_rings_intr_moderation(adapter);

	if (coalesce->use_adaptive_rx_coalesce &&
	    !ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_enable_adaptive_moderation(ena_dev);

	if (!coalesce->use_adaptive_rx_coalesce &&
	    ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static void ena_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ring->tx_max_pending = adapter->max_tx_ring_size;
	ring->rx_max_pending = adapter->max_rx_ring_size;
	ring->tx_pending = adapter->tx_ring[0].ring_size;
	ring->rx_pending = adapter->rx_ring[0].ring_size;
}
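
/* Requested ring sizes are clamped to at least ENA_MIN_RING_SIZE and then
 * rounded down to a power of two, so the applied value may be smaller than
 * what was asked for. Example (interface name illustrative):
 *	ethtool -G eth0 rx 4096 tx 1024
 */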

static int ena_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_size, new_rx_size;

	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->tx_pending;
	new_tx_size = rounddown_pow_of_two(new_tx_size);

	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->rx_pending;
	new_rx_size = rounddown_pow_of_two(new_rx_size);

	if (new_tx_size == adapter->requested_tx_ring_size &&
	    new_rx_size == adapter->requested_rx_ring_size)
		return 0;

	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}

static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
	u32 data = 0;

	if (hash_fields & ENA_ADMIN_RSS_L2_DA)
		data |= RXH_L2DA;

	if (hash_fields & ENA_ADMIN_RSS_L3_DA)
		data |= RXH_IP_DST;

	if (hash_fields & ENA_ADMIN_RSS_L3_SA)
		data |= RXH_IP_SRC;

	if (hash_fields & ENA_ADMIN_RSS_L4_DP)
		data |= RXH_L4_B_2_3;

	if (hash_fields & ENA_ADMIN_RSS_L4_SP)
		data |= RXH_L4_B_0_1;

	return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
	u16 data = 0;

	if (hash_fields & RXH_L2DA)
		data |= ENA_ADMIN_RSS_L2_DA;

	if (hash_fields & RXH_IP_DST)
		data |= ENA_ADMIN_RSS_L3_DA;

	if (hash_fields & RXH_IP_SRC)
		data |= ENA_ADMIN_RSS_L3_SA;

	if (hash_fields & RXH_L4_B_2_3)
		data |= ENA_ADMIN_RSS_L4_DP;

	if (hash_fields & RXH_L4_B_0_1)
		data |= ENA_ADMIN_RSS_L4_SP;

	return data;
}

static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;
	int rc;

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
	if (rc)
		return rc;

	cmd->data = ena_flow_hash_to_flow_type(hash_fields);

	return 0;
}
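
/* The flow-hash get/set paths above and below correspond to the rx-flow-hash
 * ethtool commands, e.g. (interface name illustrative):
 *	ethtool -n eth0 rx-flow-hash tcp4
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 */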

static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	hash_fields = ena_flow_data_to_flow_hash(cmd->data);

	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = ena_set_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
			 u32 *rules)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_io_queues;
		rc = 0;
		break;
	case ETHTOOL_GRXFH:
		rc = ena_get_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
	return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
	return ENA_HASH_KEY_SIZE;
}

static int ena_indirection_table_set(struct ena_adapter *adapter,
				     const u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev,
						       i,
						       ENA_IO_RXQ_IDX(indir[i]));
		if (unlikely(rc)) {
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot fill indirect table (index is too large)\n");
			return rc;
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot set indirect table\n");
		return rc == -EPERM ? -EOPNOTSUPP : rc;
	}
	return rc;
}

static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	if (!indir)
		return 0;

	rc = ena_com_indirect_table_get(ena_dev, indir);
	if (rc)
		return rc;

	/* Our internal representation of the indices is: even indices
	 * for Tx and odd indices for Rx. We need to convert the Rx
	 * indices to be consecutive
	 */
	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);

	return rc;
}
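
/* The RSS hash key and indirection table handled below are the ones reported
 * and programmed through, for example (interface name illustrative):
 *	ethtool -x eth0
 *	ethtool -X eth0 hfunc toeplitz
 */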

static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	enum ena_admin_hash_functions ena_func;
	u8 func;
	int rc;

	rc = ena_indirection_table_get(adapter, indir);
	if (rc)
		return rc;

	/* We call this function in order to check if the device
	 * supports getting/setting the hash function.
	 */
	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			rc = 0;

		return rc;
	}

	rc = ena_com_get_hash_key(adapter->ena_dev, key);
	if (rc)
		return rc;

	switch (ena_func) {
	case ENA_ADMIN_TOEPLITZ:
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
		func = ETH_RSS_HASH_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter is not supported\n");
		return -EOPNOTSUPP;
	}

	if (hfunc)
		*hfunc = func;

	return 0;
}

static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	enum ena_admin_hash_functions func = 0;
	int rc;

	if (indir) {
		rc = ena_indirection_table_set(adapter, indir);
		if (rc)
			return rc;
	}

	switch (hfunc) {
	case ETH_RSS_HASH_NO_CHANGE:
		func = ena_com_get_current_hash_function(ena_dev);
		break;
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
	case ETH_RSS_HASH_CRC32:
		func = ENA_ADMIN_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
			  hfunc);
		return -EOPNOTSUPP;
	}

	if (key || func) {
		rc = ena_com_fill_hash_function(ena_dev, func, key,
						ENA_HASH_KEY_SIZE,
						0xFFFFFFFF);
		if (unlikely(rc)) {
			netif_err(adapter, drv, netdev, "Cannot fill key\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	return 0;
}
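
/* IO queue (channel) count management, exposed as combined channels, e.g.
 * (interface name illustrative):
 *	ethtool -l eth0
 *	ethtool -L eth0 combined 8
 */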

static void ena_get_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	channels->max_combined = adapter->max_num_io_queues;
	channels->combined_count = adapter->num_io_queues;
}

static int ena_set_channels(struct net_device *netdev,
			    struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 count = channels->combined_count;
	/* The check for max value is already done in ethtool */
	if (count < ENA_MIN_NUM_IO_QUEUES ||
	    (ena_xdp_present(adapter) &&
	    !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
		return -EINVAL;

	return ena_update_queue_count(adapter, count);
}

static int ena_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna, void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = adapter->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ena_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna,
			   const void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;
	u32 len;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)data;
		if (len > adapter->netdev->mtu) {
			ret = -EINVAL;
			break;
		}
		adapter->rx_copybreak = len;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops ena_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings	= ena_get_link_ksettings,
	.get_drvinfo		= ena_get_drvinfo,
	.get_msglevel		= ena_get_msglevel,
	.set_msglevel		= ena_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= ena_get_coalesce,
	.set_coalesce		= ena_set_coalesce,
	.get_ringparam		= ena_get_ringparam,
	.set_ringparam		= ena_set_ringparam,
	.get_sset_count		= ena_get_sset_count,
	.get_strings		= ena_get_strings,
	.get_ethtool_stats	= ena_get_ethtool_stats,
	.get_rxnfc		= ena_get_rxnfc,
	.set_rxnfc		= ena_set_rxnfc,
	.get_rxfh_indir_size	= ena_get_rxfh_indir_size,
	.get_rxfh_key_size	= ena_get_rxfh_key_size,
	.get_rxfh		= ena_get_rxfh,
	.set_rxfh		= ena_set_rxfh,
	.get_channels		= ena_get_channels,
	.set_channels		= ena_set_channels,
	.get_tunable		= ena_get_tunable,
	.set_tunable		= ena_set_tunable,
	.get_ts_info		= ethtool_op_get_ts_info,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ena_ethtool_ops;
}

static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
	struct net_device *netdev = adapter->netdev;
	u8 *strings_buf;
	u64 *data_buf;
	int strings_num;
	int i, rc;

	strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
	if (strings_num <= 0) {
		netif_err(adapter, drv, netdev, "Can't get stats num\n");
		return;
	}

	strings_buf = devm_kcalloc(&adapter->pdev->dev,
				   ETH_GSTRING_LEN, strings_num,
				   GFP_ATOMIC);
	if (!strings_buf) {
		netif_err(adapter, drv, netdev,
			  "failed to allocate strings_buf\n");
		return;
	}

	data_buf = devm_kcalloc(&adapter->pdev->dev,
				strings_num, sizeof(u64),
				GFP_ATOMIC);
	if (!data_buf) {
		netif_err(adapter, drv, netdev,
			  "failed to allocate data_buf\n");
		devm_kfree(&adapter->pdev->dev, strings_buf);
		return;
	}

	ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
	ena_get_ethtool_stats(netdev, NULL, data_buf);

	/* If there is a buffer, dump stats, otherwise print them to dmesg */
	if (buf)
		for (i = 0; i < strings_num; i++) {
			rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
				      "%s %llu\n",
				      strings_buf + i * ETH_GSTRING_LEN,
				      data_buf[i]);
			buf += rc;
		}
	else
		for (i = 0; i < strings_num; i++)
			netif_err(adapter, drv, netdev, "%s: %llu\n",
				  strings_buf + i * ETH_GSTRING_LEN,
				  data_buf[i]);

	devm_kfree(&adapter->pdev->dev, strings_buf);
	devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
	if (!buf)
		return;

	ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
	ena_dump_stats_ex(adapter, NULL);
}