1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* 3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 4 */ 5 6 #include <linux/ethtool.h> 7 #include <linux/pci.h> 8 9 #include "ena_netdev.h" 10 #include "ena_xdp.h" 11 12 struct ena_stats { 13 char name[ETH_GSTRING_LEN]; 14 int stat_offset; 15 }; 16 17 struct ena_hw_metrics { 18 char name[ETH_GSTRING_LEN]; 19 }; 20 21 #define ENA_STAT_ENA_COM_ENTRY(stat) { \ 22 .name = #stat, \ 23 .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \ 24 } 25 26 #define ENA_STAT_ENTRY(stat, stat_type) { \ 27 .name = #stat, \ 28 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \ 29 } 30 31 #define ENA_STAT_HW_ENTRY(stat, stat_type) { \ 32 .name = #stat, \ 33 .stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \ 34 } 35 36 #define ENA_STAT_RX_ENTRY(stat) \ 37 ENA_STAT_ENTRY(stat, rx) 38 39 #define ENA_STAT_TX_ENTRY(stat) \ 40 ENA_STAT_ENTRY(stat, tx) 41 42 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 43 ENA_STAT_ENTRY(stat, dev) 44 45 #define ENA_STAT_ENI_ENTRY(stat) \ 46 ENA_STAT_HW_ENTRY(stat, eni_stats) 47 48 #define ENA_STAT_ENA_SRD_ENTRY(stat) \ 49 ENA_STAT_HW_ENTRY(stat, ena_srd_stats) 50 51 #define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \ 52 .name = #stat, \ 53 .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \ 54 } 55 56 #define ENA_METRIC_ENI_ENTRY(stat) { \ 57 .name = #stat \ 58 } 59 60 static const struct ena_stats ena_stats_global_strings[] = { 61 ENA_STAT_GLOBAL_ENTRY(tx_timeout), 62 ENA_STAT_GLOBAL_ENTRY(suspend), 63 ENA_STAT_GLOBAL_ENTRY(resume), 64 ENA_STAT_GLOBAL_ENTRY(wd_expired), 65 ENA_STAT_GLOBAL_ENTRY(interface_up), 66 ENA_STAT_GLOBAL_ENTRY(interface_down), 67 ENA_STAT_GLOBAL_ENTRY(admin_q_pause), 68 ENA_STAT_GLOBAL_ENTRY(reset_fail), 69 }; 70 71 /* A partial list of hw stats. Used when admin command 72 * with type ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported 73 */ 74 static const struct ena_stats ena_stats_eni_strings[] = { 75 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), 76 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), 77 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), 78 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), 79 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), 80 }; 81 82 static const struct ena_hw_metrics ena_hw_stats_strings[] = { 83 ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded), 84 ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded), 85 ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded), 86 ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded), 87 ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded), 88 ENA_METRIC_ENI_ENTRY(conntrack_allowance_available), 89 }; 90 91 static const struct ena_stats ena_srd_info_strings[] = { 92 ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode), 93 ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts), 94 ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts), 95 ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts), 96 ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization) 97 }; 98 99 static const struct ena_stats ena_stats_tx_strings[] = { 100 ENA_STAT_TX_ENTRY(cnt), 101 ENA_STAT_TX_ENTRY(bytes), 102 ENA_STAT_TX_ENTRY(queue_stop), 103 ENA_STAT_TX_ENTRY(queue_wakeup), 104 ENA_STAT_TX_ENTRY(dma_mapping_err), 105 ENA_STAT_TX_ENTRY(linearize), 106 ENA_STAT_TX_ENTRY(linearize_failed), 107 ENA_STAT_TX_ENTRY(napi_comp), 108 ENA_STAT_TX_ENTRY(tx_poll), 109 ENA_STAT_TX_ENTRY(doorbells), 110 ENA_STAT_TX_ENTRY(prepare_ctx_err), 111 ENA_STAT_TX_ENTRY(bad_req_id), 112 ENA_STAT_TX_ENTRY(llq_buffer_copy), 113 ENA_STAT_TX_ENTRY(missed_tx), 114 ENA_STAT_TX_ENTRY(unmask_interrupt), 115 }; 116 117 static const struct ena_stats ena_stats_rx_strings[] = { 118 ENA_STAT_RX_ENTRY(cnt), 119 ENA_STAT_RX_ENTRY(bytes), 120 ENA_STAT_RX_ENTRY(rx_copybreak_pkt), 121 ENA_STAT_RX_ENTRY(csum_good), 122 ENA_STAT_RX_ENTRY(refil_partial), 123 ENA_STAT_RX_ENTRY(csum_bad), 124 ENA_STAT_RX_ENTRY(page_alloc_fail), 125 ENA_STAT_RX_ENTRY(skb_alloc_fail), 126 ENA_STAT_RX_ENTRY(dma_mapping_err), 127 ENA_STAT_RX_ENTRY(bad_desc_num), 128 ENA_STAT_RX_ENTRY(bad_req_id), 129 ENA_STAT_RX_ENTRY(empty_rx_ring), 130 ENA_STAT_RX_ENTRY(csum_unchecked), 131 ENA_STAT_RX_ENTRY(xdp_aborted), 132 ENA_STAT_RX_ENTRY(xdp_drop), 133 ENA_STAT_RX_ENTRY(xdp_pass), 134 ENA_STAT_RX_ENTRY(xdp_tx), 135 ENA_STAT_RX_ENTRY(xdp_invalid), 136 ENA_STAT_RX_ENTRY(xdp_redirect), 137 }; 138 139 static const struct ena_stats ena_stats_ena_com_strings[] = { 140 ENA_STAT_ENA_COM_ENTRY(aborted_cmd), 141 ENA_STAT_ENA_COM_ENTRY(submitted_cmd), 142 ENA_STAT_ENA_COM_ENTRY(completed_cmd), 143 ENA_STAT_ENA_COM_ENTRY(out_of_space), 144 ENA_STAT_ENA_COM_ENTRY(no_completion), 145 }; 146 147 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 148 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 149 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 150 #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) 151 #define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings) 152 #define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings) 153 #define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings) 154 155 static void ena_safe_update_stat(u64 *src, u64 *dst, 156 struct u64_stats_sync *syncp) 157 { 158 unsigned int start; 159 160 do { 161 start = u64_stats_fetch_begin(syncp); 162 *(dst) = *src; 163 } while (u64_stats_fetch_retry(syncp, start)); 164 } 165 166 static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data) 167 { 168 struct ena_com_dev *dev = adapter->ena_dev; 169 const struct ena_stats *ena_stats; 170 u64 *ptr; 171 int i; 172 173 if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) { 174 u32 supported_metrics_count; 175 int len; 176 177 supported_metrics_count = ena_com_get_customer_metric_count(dev); 178 len = supported_metrics_count * sizeof(u64); 179 180 /* Fill the data buffer, and advance its pointer */ 181 ena_com_get_customer_metrics(dev, (char *)(*data), len); 182 (*data) += supported_metrics_count; 183 184 } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) { 185 ena_com_get_eni_stats(dev, &adapter->eni_stats); 186 /* Updating regardless of rc - once we told ethtool how many stats we have 187 * it will print that much stats. We can't leave holes in the stats 188 */ 189 for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) { 190 ena_stats = &ena_stats_eni_strings[i]; 191 192 ptr = (u64 *)&adapter->eni_stats + 193 ena_stats->stat_offset; 194 195 ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); 196 } 197 } 198 199 if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) { 200 ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info); 201 /* Get ENA SRD mode */ 202 ptr = (u64 *)&adapter->ena_srd_info; 203 ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); 204 for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) { 205 ena_stats = &ena_srd_info_strings[i]; 206 /* Wrapped within an outer struct - need to accommodate an 207 * additional offset of the ENA SRD mode that was already processed 208 */ 209 ptr = (u64 *)&adapter->ena_srd_info + 210 ena_stats->stat_offset + 1; 211 212 ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); 213 } 214 } 215 } 216 217 static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) 218 { 219 const struct ena_stats *ena_stats; 220 struct ena_ring *ring; 221 222 u64 *ptr; 223 int i, j; 224 225 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 226 /* Tx stats */ 227 ring = &adapter->tx_ring[i]; 228 229 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { 230 ena_stats = &ena_stats_tx_strings[j]; 231 232 ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset; 233 234 ena_safe_update_stat(ptr, (*data)++, &ring->syncp); 235 } 236 /* XDP TX queues don't have a RX queue counterpart */ 237 if (!ENA_IS_XDP_INDEX(adapter, i)) { 238 /* Rx stats */ 239 ring = &adapter->rx_ring[i]; 240 241 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { 242 ena_stats = &ena_stats_rx_strings[j]; 243 244 ptr = (u64 *)&ring->rx_stats + 245 ena_stats->stat_offset; 246 247 ena_safe_update_stat(ptr, (*data)++, &ring->syncp); 248 } 249 } 250 } 251 } 252 253 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) 254 { 255 const struct ena_stats *ena_stats; 256 u64 *ptr; 257 int i; 258 259 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { 260 ena_stats = &ena_stats_ena_com_strings[i]; 261 262 ptr = (u64 *)&adapter->ena_dev->admin_queue.stats + 263 ena_stats->stat_offset; 264 265 *(*data)++ = *ptr; 266 } 267 } 268 269 static void ena_get_stats(struct ena_adapter *adapter, 270 u64 *data, 271 bool hw_stats_needed) 272 { 273 const struct ena_stats *ena_stats; 274 u64 *ptr; 275 int i; 276 277 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { 278 ena_stats = &ena_stats_global_strings[i]; 279 280 ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset; 281 282 ena_safe_update_stat(ptr, data++, &adapter->syncp); 283 } 284 285 if (hw_stats_needed) 286 ena_metrics_stats(adapter, &data); 287 288 ena_queue_stats(adapter, &data); 289 ena_dev_admin_queue_stats(adapter, &data); 290 } 291 292 static void ena_get_ethtool_stats(struct net_device *netdev, 293 struct ethtool_stats *stats, 294 u64 *data) 295 { 296 struct ena_adapter *adapter = netdev_priv(netdev); 297 298 ena_get_stats(adapter, data, true); 299 } 300 301 static int ena_get_sw_stats_count(struct ena_adapter *adapter) 302 { 303 return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) 304 + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX 305 + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 306 } 307 308 static int ena_get_hw_stats_count(struct ena_adapter *adapter) 309 { 310 struct ena_com_dev *dev = adapter->ena_dev; 311 int count; 312 313 count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO); 314 315 if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) 316 count += ena_com_get_customer_metric_count(dev); 317 else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) 318 count += ENA_STATS_ARRAY_ENI; 319 320 return count; 321 } 322 323 int ena_get_sset_count(struct net_device *netdev, int sset) 324 { 325 struct ena_adapter *adapter = netdev_priv(netdev); 326 327 switch (sset) { 328 case ETH_SS_STATS: 329 return ena_get_sw_stats_count(adapter) + 330 ena_get_hw_stats_count(adapter); 331 } 332 333 return -EOPNOTSUPP; 334 } 335 336 static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data) 337 { 338 struct ena_com_dev *dev = adapter->ena_dev; 339 const struct ena_hw_metrics *ena_metrics; 340 const struct ena_stats *ena_stats; 341 int i; 342 343 if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) { 344 for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) { 345 if (ena_com_get_customer_metric_support(dev, i)) { 346 ena_metrics = &ena_hw_stats_strings[i]; 347 ethtool_puts(data, ena_metrics->name); 348 } 349 } 350 } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) { 351 for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) { 352 ena_stats = &ena_stats_eni_strings[i]; 353 ethtool_puts(data, ena_stats->name); 354 } 355 } 356 357 if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) { 358 for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) { 359 ena_stats = &ena_srd_info_strings[i]; 360 ethtool_puts(data, ena_stats->name); 361 } 362 } 363 } 364 365 static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) 366 { 367 const struct ena_stats *ena_stats; 368 bool is_xdp; 369 int i, j; 370 371 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 372 is_xdp = ENA_IS_XDP_INDEX(adapter, i); 373 /* Tx stats */ 374 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { 375 ena_stats = &ena_stats_tx_strings[j]; 376 377 ethtool_sprintf(data, 378 "queue_%u_%s_%s", i, 379 is_xdp ? "xdp_tx" : "tx", 380 ena_stats->name); 381 } 382 383 /* In XDP there isn't an RX queue counterpart */ 384 if (is_xdp) 385 continue; 386 387 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { 388 ena_stats = &ena_stats_rx_strings[j]; 389 390 ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name); 391 } 392 } 393 } 394 395 static void ena_com_dev_strings(u8 **data) 396 { 397 const struct ena_stats *ena_stats; 398 int i; 399 400 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { 401 ena_stats = &ena_stats_ena_com_strings[i]; 402 403 ethtool_sprintf(data, 404 "ena_admin_q_%s", ena_stats->name); 405 } 406 } 407 408 static void ena_get_strings(struct ena_adapter *adapter, 409 u8 *data, 410 bool hw_stats_needed) 411 { 412 const struct ena_stats *ena_stats; 413 int i; 414 415 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { 416 ena_stats = &ena_stats_global_strings[i]; 417 ethtool_puts(&data, ena_stats->name); 418 } 419 420 if (hw_stats_needed) 421 ena_metrics_stats_strings(adapter, &data); 422 423 ena_queue_strings(adapter, &data); 424 ena_com_dev_strings(&data); 425 } 426 427 static void ena_get_ethtool_strings(struct net_device *netdev, 428 u32 sset, 429 u8 *data) 430 { 431 struct ena_adapter *adapter = netdev_priv(netdev); 432 433 switch (sset) { 434 case ETH_SS_STATS: 435 ena_get_strings(adapter, data, true); 436 break; 437 } 438 } 439 440 static int ena_get_link_ksettings(struct net_device *netdev, 441 struct ethtool_link_ksettings *link_ksettings) 442 { 443 struct ena_adapter *adapter = netdev_priv(netdev); 444 struct ena_com_dev *ena_dev = adapter->ena_dev; 445 struct ena_admin_get_feature_link_desc *link; 446 struct ena_admin_get_feat_resp feat_resp; 447 int rc; 448 449 rc = ena_com_get_link_params(ena_dev, &feat_resp); 450 if (rc) 451 return rc; 452 453 link = &feat_resp.u.link; 454 link_ksettings->base.speed = link->speed; 455 456 if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) { 457 ethtool_link_ksettings_add_link_mode(link_ksettings, 458 supported, Autoneg); 459 ethtool_link_ksettings_add_link_mode(link_ksettings, 460 supported, Autoneg); 461 } 462 463 link_ksettings->base.autoneg = 464 (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ? 465 AUTONEG_ENABLE : AUTONEG_DISABLE; 466 467 link_ksettings->base.duplex = DUPLEX_FULL; 468 469 return 0; 470 } 471 472 static int ena_get_coalesce(struct net_device *net_dev, 473 struct ethtool_coalesce *coalesce, 474 struct kernel_ethtool_coalesce *kernel_coal, 475 struct netlink_ext_ack *extack) 476 { 477 struct ena_adapter *adapter = netdev_priv(net_dev); 478 struct ena_com_dev *ena_dev = adapter->ena_dev; 479 480 if (!ena_com_interrupt_moderation_supported(ena_dev)) 481 return -EOPNOTSUPP; 482 483 coalesce->tx_coalesce_usecs = 484 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * 485 ena_dev->intr_delay_resolution; 486 487 coalesce->rx_coalesce_usecs = 488 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) 489 * ena_dev->intr_delay_resolution; 490 491 coalesce->use_adaptive_rx_coalesce = 492 ena_com_get_adaptive_moderation_enabled(ena_dev); 493 494 return 0; 495 } 496 497 static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) 498 { 499 unsigned int val; 500 int i; 501 502 val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev); 503 504 for (i = 0; i < adapter->num_io_queues; i++) 505 adapter->tx_ring[i].smoothed_interval = val; 506 } 507 508 static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) 509 { 510 unsigned int val; 511 int i; 512 513 val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev); 514 515 for (i = 0; i < adapter->num_io_queues; i++) 516 adapter->rx_ring[i].smoothed_interval = val; 517 } 518 519 static int ena_set_coalesce(struct net_device *net_dev, 520 struct ethtool_coalesce *coalesce, 521 struct kernel_ethtool_coalesce *kernel_coal, 522 struct netlink_ext_ack *extack) 523 { 524 struct ena_adapter *adapter = netdev_priv(net_dev); 525 struct ena_com_dev *ena_dev = adapter->ena_dev; 526 int rc; 527 528 if (!ena_com_interrupt_moderation_supported(ena_dev)) 529 return -EOPNOTSUPP; 530 531 rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 532 coalesce->tx_coalesce_usecs); 533 if (rc) 534 return rc; 535 536 ena_update_tx_rings_nonadaptive_intr_moderation(adapter); 537 538 rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 539 coalesce->rx_coalesce_usecs); 540 if (rc) 541 return rc; 542 543 ena_update_rx_rings_nonadaptive_intr_moderation(adapter); 544 545 if (coalesce->use_adaptive_rx_coalesce && 546 !ena_com_get_adaptive_moderation_enabled(ena_dev)) 547 ena_com_enable_adaptive_moderation(ena_dev); 548 549 if (!coalesce->use_adaptive_rx_coalesce && 550 ena_com_get_adaptive_moderation_enabled(ena_dev)) 551 ena_com_disable_adaptive_moderation(ena_dev); 552 553 return 0; 554 } 555 556 static u32 ena_get_msglevel(struct net_device *netdev) 557 { 558 struct ena_adapter *adapter = netdev_priv(netdev); 559 560 return adapter->msg_enable; 561 } 562 563 static void ena_set_msglevel(struct net_device *netdev, u32 value) 564 { 565 struct ena_adapter *adapter = netdev_priv(netdev); 566 567 adapter->msg_enable = value; 568 } 569 570 static void ena_get_drvinfo(struct net_device *dev, 571 struct ethtool_drvinfo *info) 572 { 573 struct ena_adapter *adapter = netdev_priv(dev); 574 ssize_t ret = 0; 575 576 ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 577 if (ret < 0) 578 netif_dbg(adapter, drv, dev, 579 "module name will be truncated, status = %zd\n", ret); 580 581 ret = strscpy(info->bus_info, pci_name(adapter->pdev), 582 sizeof(info->bus_info)); 583 if (ret < 0) 584 netif_dbg(adapter, drv, dev, 585 "bus info will be truncated, status = %zd\n", ret); 586 } 587 588 static void ena_get_ringparam(struct net_device *netdev, 589 struct ethtool_ringparam *ring, 590 struct kernel_ethtool_ringparam *kernel_ring, 591 struct netlink_ext_ack *extack) 592 { 593 struct ena_adapter *adapter = netdev_priv(netdev); 594 595 ring->tx_max_pending = adapter->max_tx_ring_size; 596 ring->rx_max_pending = adapter->max_rx_ring_size; 597 if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 598 bool large_llq_supported = adapter->large_llq_header_supported; 599 600 kernel_ring->tx_push = true; 601 kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size; 602 if (large_llq_supported) 603 kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER; 604 else 605 kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER; 606 } else { 607 kernel_ring->tx_push = false; 608 kernel_ring->tx_push_buf_max_len = 0; 609 kernel_ring->tx_push_buf_len = 0; 610 } 611 612 ring->tx_pending = adapter->tx_ring[0].ring_size; 613 ring->rx_pending = adapter->rx_ring[0].ring_size; 614 } 615 616 static int ena_set_ringparam(struct net_device *netdev, 617 struct ethtool_ringparam *ring, 618 struct kernel_ethtool_ringparam *kernel_ring, 619 struct netlink_ext_ack *extack) 620 { 621 struct ena_adapter *adapter = netdev_priv(netdev); 622 u32 new_tx_size, new_rx_size, new_tx_push_buf_len; 623 bool changed = false; 624 625 new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ? 626 ENA_MIN_RING_SIZE : ring->tx_pending; 627 new_tx_size = rounddown_pow_of_two(new_tx_size); 628 629 new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ? 630 ENA_MIN_RING_SIZE : ring->rx_pending; 631 new_rx_size = rounddown_pow_of_two(new_rx_size); 632 633 changed |= new_tx_size != adapter->requested_tx_ring_size || 634 new_rx_size != adapter->requested_rx_ring_size; 635 636 /* This value is ignored if LLQ is not supported */ 637 new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size; 638 639 if ((adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) != 640 kernel_ring->tx_push) { 641 NL_SET_ERR_MSG_MOD(extack, "Push mode state cannot be modified"); 642 return -EINVAL; 643 } 644 645 /* Validate that the push buffer is supported on the underlying device */ 646 if (kernel_ring->tx_push_buf_len) { 647 enum ena_admin_placement_policy_type placement; 648 649 new_tx_push_buf_len = kernel_ring->tx_push_buf_len; 650 651 placement = adapter->ena_dev->tx_mem_queue_type; 652 if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST) 653 return -EOPNOTSUPP; 654 655 if (new_tx_push_buf_len != ENA_LLQ_HEADER && 656 new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) { 657 bool large_llq_sup = adapter->large_llq_header_supported; 658 char large_llq_size_str[40]; 659 660 snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER); 661 662 NL_SET_ERR_MSG_FMT_MOD(extack, 663 "Supported tx push buff values: [%lu%s]", 664 ENA_LLQ_HEADER, 665 large_llq_sup ? large_llq_size_str : ""); 666 667 return -EINVAL; 668 } 669 670 changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size; 671 } 672 673 if (!changed) 674 return 0; 675 676 return ena_update_queue_params(adapter, new_tx_size, new_rx_size, 677 new_tx_push_buf_len); 678 } 679 680 static u32 ena_flow_hash_to_flow_type(u16 hash_fields) 681 { 682 u32 data = 0; 683 684 if (hash_fields & ENA_ADMIN_RSS_L2_DA) 685 data |= RXH_L2DA; 686 687 if (hash_fields & ENA_ADMIN_RSS_L3_DA) 688 data |= RXH_IP_DST; 689 690 if (hash_fields & ENA_ADMIN_RSS_L3_SA) 691 data |= RXH_IP_SRC; 692 693 if (hash_fields & ENA_ADMIN_RSS_L4_DP) 694 data |= RXH_L4_B_2_3; 695 696 if (hash_fields & ENA_ADMIN_RSS_L4_SP) 697 data |= RXH_L4_B_0_1; 698 699 return data; 700 } 701 702 static u16 ena_flow_data_to_flow_hash(u32 hash_fields) 703 { 704 u16 data = 0; 705 706 if (hash_fields & RXH_L2DA) 707 data |= ENA_ADMIN_RSS_L2_DA; 708 709 if (hash_fields & RXH_IP_DST) 710 data |= ENA_ADMIN_RSS_L3_DA; 711 712 if (hash_fields & RXH_IP_SRC) 713 data |= ENA_ADMIN_RSS_L3_SA; 714 715 if (hash_fields & RXH_L4_B_2_3) 716 data |= ENA_ADMIN_RSS_L4_DP; 717 718 if (hash_fields & RXH_L4_B_0_1) 719 data |= ENA_ADMIN_RSS_L4_SP; 720 721 return data; 722 } 723 724 static int ena_get_rss_hash(struct ena_com_dev *ena_dev, 725 struct ethtool_rxnfc *cmd) 726 { 727 enum ena_admin_flow_hash_proto proto; 728 u16 hash_fields; 729 int rc; 730 731 cmd->data = 0; 732 733 switch (cmd->flow_type) { 734 case TCP_V4_FLOW: 735 proto = ENA_ADMIN_RSS_TCP4; 736 break; 737 case UDP_V4_FLOW: 738 proto = ENA_ADMIN_RSS_UDP4; 739 break; 740 case TCP_V6_FLOW: 741 proto = ENA_ADMIN_RSS_TCP6; 742 break; 743 case UDP_V6_FLOW: 744 proto = ENA_ADMIN_RSS_UDP6; 745 break; 746 case IPV4_FLOW: 747 proto = ENA_ADMIN_RSS_IP4; 748 break; 749 case IPV6_FLOW: 750 proto = ENA_ADMIN_RSS_IP6; 751 break; 752 case ETHER_FLOW: 753 proto = ENA_ADMIN_RSS_NOT_IP; 754 break; 755 case AH_V4_FLOW: 756 case ESP_V4_FLOW: 757 case AH_V6_FLOW: 758 case ESP_V6_FLOW: 759 case SCTP_V4_FLOW: 760 case AH_ESP_V4_FLOW: 761 return -EOPNOTSUPP; 762 default: 763 return -EINVAL; 764 } 765 766 rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields); 767 if (rc) 768 return rc; 769 770 cmd->data = ena_flow_hash_to_flow_type(hash_fields); 771 772 return 0; 773 } 774 775 static int ena_set_rss_hash(struct ena_com_dev *ena_dev, 776 struct ethtool_rxnfc *cmd) 777 { 778 enum ena_admin_flow_hash_proto proto; 779 u16 hash_fields; 780 781 switch (cmd->flow_type) { 782 case TCP_V4_FLOW: 783 proto = ENA_ADMIN_RSS_TCP4; 784 break; 785 case UDP_V4_FLOW: 786 proto = ENA_ADMIN_RSS_UDP4; 787 break; 788 case TCP_V6_FLOW: 789 proto = ENA_ADMIN_RSS_TCP6; 790 break; 791 case UDP_V6_FLOW: 792 proto = ENA_ADMIN_RSS_UDP6; 793 break; 794 case IPV4_FLOW: 795 proto = ENA_ADMIN_RSS_IP4; 796 break; 797 case IPV6_FLOW: 798 proto = ENA_ADMIN_RSS_IP6; 799 break; 800 case ETHER_FLOW: 801 proto = ENA_ADMIN_RSS_NOT_IP; 802 break; 803 case AH_V4_FLOW: 804 case ESP_V4_FLOW: 805 case AH_V6_FLOW: 806 case ESP_V6_FLOW: 807 case SCTP_V4_FLOW: 808 case AH_ESP_V4_FLOW: 809 return -EOPNOTSUPP; 810 default: 811 return -EINVAL; 812 } 813 814 hash_fields = ena_flow_data_to_flow_hash(cmd->data); 815 816 return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields); 817 } 818 819 static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) 820 { 821 struct ena_adapter *adapter = netdev_priv(netdev); 822 int rc = 0; 823 824 switch (info->cmd) { 825 case ETHTOOL_SRXFH: 826 rc = ena_set_rss_hash(adapter->ena_dev, info); 827 break; 828 case ETHTOOL_SRXCLSRLDEL: 829 case ETHTOOL_SRXCLSRLINS: 830 default: 831 netif_err(adapter, drv, netdev, 832 "Command parameter %d is not supported\n", info->cmd); 833 rc = -EOPNOTSUPP; 834 } 835 836 return rc; 837 } 838 839 static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, 840 u32 *rules) 841 { 842 struct ena_adapter *adapter = netdev_priv(netdev); 843 int rc = 0; 844 845 switch (info->cmd) { 846 case ETHTOOL_GRXRINGS: 847 info->data = adapter->num_io_queues; 848 rc = 0; 849 break; 850 case ETHTOOL_GRXFH: 851 rc = ena_get_rss_hash(adapter->ena_dev, info); 852 break; 853 case ETHTOOL_GRXCLSRLCNT: 854 case ETHTOOL_GRXCLSRULE: 855 case ETHTOOL_GRXCLSRLALL: 856 default: 857 netif_err(adapter, drv, netdev, 858 "Command parameter %d is not supported\n", info->cmd); 859 rc = -EOPNOTSUPP; 860 } 861 862 return rc; 863 } 864 865 static u32 ena_get_rxfh_indir_size(struct net_device *netdev) 866 { 867 return ENA_RX_RSS_TABLE_SIZE; 868 } 869 870 static u32 ena_get_rxfh_key_size(struct net_device *netdev) 871 { 872 return ENA_HASH_KEY_SIZE; 873 } 874 875 static int ena_indirection_table_set(struct ena_adapter *adapter, 876 const u32 *indir) 877 { 878 struct ena_com_dev *ena_dev = adapter->ena_dev; 879 int i, rc; 880 881 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 882 rc = ena_com_indirect_table_fill_entry(ena_dev, 883 i, 884 ENA_IO_RXQ_IDX(indir[i])); 885 if (unlikely(rc)) { 886 netif_err(adapter, drv, adapter->netdev, 887 "Cannot fill indirect table (index is too large)\n"); 888 return rc; 889 } 890 } 891 892 rc = ena_com_indirect_table_set(ena_dev); 893 if (rc) { 894 netif_err(adapter, drv, adapter->netdev, 895 "Cannot set indirect table\n"); 896 return rc == -EPERM ? -EOPNOTSUPP : rc; 897 } 898 return rc; 899 } 900 901 static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) 902 { 903 struct ena_com_dev *ena_dev = adapter->ena_dev; 904 int i, rc; 905 906 if (!indir) 907 return 0; 908 909 rc = ena_com_indirect_table_get(ena_dev, indir); 910 if (rc) 911 return rc; 912 913 /* Our internal representation of the indices is: even indices 914 * for Tx and uneven indices for Rx. We need to convert the Rx 915 * indices to be consecutive 916 */ 917 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) 918 indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]); 919 920 return rc; 921 } 922 923 static int ena_get_rxfh(struct net_device *netdev, 924 struct ethtool_rxfh_param *rxfh) 925 { 926 struct ena_adapter *adapter = netdev_priv(netdev); 927 enum ena_admin_hash_functions ena_func; 928 u8 func; 929 int rc; 930 931 rc = ena_indirection_table_get(adapter, rxfh->indir); 932 if (rc) 933 return rc; 934 935 /* We call this function in order to check if the device 936 * supports getting/setting the hash function. 937 */ 938 rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func); 939 if (rc) { 940 if (rc == -EOPNOTSUPP) 941 rc = 0; 942 943 return rc; 944 } 945 946 rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key); 947 if (rc) 948 return rc; 949 950 switch (ena_func) { 951 case ENA_ADMIN_TOEPLITZ: 952 func = ETH_RSS_HASH_TOP; 953 break; 954 case ENA_ADMIN_CRC32: 955 func = ETH_RSS_HASH_CRC32; 956 break; 957 default: 958 netif_err(adapter, drv, netdev, 959 "Command parameter is not supported\n"); 960 return -EOPNOTSUPP; 961 } 962 963 rxfh->hfunc = func; 964 965 return 0; 966 } 967 968 static int ena_set_rxfh(struct net_device *netdev, 969 struct ethtool_rxfh_param *rxfh, 970 struct netlink_ext_ack *extack) 971 { 972 struct ena_adapter *adapter = netdev_priv(netdev); 973 struct ena_com_dev *ena_dev = adapter->ena_dev; 974 enum ena_admin_hash_functions func = 0; 975 int rc; 976 977 if (rxfh->indir) { 978 rc = ena_indirection_table_set(adapter, rxfh->indir); 979 if (rc) 980 return rc; 981 } 982 983 switch (rxfh->hfunc) { 984 case ETH_RSS_HASH_NO_CHANGE: 985 func = ena_com_get_current_hash_function(ena_dev); 986 break; 987 case ETH_RSS_HASH_TOP: 988 func = ENA_ADMIN_TOEPLITZ; 989 break; 990 case ETH_RSS_HASH_CRC32: 991 func = ENA_ADMIN_CRC32; 992 break; 993 default: 994 netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", 995 rxfh->hfunc); 996 return -EOPNOTSUPP; 997 } 998 999 if (rxfh->key || func) { 1000 rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key, 1001 ENA_HASH_KEY_SIZE, 1002 0xFFFFFFFF); 1003 if (unlikely(rc)) { 1004 netif_err(adapter, drv, netdev, "Cannot fill key\n"); 1005 return rc == -EPERM ? -EOPNOTSUPP : rc; 1006 } 1007 } 1008 1009 return 0; 1010 } 1011 1012 static void ena_get_channels(struct net_device *netdev, 1013 struct ethtool_channels *channels) 1014 { 1015 struct ena_adapter *adapter = netdev_priv(netdev); 1016 1017 channels->max_combined = adapter->max_num_io_queues; 1018 channels->combined_count = adapter->num_io_queues; 1019 } 1020 1021 static int ena_set_channels(struct net_device *netdev, 1022 struct ethtool_channels *channels) 1023 { 1024 struct ena_adapter *adapter = netdev_priv(netdev); 1025 u32 count = channels->combined_count; 1026 /* The check for max value is already done in ethtool */ 1027 if (count < ENA_MIN_NUM_IO_QUEUES) 1028 return -EINVAL; 1029 1030 if (!ena_xdp_legal_queue_count(adapter, count)) { 1031 if (ena_xdp_present(adapter)) 1032 return -EINVAL; 1033 1034 xdp_clear_features_flag(netdev); 1035 } else { 1036 xdp_set_features_flag(netdev, 1037 NETDEV_XDP_ACT_BASIC | 1038 NETDEV_XDP_ACT_REDIRECT); 1039 } 1040 1041 return ena_update_queue_count(adapter, count); 1042 } 1043 1044 static int ena_get_tunable(struct net_device *netdev, 1045 const struct ethtool_tunable *tuna, void *data) 1046 { 1047 struct ena_adapter *adapter = netdev_priv(netdev); 1048 int ret = 0; 1049 1050 switch (tuna->id) { 1051 case ETHTOOL_RX_COPYBREAK: 1052 *(u32 *)data = adapter->rx_copybreak; 1053 break; 1054 default: 1055 ret = -EINVAL; 1056 break; 1057 } 1058 1059 return ret; 1060 } 1061 1062 static int ena_set_tunable(struct net_device *netdev, 1063 const struct ethtool_tunable *tuna, 1064 const void *data) 1065 { 1066 struct ena_adapter *adapter = netdev_priv(netdev); 1067 int ret = 0; 1068 u32 len; 1069 1070 switch (tuna->id) { 1071 case ETHTOOL_RX_COPYBREAK: 1072 len = *(u32 *)data; 1073 ret = ena_set_rx_copybreak(adapter, len); 1074 break; 1075 default: 1076 ret = -EINVAL; 1077 break; 1078 } 1079 1080 return ret; 1081 } 1082 1083 static const struct ethtool_ops ena_ethtool_ops = { 1084 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1085 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 1086 .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH_BUF_LEN | 1087 ETHTOOL_RING_USE_TX_PUSH, 1088 .get_link_ksettings = ena_get_link_ksettings, 1089 .get_drvinfo = ena_get_drvinfo, 1090 .get_msglevel = ena_get_msglevel, 1091 .set_msglevel = ena_set_msglevel, 1092 .get_link = ethtool_op_get_link, 1093 .get_coalesce = ena_get_coalesce, 1094 .set_coalesce = ena_set_coalesce, 1095 .get_ringparam = ena_get_ringparam, 1096 .set_ringparam = ena_set_ringparam, 1097 .get_sset_count = ena_get_sset_count, 1098 .get_strings = ena_get_ethtool_strings, 1099 .get_ethtool_stats = ena_get_ethtool_stats, 1100 .get_rxnfc = ena_get_rxnfc, 1101 .set_rxnfc = ena_set_rxnfc, 1102 .get_rxfh_indir_size = ena_get_rxfh_indir_size, 1103 .get_rxfh_key_size = ena_get_rxfh_key_size, 1104 .get_rxfh = ena_get_rxfh, 1105 .set_rxfh = ena_set_rxfh, 1106 .get_channels = ena_get_channels, 1107 .set_channels = ena_set_channels, 1108 .get_tunable = ena_get_tunable, 1109 .set_tunable = ena_set_tunable, 1110 .get_ts_info = ethtool_op_get_ts_info, 1111 }; 1112 1113 void ena_set_ethtool_ops(struct net_device *netdev) 1114 { 1115 netdev->ethtool_ops = &ena_ethtool_ops; 1116 } 1117 1118 static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf) 1119 { 1120 struct net_device *netdev = adapter->netdev; 1121 u8 *strings_buf; 1122 u64 *data_buf; 1123 int strings_num; 1124 int i, rc; 1125 1126 strings_num = ena_get_sw_stats_count(adapter); 1127 if (strings_num <= 0) { 1128 netif_err(adapter, drv, netdev, "Can't get stats num\n"); 1129 return; 1130 } 1131 1132 strings_buf = devm_kcalloc(&adapter->pdev->dev, 1133 ETH_GSTRING_LEN, strings_num, 1134 GFP_ATOMIC); 1135 if (!strings_buf) { 1136 netif_err(adapter, drv, netdev, 1137 "Failed to allocate strings_buf\n"); 1138 return; 1139 } 1140 1141 data_buf = devm_kcalloc(&adapter->pdev->dev, 1142 strings_num, sizeof(u64), 1143 GFP_ATOMIC); 1144 if (!data_buf) { 1145 netif_err(adapter, drv, netdev, 1146 "Failed to allocate data buf\n"); 1147 devm_kfree(&adapter->pdev->dev, strings_buf); 1148 return; 1149 } 1150 1151 ena_get_strings(adapter, strings_buf, false); 1152 ena_get_stats(adapter, data_buf, false); 1153 1154 /* If there is a buffer, dump stats, otherwise print them to dmesg */ 1155 if (buf) 1156 for (i = 0; i < strings_num; i++) { 1157 rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64), 1158 "%s %llu\n", 1159 strings_buf + i * ETH_GSTRING_LEN, 1160 data_buf[i]); 1161 buf += rc; 1162 } 1163 else 1164 for (i = 0; i < strings_num; i++) 1165 netif_err(adapter, drv, netdev, "%s: %llu\n", 1166 strings_buf + i * ETH_GSTRING_LEN, 1167 data_buf[i]); 1168 1169 devm_kfree(&adapter->pdev->dev, strings_buf); 1170 devm_kfree(&adapter->pdev->dev, data_buf); 1171 } 1172 1173 void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf) 1174 { 1175 if (!buf) 1176 return; 1177 1178 ena_dump_stats_ex(adapter, buf); 1179 } 1180 1181 void ena_dump_stats_to_dmesg(struct ena_adapter *adapter) 1182 { 1183 ena_dump_stats_ex(adapter, NULL); 1184 } 1185