// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strlcpy(info->driver, "gve", sizeof(info->driver));
	strlcpy(info->version, gve_version_str, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill @data in the same order as gve_get_strings() lays out the names:
 * main stats, per-RX-queue stats, per-TX-queue stats, then adminq stats.
 */
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, ring = 0;
	     ring < priv->tx_cfg.num_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	/* Skip tx_dropped */
	i++;

	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

				data[i++] = value;
			}
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			data[i++] = tx->req;
			data[i++] = tx->done;
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
									  tx));
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
				data[i++] = value;
			}
		}
	} else {
		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)value;
		if (len > PAGE_SIZE / 2)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;

	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats turned off and */
	/* delete report stats timer. */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			priv->tx_cfg.num_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
			sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;
	return err;
}

const struct ethtool_ops gve_ethtool_ops = {
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_ringparam = gve_get_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings
};