// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
	"adminq_query_flow_rules", "adminq_cfg_flow_rule",
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_puts(&s, gve_gstrings_adminq_stats[i]);

		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts,
		tx_bytes, tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}

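	/* Aggregate per-ring counters into the device-wide main stats. Each
	 * ring is read under a u64_stats fetch/retry loop so that 64-bit
	 * counters are sampled consistently on 32-bit architectures.
	 */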
	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

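	/* Per-queue stats follow the main stats: one fixed-size block per rx
	 * ring, then one per tx ring, in the same order as the string tables
	 * above. NIC-side values come from the stats report; rings that were
	 * never started have no report entries, so their slots are skipped.
	 */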
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	/* The boundary between driver stats and NIC stats shifts if there are
	 * stopped queues.
	 */
	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
		NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
		(priv->rx_cfg.num_queues - num_stopped_rxqs) +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
			net_err_ratelimited("Invalid rxq id in NIC stats\n");
			continue;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
		(num_tx_queues - num_stopped_txqs) +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
	data[i++] = priv->adminq_get_ptype_map_cnt;
	data[i++] = priv->adminq_query_flow_rules_cnt;
	data[i++] = priv->adminq_cfg_flow_rule_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

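	/* Snapshot the current channel configuration for validation below. */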
	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static int gve_adjust_ring_sizes(struct gve_priv *priv,
				 u16 new_tx_desc_cnt,
				 u16 new_rx_desc_cnt)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	int err;

	/* get current queue configuration */
	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	/* copy over the new ring_size from ethtool */
	tx_alloc_cfg.ring_size = new_tx_desc_cnt;
	rx_alloc_cfg.ring_size = new_rx_desc_cnt;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	}

	/* Set new ring_size for the next up */
	priv->tx_desc_cnt = new_tx_desc_cnt;
	priv->rx_desc_cnt = new_rx_desc_cnt;

	return 0;
}

static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}

static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u16 new_tx_cnt, new_rx_cnt;
	int err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
	if (err)
		return err;

	if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
		return 0;

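	/* Changing the descriptor counts requires device support for ring
	 * size modification.
	 */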
	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	new_tx_cnt = cmd->tx_pending;
	new_rx_cnt = cmd->rx_pending;

	if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
		return -EINVAL;

	return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats turned off and
	 * delete report stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
				   sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = gve_add_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = gve_del_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXFH:
		err = -EOPNOTSUPP;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_cfg.num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (!priv->max_flow_rules)
			return -EOPNOTSUPP;

		err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
		if (err)
			return err;

		cmd->rule_cnt = priv->num_flow_rules;
		cmd->data = priv->max_flow_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = gve_get_flow_rule_entry(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = -EOPNOTSUPP;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.set_rxnfc = gve_set_rxnfc,
	.get_rxnfc = gve_get_rxnfc,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = ethtool_op_get_ts_info,
};
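/*
 * Illustrative userspace invocations for the ops above (the interface name
 * "eth0" is an example, not part of the driver):
 *
 *   ethtool -S eth0                          # gve_get_ethtool_stats
 *   ethtool -L eth0 rx 8 tx 8                # gve_set_channels
 *   ethtool -G eth0 rx 2048 tx 1024          # gve_set_ringparam
 *   ethtool -G eth0 tcp-data-split on        # header split via set_ringparam
 *   ethtool -C eth0 rx-usecs 20 tx-usecs 20  # gve_set_coalesce (DQO only)
 *   ethtool --set-priv-flags eth0 report-stats on   # gve_set_priv_flags
 */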