// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
	"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
	"adminq_query_rss_cnt",
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN	ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN	ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS		ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS		ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN	ARRAY_SIZE(gve_gstrings_priv_flags)
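
/* Layout of the ETH_SS_STATS string table: main stats, then
 * NUM_GVE_RX_CNTS strings per RX queue, then NUM_GVE_TX_CNTS strings per
 * TX queue, then adminq stats. gve_get_sset_count() and
 * gve_get_ethtool_stats() below must agree with this order and count;
 * this full set is what "ethtool -S <dev>" prints.
 */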
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_puts(&s, gve_gstrings_adminq_stats[i]);

		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
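
/* Fill the ethtool stats buffer: per-ring counters are read under
 * u64_stats begin/retry so 64-bit values stay consistent on 32-bit hosts,
 * and NIC-side ("cross-reported") stats are copied out of the stats
 * report shared with the device.
 */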
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}

	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;
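
	/* The NIC writes its per-queue stats into the shared report in no
	 * fixed queue order, so build queue_id -> report-index maps first;
	 * entries left at -1, or a report the NIC has not written yet,
	 * cause the NIC-stat slots below to be skipped rather than read.
	 */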
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	/* The boundary between driver stats and NIC stats shifts if there are
	 * stopped queues.
	 */
	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
		NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
		(priv->rx_cfg.num_queues - num_stopped_rxqs) +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		/* queue_id is unsigned, so only the upper bound needs checking */
		if (queue_id >= priv->rx_cfg.num_queues) {
			net_err_ratelimited("Invalid rxq id in NIC stats\n");
			continue;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
		(num_tx_queues - num_stopped_txqs) +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		/* queue_id is unsigned, so only the upper bound needs checking */
		if (queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
	data[i++] = priv->adminq_get_ptype_map_cnt;
	data[i++] = priv->adminq_query_flow_rules_cnt;
	data[i++] = priv->adminq_cfg_flow_rule_cnt;
	data[i++] = priv->adminq_cfg_rss_cnt;
	data[i++] = priv->adminq_query_rss_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}
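
/* Channel counts map directly to queue counts, e.g.
 * "ethtool -L <dev> rx 8 tx 8". With XDP loaded, RX and TX counts must
 * match and may use at most half of tx_cfg.max_queues, since the driver
 * allocates dedicated XDP TX queues alongside the regular ones.
 */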
static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;
	bool reset_rss = false;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (new_rx != priv->rx_cfg.num_queues &&
	    priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
		reset_rss = true;

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static int gve_adjust_ring_sizes(struct gve_priv *priv,
				 u16 new_tx_desc_cnt,
				 u16 new_rx_desc_cnt)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	int err;

	/* get current queue configuration */
	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	/* copy over the new ring_size from ethtool */
	tx_alloc_cfg.ring_size = new_tx_desc_cnt;
	rx_alloc_cfg.ring_size = new_rx_desc_cnt;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	}

	/* Set new ring_size for the next up */
	priv->tx_desc_cnt = new_tx_desc_cnt;
	priv->rx_desc_cnt = new_rx_desc_cnt;

	return 0;
}

static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}
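
/* Ring resizing, e.g. "ethtool -G <dev> rx 1024 tx 1024". Header split
 * ("ethtool -G <dev> tcp-data-split on") is applied first so it takes
 * effect even when the descriptor counts are unchanged; new counts must
 * be powers of two within the device-reported min/max.
 */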
static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u16 new_tx_cnt, new_rx_cnt;
	int err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
	if (err)
		return err;

	if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
		return 0;

	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	new_tx_cnt = cmd->tx_pending;
	new_rx_cnt = cmd->rx_pending;

	if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
		return -EINVAL;

	return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}
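
/* "ethtool --set-priv-flags <dev> report-stats on" arms the stats report
 * timer; turning the flag off zeroes the queue stats in the shared
 * report and deletes the timer.
 */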
static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats turned off and
	 * delete report stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
		       sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}
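
/* ntuple flow steering, e.g. (assuming the device accepts the rule):
 *   ethtool -K <dev> ntuple on
 *   ethtool -N <dev> flow-type tcp4 dst-port 80 action 2
 * Rule validation and programming happen in gve_add_flow_rule() and
 * gve_del_flow_rule() via the adminq.
 */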
static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = gve_add_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = gve_del_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXFH:
		err = -EOPNOTSUPP;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_cfg.num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (!priv->max_flow_rules)
			return -EOPNOTSUPP;

		err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
		if (err)
			return err;

		cmd->rule_cnt = priv->num_flow_rules;
		cmd->data = priv->max_flow_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = gve_get_flow_rule_entry(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = -EOPNOTSUPP;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static u32 gve_get_rxfh_key_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_key_size;
}

static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_lut_size;
}

static void gve_get_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key) {
		rxfh->key_size = priv->rss_key_size;
		memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
	}

	if (rxfh->indir) {
		rxfh->indir_size = priv->rss_lut_size;
		memcpy(rxfh->indir, rss_config->hash_lut,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
	}
}

static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	if (priv->cache_rss_config) {
		gve_get_rss_config_cache(priv, rxfh);
		return 0;
	}

	return gve_adminq_query_rss_config(priv, rxfh);
}

static void gve_set_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	if (rxfh->key)
		memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);

	if (rxfh->indir)
		memcpy(rss_config->hash_lut, rxfh->indir,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
}
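
/* RSS updates (e.g. "ethtool -X <dev> equal 8") are pushed to the device
 * over the adminq; when cache_rss_config is set, the key and indirection
 * table are mirrored in host memory so reads avoid an adminq round trip.
 */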
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	err = gve_adminq_configure_rss(priv, rxfh);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to configure RSS");
		return err;
	}

	if (priv->cache_rss_config)
		gve_set_rss_config_cache(priv, rxfh);

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.set_rxnfc = gve_set_rxnfc,
	.get_rxnfc = gve_get_rxnfc,
	.get_rxfh_indir_size = gve_get_rxfh_indir_size,
	.get_rxfh_key_size = gve_get_rxfh_key_size,
	.get_rxfh = gve_get_rxfh,
	.set_rxfh = gve_set_rxfh,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = ethtool_op_get_ts_info,
};