// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]",
	"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
	"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
	"adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

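/* The ETH_SS_STATS stringset is laid out as: main stats, then
 * NUM_GVE_RX_CNTS strings per RX queue, then NUM_GVE_TX_CNTS strings
 * per TX queue, then adminq stats. gve_get_ethtool_stats() must fill
 * data[] in exactly this order.
 */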
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);

		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

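/* Per-queue counters live in u64_stats syncp-protected blocks, so each
 * snapshot below retries under u64_stats_fetch_begin()/retry(). The
 * NIC-reported stats come from the shared stats report; queues that were
 * never started write no entries there, so each report entry is mapped
 * back to its queue id before the rings are walked.
 */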
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes,
		tmp_xdp_tx_errors, tmp_xdp_redirect_errors;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped, xdp_tx_errors, xdp_redirect_errors;
	int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx;
	int stats_idx, stats_region_len, nic_stats_len;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}

	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     xdp_tx_errors = 0, xdp_redirect_errors = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
				tmp_xdp_tx_errors = rx->xdp_tx_errors;
				tmp_xdp_redirect_errors =
					rx->xdp_redirect_errors;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
			xdp_tx_errors += tmp_xdp_tx_errors;
			xdp_redirect_errors += tmp_xdp_redirect_errors;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}
252
253 i = 0;
254 data[i++] = rx_pkts;
255 data[i++] = rx_hsplit_pkt;
256 data[i++] = tx_pkts;
257 data[i++] = rx_bytes;
258 data[i++] = tx_bytes;
259 /* total rx dropped packets */
260 data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt +
261 xdp_tx_errors + xdp_redirect_errors;
262 data[i++] = tx_dropped;
263 data[i++] = priv->tx_timeo_cnt;
264 data[i++] = rx_skb_alloc_fail;
265 data[i++] = rx_buf_alloc_fail;
266 data[i++] = rx_desc_err_dropped_pkt;
267 data[i++] = rx_hsplit_unsplit_pkt;
268 data[i++] = priv->interface_up_cnt;
269 data[i++] = priv->interface_down_cnt;
270 data[i++] = priv->reset_cnt;
271 data[i++] = priv->page_alloc_fail;
272 data[i++] = priv->dma_mapping_error;
273 data[i++] = priv->stats_report_trigger_cnt;
274 i = GVE_MAIN_STATS_LEN;
275
276 rx_base_stats_idx = 0;
277 max_rx_stats_idx = 0;
278 max_tx_stats_idx = 0;
279 stats_region_len = priv->stats_report_len -
280 sizeof(struct gve_stats_report);
281 nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
282 NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats);
283 if (unlikely((stats_region_len -
284 nic_stats_len) % sizeof(struct stats))) {
285 net_err_ratelimited("Starting index of NIC stats should be multiple of stats size");
286 } else {
287 /* For rx cross-reporting stats,
288 * start from nic rx stats in report
289 */
290 rx_base_stats_idx = (stats_region_len - nic_stats_len) /
291 sizeof(struct stats);
292 /* The boundary between driver stats and NIC stats
293 * shifts if there are stopped queues
294 */
295 rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM *
296 num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM *
297 num_stopped_txqs;
298 max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM *
299 (priv->rx_cfg.num_queues - num_stopped_rxqs) +
300 rx_base_stats_idx;
301 max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM *
302 (num_tx_queues - num_stopped_txqs) +
303 max_rx_stats_idx;
304 }
305 /* Preprocess the stats report for rx, map queue id to start index */
306 skip_nic_stats = false;
307 for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx;
308 stats_idx += NIC_RX_STATS_REPORT_NUM) {
309 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
310 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
311
312 if (stat_name == 0) {
313 /* no stats written by NIC yet */
314 skip_nic_stats = true;
315 break;
316 }
317 if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
318 net_err_ratelimited("Invalid rxq id in NIC stats\n");
319 continue;
320 }
321 rx_qid_to_stats_idx[queue_id] = stats_idx;
322 }
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_xdp_tx_errors = rx->xdp_tx_errors;
				tmp_xdp_redirect_errors =
					rx->xdp_redirect_errors;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				    tmp_rx_desc_err_dropped_pkt +
				    tmp_xdp_tx_errors +
				    tmp_xdp_redirect_errors;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	skip_nic_stats = false;
	/* NIC TX stats start right after NIC RX stats */
	for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx;
	     stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] =
					(tx->dqo_tx.tail - tx->dqo_tx.head) &
					tx->mask;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP counters */
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}
460
461 kfree(rx_qid_to_stats_idx);
462 kfree(tx_qid_to_stats_idx);
463 /* AQ Stats */
464 data[i++] = priv->adminq_prod_cnt;
465 data[i++] = priv->adminq_cmd_fail;
466 data[i++] = priv->adminq_timeouts;
467 data[i++] = priv->adminq_describe_device_cnt;
468 data[i++] = priv->adminq_cfg_device_resources_cnt;
469 data[i++] = priv->adminq_register_page_list_cnt;
470 data[i++] = priv->adminq_unregister_page_list_cnt;
471 data[i++] = priv->adminq_create_tx_queue_cnt;
472 data[i++] = priv->adminq_create_rx_queue_cnt;
473 data[i++] = priv->adminq_destroy_tx_queue_cnt;
474 data[i++] = priv->adminq_destroy_rx_queue_cnt;
475 data[i++] = priv->adminq_dcfg_device_resources_cnt;
476 data[i++] = priv->adminq_set_driver_parameter_cnt;
477 data[i++] = priv->adminq_report_stats_cnt;
478 data[i++] = priv->adminq_report_link_speed_cnt;
479 data[i++] = priv->adminq_get_ptype_map_cnt;
480 data[i++] = priv->adminq_query_flow_rules_cnt;
481 data[i++] = priv->adminq_cfg_flow_rule_cnt;
482 data[i++] = priv->adminq_cfg_rss_cnt;
483 data[i++] = priv->adminq_query_rss_cnt;
484 data[i++] = priv->adminq_report_nic_timestamp_cnt;
485 }
486
static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

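/* Queue counts are changed from userspace with, e.g.,
 * "ethtool -L <ifname> rx 8 tx 8".
 */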
static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;
	bool reset_rss = false;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->xdp_prog) {
		if (new_tx != new_rx ||
		    (2 * new_tx > priv->tx_cfg.max_queues)) {
			dev_err(&priv->pdev->dev, "When an XDP program is installed, the RX and TX queue counts must be equal and at most half the maximum number of RX/TX queues");
			return -EINVAL;
		}

		/* One XDP TX queue per RX queue. */
		new_tx_cfg.num_xdp_queues = new_rx;
	} else {
		new_tx_cfg.num_xdp_queues = 0;
	}

	if (new_rx != priv->rx_cfg.num_queues &&
	    priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
		reset_rss = true;

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}

static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
				     u16 new_rx_desc_cnt,
				     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
				     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
	if (new_tx_desc_cnt == priv->tx_desc_cnt &&
	    new_rx_desc_cnt == priv->rx_desc_cnt)
		return 0;

	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
		return -EINVAL;

	tx_alloc_cfg->ring_size = new_tx_desc_cnt;
	rx_alloc_cfg->ring_size = new_rx_desc_cnt;
	return 0;
}

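/* Ring geometry is changed from userspace with, e.g.,
 * "ethtool -G <ifname> rx 2048 tx 2048"; recent ethtool versions also
 * expose tcp-data-split and rx-buf-len through the same -G command.
 */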
static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
					&rx_alloc_cfg);
	if (err)
		return err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
				    &rx_alloc_cfg);
	if (err)
		return err;

	err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
					&tx_alloc_cfg, &rx_alloc_cfg);
	if (err)
		return err;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	} else {
		/* Set ring params for the next up */
		priv->rx_cfg.packet_buffer_size =
			rx_alloc_cfg.packet_buffer_size;
		priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
		priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
		priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
	}
	return 0;
}

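/* Triggered from userspace with, e.g., "ethtool --reset <ifname> all";
 * only a full reset (ETH_RESET_ALL) is supported.
 */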
static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

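/* rx-copybreak (the largest packet size the driver copies into a fresh
 * buffer instead of flipping pages) is set with, e.g.,
 * "ethtool --set-tunable <ifname> rx-copybreak 256"; it may not exceed
 * the RX packet buffer size.
 */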
static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = priv->rx_cfg.packet_buffer_size;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only one flag exists currently: report-stats (BIT(0)) */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

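/* Toggled from userspace with, e.g.,
 * "ethtool --set-priv-flags <ifname> report-stats on".
 */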
static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~BIT(0);
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero out gve stats and delete the report-stats timer when
	 * report-stats is turned off.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
		       sizeof(struct stats));
		timer_delete_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;
	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

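/* Interrupt moderation is configured from userspace with, e.g.,
 * "ethtool -C <ifname> rx-usecs 20 tx-usecs 20"; only the DQO queue
 * format supports it.
 */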
static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

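/* ntuple flow rules are managed from userspace with, e.g.,
 * "ethtool -N <ifname> flow-type tcp4 dst-port 80 action 2"; rules are
 * only accepted while NETIF_F_NTUPLE is enabled.
 */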
static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = gve_add_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = gve_del_flow_rule(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_cfg.num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (!priv->max_flow_rules)
			return -EOPNOTSUPP;

		err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
		if (err)
			return err;

		cmd->rule_cnt = priv->num_flow_rules;
		cmd->data = priv->max_flow_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = gve_get_flow_rule_entry(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static u32 gve_get_rxfh_key_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_key_size;
}

static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_lut_size;
}

static void gve_get_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key) {
		rxfh->key_size = priv->rss_key_size;
		memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
	}

	if (rxfh->indir) {
		rxfh->indir_size = priv->rss_lut_size;
		memcpy(rxfh->indir, rss_config->hash_lut,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
	}
}

static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	if (priv->cache_rss_config) {
		gve_get_rss_config_cache(priv, rxfh);
		return 0;
	}

	return gve_adminq_query_rss_config(priv, rxfh);
}

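/* The RSS key and indirection table are programmed from userspace with,
 * e.g., "ethtool -X <ifname> equal 8"; the device is updated first and
 * the local cache only on success.
 */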
static void gve_set_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	if (rxfh->key)
		memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);

	if (rxfh->indir)
		memcpy(rss_config->hash_lut, rxfh->indir,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
}

static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	err = gve_adminq_configure_rss(priv, rxfh);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to configure RSS");
		return err;
	}

	if (priv->cache_rss_config)
		gve_set_rss_config_cache(priv, rxfh);

	return 0;
}

static int gve_get_ts_info(struct net_device *netdev,
			   struct kernel_ethtool_ts_info *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	ethtool_op_get_ts_info(netdev, info);

	if (gve_is_clock_enabled(priv)) {
		info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;

		info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
				    BIT(HWTSTAMP_FILTER_ALL);

		if (priv->ptp)
			info->phc_index = ptp_clock_index(priv->ptp->clock);
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
				 ETHTOOL_RING_USE_RX_BUF_LEN,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.set_rxnfc = gve_set_rxnfc,
	.get_rxnfc = gve_get_rxnfc,
	.get_rxfh_indir_size = gve_get_rxfh_indir_size,
	.get_rxfh_key_size = gve_get_rxfh_key_size,
	.get_rxfh = gve_get_rxfh,
	.set_rxfh = gve_set_rxfh,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = gve_get_ts_info,
};