// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h.
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]",
	"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
	"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
	"adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

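/* String-set layout for ETH_SS_STATS: the main stats come first, then
 * NUM_GVE_RX_CNTS entries per RX queue, NUM_GVE_TX_CNTS entries per TX
 * queue, and finally the adminq stats. gve_get_sset_count() and
 * gve_get_ethtool_stats() below must agree with this ordering.
 */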
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);

		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}

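/* As a concrete illustration (the queue counts here are examples, not
 * fixed values): with 8 RX and 8 TX queues, the ETH_SS_STATS count below
 * works out to 18 main + 8 * 24 RX + 8 * 11 TX + 21 adminq = 319 entries.
 */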
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

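/* Fill @data for ETH_SS_STATS. This runs in three phases: aggregate
 * per-ring counters into the main stats, walk the RX and TX rings for
 * per-queue stats (merging in NIC-reported values from the stats
 * report), and finally dump the adminq counters.
 */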
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes,
		tmp_xdp_tx_errors, tmp_xdp_redirect_errors;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped, xdp_tx_errors, xdp_redirect_errors;
	int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx;
	int stats_idx, stats_region_len, nic_stats_len;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}

	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     xdp_tx_errors = 0, xdp_redirect_errors = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
				tmp_xdp_tx_errors = rx->xdp_tx_errors;
				tmp_xdp_redirect_errors =
					rx->xdp_redirect_errors;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
			xdp_tx_errors += tmp_xdp_tx_errors;
			xdp_redirect_errors += tmp_xdp_redirect_errors;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt +
		    xdp_tx_errors + xdp_redirect_errors;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

	rx_base_stats_idx = 0;
	max_rx_stats_idx = 0;
	max_tx_stats_idx = 0;
	stats_region_len = priv->stats_report_len -
				sizeof(struct gve_stats_report);
	nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats);
	if (unlikely((stats_region_len -
				nic_stats_len) % sizeof(struct stats))) {
		net_err_ratelimited("Starting index of NIC stats should be a multiple of stats size\n");
	} else {
		/* For rx cross-reporting stats,
		 * start from nic rx stats in report
		 */
		rx_base_stats_idx = (stats_region_len - nic_stats_len) /
							sizeof(struct stats);
		/* The boundary between driver stats and NIC stats
		 * shifts if there are stopped queues
		 */
		rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM *
			num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM *
			num_stopped_txqs;
		max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM *
			(priv->rx_cfg.num_queues - num_stopped_rxqs) +
			rx_base_stats_idx;
		max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM *
			(num_tx_queues - num_stopped_txqs) +
			max_rx_stats_idx;
	}
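	/* Layout sketch (an illustration, not a guarantee): the stats
	 * report region holds driver-reported stats first and
	 * NIC-reported per-queue stats at the tail, so the NIC RX block
	 * spans [rx_base_stats_idx, max_rx_stats_idx) with
	 * NIC_RX_STATS_REPORT_NUM entries per active RX queue. Stopped
	 * queues are absent from the report, which is why the base index
	 * above is shifted past them.
	 */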
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		int stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		int queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
			net_err_ratelimited("Invalid rxq id in NIC stats\n");
			continue;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_xdp_tx_errors = rx->xdp_tx_errors;
				tmp_xdp_redirect_errors =
					rx->xdp_redirect_errors;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				    tmp_rx_desc_err_dropped_pkt +
				    tmp_xdp_tx_errors +
				    tmp_xdp_redirect_errors;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	skip_nic_stats = false;
	/* NIC TX stats start right after NIC RX stats */
	for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		int stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		int queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] =
					(tx->dqo_tx.tail - tx->dqo_tx.head) &
					tx->mask;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP counters */
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
	data[i++] = priv->adminq_get_ptype_map_cnt;
	data[i++] = priv->adminq_query_flow_rules_cnt;
	data[i++] = priv->adminq_cfg_flow_rule_cnt;
	data[i++] = priv->adminq_cfg_rss_cnt;
	data[i++] = priv->adminq_query_rss_cnt;
	data[i++] = priv->adminq_report_nic_timestamp_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;
	bool reset_rss = false;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

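	/* With XDP, each RX queue needs a dedicated XDP TX queue, so the
	 * usable range is halved; e.g. if tx_cfg.max_queues were 16 (an
	 * illustrative value), at most 8 RX/TX pairs could be configured
	 * while an XDP program is installed.
	 */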
	if (priv->xdp_prog) {
		if (new_tx != new_rx ||
		    (2 * new_tx > priv->tx_cfg.max_queues)) {
			dev_err(&priv->pdev->dev, "When an XDP program is installed, the number of configured RX queues must equal the number of configured TX queues, and both must be at most half the maximum number of RX/TX queues");
			return -EINVAL;
		}

		/* One XDP TX queue per RX queue. */
		new_tx_cfg.num_xdp_queues = new_rx;
	} else {
		new_tx_cfg.num_xdp_queues = 0;
	}

	if (new_rx != priv->rx_cfg.num_queues &&
	    priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
		reset_rss = true;

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

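/* Ring-size requests must be powers of two inside the device-reported
 * [min, max] range. For example, if the device reported min 256 and
 * max 2048 (illustrative values), the accepted sizes would be 256, 512,
 * 1024 and 2048.
 */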
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}

static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
				     u16 new_rx_desc_cnt,
				     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
				     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
	if (new_tx_desc_cnt == priv->tx_desc_cnt &&
	    new_rx_desc_cnt == priv->rx_desc_cnt)
		return 0;

	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
		return -EINVAL;

	tx_alloc_cfg->ring_size = new_tx_desc_cnt;
	rx_alloc_cfg->ring_size = new_rx_desc_cnt;
	return 0;
}

static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
					&rx_alloc_cfg);
	if (err)
		return err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
				    &rx_alloc_cfg);
	if (err)
		return err;

	err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
					&tx_alloc_cfg, &rx_alloc_cfg);
	if (err)
		return err;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	} else {
		/* Set ring params for the next up */
		priv->rx_cfg.packet_buffer_size =
			rx_alloc_cfg.packet_buffer_size;
		priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
		priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
		priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
	}
	return 0;
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = priv->rx_cfg.packet_buffer_size;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

698 {
699 	struct gve_priv *priv = netdev_priv(netdev);
700 	u32 ret_flags = 0;
701 
702 	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
703 	if (priv->ethtool_flags & BIT(0))
704 		ret_flags |= BIT(0);
705 	return ret_flags;
706 }
707 
708 static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
709 {
710 	struct gve_priv *priv = netdev_priv(netdev);
711 	u64 ori_flags, new_flags;
712 	int num_tx_queues;
713 
714 	num_tx_queues = gve_num_tx_queues(priv);
715 	ori_flags = READ_ONCE(priv->ethtool_flags);
716 	new_flags = ori_flags;
717 
718 	/* Only one priv flag exists: report-stats (BIT(0))*/
719 	if (flags & BIT(0))
720 		new_flags |= BIT(0);
721 	else
722 		new_flags &= ~(BIT(0));
723 	priv->ethtool_flags = new_flags;
724 	/* start report-stats timer when user turns report stats on. */
725 	if (flags & BIT(0)) {
726 		mod_timer(&priv->stats_report_timer,
727 			  round_jiffies(jiffies +
728 					msecs_to_jiffies(priv->stats_report_timer_period)));
729 	}
730 	/* Zero off gve stats when report-stats turned off and */
731 	/* delete report stats timer. */
732 	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
733 		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
734 			num_tx_queues;
735 		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
736 			priv->rx_cfg.num_queues;
737 
738 		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
739 				   sizeof(struct stats));
740 		timer_delete_sync(&priv->stats_report_timer);
741 	}
742 	return 0;
743 }
744 
745 static int gve_get_link_ksettings(struct net_device *netdev,
746 				  struct ethtool_link_ksettings *cmd)
747 {
748 	struct gve_priv *priv = netdev_priv(netdev);
749 	int err = 0;
750 
751 	if (priv->link_speed == 0)
752 		err = gve_adminq_report_link_speed(priv);
753 
754 	cmd->base.speed = priv->link_speed;
755 
756 	cmd->base.duplex = DUPLEX_FULL;
757 
758 	return err;
759 }
760 
761 static int gve_get_coalesce(struct net_device *netdev,
762 			    struct ethtool_coalesce *ec,
763 			    struct kernel_ethtool_coalesce *kernel_ec,
764 			    struct netlink_ext_ack *extack)
765 {
766 	struct gve_priv *priv = netdev_priv(netdev);
767 
768 	if (gve_is_gqi(priv))
769 		return -EOPNOTSUPP;
770 	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
771 	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
772 
773 	return 0;
774 }
775 
776 static int gve_set_coalesce(struct net_device *netdev,
777 			    struct ethtool_coalesce *ec,
778 			    struct kernel_ethtool_coalesce *kernel_ec,
779 			    struct netlink_ext_ack *extack)
780 {
781 	struct gve_priv *priv = netdev_priv(netdev);
782 	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
783 	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
784 	int idx;
785 
786 	if (gve_is_gqi(priv))
787 		return -EOPNOTSUPP;
788 
789 	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
790 	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
791 		return -EINVAL;
792 	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
793 	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
794 
795 	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
796 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
797 			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
798 			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
799 
800 			gve_set_itr_coalesce_usecs_dqo(priv, block,
801 						       priv->tx_coalesce_usecs);
802 		}
803 	}
804 
805 	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
806 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
807 			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
808 			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
809 
810 			gve_set_itr_coalesce_usecs_dqo(priv, block,
811 						       priv->rx_coalesce_usecs);
812 		}
813 	}
814 
815 	return 0;
816 }
817 
818 static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
819 {
820 	struct gve_priv *priv = netdev_priv(netdev);
821 	int err = 0;
822 
823 	if (!(netdev->features & NETIF_F_NTUPLE))
824 		return -EOPNOTSUPP;
825 
826 	switch (cmd->cmd) {
827 	case ETHTOOL_SRXCLSRLINS:
828 		err = gve_add_flow_rule(priv, cmd);
829 		break;
830 	case ETHTOOL_SRXCLSRLDEL:
831 		err = gve_del_flow_rule(priv, cmd);
832 		break;
833 	default:
834 		err = -EOPNOTSUPP;
835 		break;
836 	}
837 
838 	return err;
839 }
840 
841 static u32 gve_get_rx_ring_count(struct net_device *netdev)
842 {
843 	struct gve_priv *priv = netdev_priv(netdev);
844 
845 	return priv->rx_cfg.num_queues;
846 }
847 
848 static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
849 {
850 	struct gve_priv *priv = netdev_priv(netdev);
851 	int err = 0;
852 
853 	switch (cmd->cmd) {
854 	case ETHTOOL_GRXCLSRLCNT:
855 		if (!priv->max_flow_rules)
856 			return -EOPNOTSUPP;
857 
858 		err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
859 		if (err)
860 			return err;
861 
862 		cmd->rule_cnt = priv->num_flow_rules;
863 		cmd->data = priv->max_flow_rules;
864 		break;
865 	case ETHTOOL_GRXCLSRULE:
866 		err = gve_get_flow_rule_entry(priv, cmd);
867 		break;
868 	case ETHTOOL_GRXCLSRLALL:
869 		err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
870 		break;
871 	default:
872 		err = -EOPNOTSUPP;
873 		break;
874 	}
875 
876 	return err;
877 }
878 
879 static u32 gve_get_rxfh_key_size(struct net_device *netdev)
880 {
881 	struct gve_priv *priv = netdev_priv(netdev);
882 
883 	return priv->rss_key_size;
884 }
885 
886 static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
887 {
888 	struct gve_priv *priv = netdev_priv(netdev);
889 
890 	return priv->rss_lut_size;
891 }
892 
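/* When RSS config caching is enabled (priv->cache_rss_config), reads
 * below are served from the driver's cached copy instead of an adminq
 * round trip, and writes keep the cache in sync with the device.
 */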
static void gve_get_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key) {
		rxfh->key_size = priv->rss_key_size;
		memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
	}

	if (rxfh->indir) {
		rxfh->indir_size = priv->rss_lut_size;
		memcpy(rxfh->indir, rss_config->hash_lut,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
	}
}

static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	if (priv->cache_rss_config) {
		gve_get_rss_config_cache(priv, rxfh);
		return 0;
	}

	return gve_adminq_query_rss_config(priv, rxfh);
}

static void gve_set_rss_config_cache(struct gve_priv *priv,
				     struct ethtool_rxfh_param *rxfh)
{
	struct gve_rss_config *rss_config = &priv->rss_config;

	if (rxfh->key)
		memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);

	if (rxfh->indir)
		memcpy(rss_config->hash_lut, rxfh->indir,
		       priv->rss_lut_size * sizeof(*rxfh->indir));
}

static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	err = gve_adminq_configure_rss(priv, rxfh);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to configure RSS");
		return err;
	}

	if (priv->cache_rss_config)
		gve_set_rss_config_cache(priv, rxfh);

	return 0;
}

static int gve_get_ts_info(struct net_device *netdev,
			   struct kernel_ethtool_ts_info *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	ethtool_op_get_ts_info(netdev, info);

	if (gve_is_clock_enabled(priv)) {
		info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;

		info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
				    BIT(HWTSTAMP_FILTER_ALL);

		if (priv->ptp)
			info->phc_index = ptp_clock_index(priv->ptp->clock);
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
				 ETHTOOL_RING_USE_RX_BUF_LEN,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.set_rxnfc = gve_set_rxnfc,
	.get_rxnfc = gve_get_rxnfc,
	.get_rx_ring_count = gve_get_rx_ring_count,
	.get_rxfh_indir_size = gve_get_rxfh_indir_size,
	.get_rxfh_key_size = gve_get_rxfh_key_size,
	.get_rxfh = gve_get_rxfh,
	.set_rxfh = gve_set_rxfh,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = gve_get_ts_info,
};