// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

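/* ethtool stats plumbing: gve_get_sset_count() reports how many entries a
 * stringset carries, gve_get_strings() emits the names, and
 * gve_get_ethtool_stats() fills the values. Per the ethtool_ops contract,
 * all three must agree on count and order.
 */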
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_puts(&s, gve_gstrings_adminq_stats[i]);

		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}

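	/* Aggregate totals across rings. Each ring's counters sit behind a
	 * u64_stats seqcount: snapshot them into the tmp_* variables and
	 * retry if the writer moved underneath us, so every per-ring
	 * snapshot stays internally consistent.
	 */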
	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

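	/* NIC-reported stats follow the driver-reported stats in the shared
	 * stats report: blocks of NIC_RX_STATS_REPORT_NUM rx entries first,
	 * then blocks of NIC_TX_STATS_REPORT_NUM tx entries, written only
	 * for queues that are live. The qid_to_stats_idx arrays built below
	 * map each queue id to its block, since stopped queues contribute
	 * no entries of their own.
	 */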
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	/* The boundary between driver stats and NIC stats shifts if there are
	 * stopped queues.
	 */
	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
		NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
		(priv->rx_cfg.num_queues - num_stopped_rxqs) +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
			net_err_ratelimited("Invalid rxq id in NIC stats\n");
			continue;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
		(num_tx_queues - num_stopped_txqs) +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);
					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
	data[i++] = priv->adminq_get_ptype_map_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

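	/* With XDP loaded, each RX queue needs a dedicated XDP TX queue on
	 * top of the regular TX queues, so the requested configuration must
	 * keep new_tx == new_rx and leave room for both sets under
	 * max_queues.
	 */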
	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

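	/* Header split is exposed through the standard tcp-data-split ring
	 * param; UNKNOWN means the device cannot split headers at all.
	 */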
	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static int gve_adjust_ring_sizes(struct gve_priv *priv,
				 u16 new_tx_desc_cnt,
				 u16 new_rx_desc_cnt)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	int err;

	/* get current queue configuration */
	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	/* copy over the new ring_size from ethtool */
	tx_alloc_cfg.ring_size = new_tx_desc_cnt;
	rx_alloc_cfg.ring_size = new_rx_desc_cnt;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	}

	/* Set new ring_size for the next up */
	priv->tx_desc_cnt = new_tx_desc_cnt;
	priv->rx_desc_cnt = new_rx_desc_cnt;

	return 0;
}

static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}

static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u16 new_tx_cnt, new_rx_cnt;
	int err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
	if (err)
		return err;

	if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
		return 0;

	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	new_tx_cnt = cmd->tx_pending;
	new_rx_cnt = cmd->rx_pending;

	if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
		return -EINVAL;

	return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

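	/* Only a full reset is supported. Clearing *flags tells the ethtool
	 * core that every requested component was reset.
	 */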
	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
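		/* rx_copybreak caps the packet size below which rx copies
		 * the payload into a fresh skb instead of flipping the page,
		 * so it cannot exceed a single rx buffer.
		 */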
		u32 max_copybreak = gve_is_gqi(priv) ?
			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)). */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero out gve stats when report-stats is turned off and delete the
	 * report-stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
				   sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

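	/* A link_speed of 0 means the device hasn't reported one yet, so ask
	 * for it over the admin queue before filling in the settings.
	 */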
	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

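	/* Push the new ITR interval to each affected queue's notify block so
	 * the change takes effect without restarting the queues.
	 */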
	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = ethtool_op_get_ts_info,
};