xref: /freebsd/sys/dev/gve/gve_sysctl.c (revision c27f7d6b9cf6d4ab01cb3d0972726c14e0aca146)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"

static SYSCTL_NODE(_hw, OID_AUTO, gve, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "GVE driver parameters");

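/*
 * Global hw.gve.* knobs. disable_hw_lro is a read-only tunable
 * (CTLFLAG_RDTUN), so it can only be set at boot, e.g. via
 * hw.gve.disable_hw_lro="1" in loader.conf. queue_format and
 * driver_version are read-only strings exported here and filled in
 * elsewhere in the driver.
 */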
bool gve_disable_hw_lro = false;
SYSCTL_BOOL(_hw_gve, OID_AUTO, disable_hw_lro, CTLFLAG_RDTUN,
    &gve_disable_hw_lro, 0, "Controls if hardware LRO is used");

char gve_queue_format[8];
SYSCTL_STRING(_hw_gve, OID_AUTO, queue_format, CTLFLAG_RD,
    &gve_queue_format, 0, "Queue format being used by the iface");

char gve_version[8];
SYSCTL_STRING(_hw_gve, OID_AUTO, driver_version, CTLFLAG_RD,
    &gve_version, 0, "Driver version");

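/*
 * Create a per-RX-queue "rxqN" sysctl node and attach the queue's
 * counters (bytes, packets, drop reasons, mbuf/DMA failures) along with
 * its completed and posted descriptor counts.
 */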
static void
gve_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_rx_ring *rxq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct gve_rxq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->com.id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &rxq->stats;

	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_bytes", CTLFLAG_RD,
	    &stats->rbytes, "Bytes received");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_packets", CTLFLAG_RD,
	    &stats->rpackets, "Packets received");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_copybreak_cnt",
	    CTLFLAG_RD, &stats->rx_copybreak_cnt,
	    "Total frags with mbufs allocated for copybreak");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_frag_flip_cnt",
	    CTLFLAG_RD, &stats->rx_frag_flip_cnt,
	    "Total frags that allocated mbuf with page flip");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_frag_copy_cnt",
	    CTLFLAG_RD, &stats->rx_frag_copy_cnt,
	    "Total frags with mbuf that copied payload into mbuf");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_dropped_pkt",
	    CTLFLAG_RD, &stats->rx_dropped_pkt,
	    "Total rx packets dropped");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_dropped_pkt_desc_err", CTLFLAG_RD,
	    &stats->rx_dropped_pkt_desc_err,
	    "Packets dropped due to descriptor error");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_dropped_pkt_buf_post_fail", CTLFLAG_RD,
	    &stats->rx_dropped_pkt_buf_post_fail,
	    "Packets dropped due to failure to post enough buffers");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_dropped_pkt_mbuf_alloc_fail", CTLFLAG_RD,
	    &stats->rx_dropped_pkt_mbuf_alloc_fail,
	    "Packets dropped due to failed mbuf allocation");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_mbuf_dmamap_err", CTLFLAG_RD,
	    &stats->rx_mbuf_dmamap_err,
	    "Number of rx mbufs which could not be dma mapped");
	SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
	    "rx_mbuf_mclget_null", CTLFLAG_RD,
	    &stats->rx_mbuf_mclget_null,
	    "Number of times when there were no cluster mbufs");
	SYSCTL_ADD_U32(ctx, list, OID_AUTO,
	    "rx_completed_desc", CTLFLAG_RD,
	    &rxq->cnt, 0, "Number of descriptors completed");
	SYSCTL_ADD_U32(ctx, list, OID_AUTO,
	    "num_desc_posted", CTLFLAG_RD,
	    &rxq->fill_cnt, rxq->fill_cnt,
	    "Total number of descriptors posted");
}

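/*
 * Create a per-TX-queue "txqN" sysctl node exposing the queue's posted
 * and completed descriptor counts plus its transmit counters (packets,
 * TSO packets, bytes, delay/drop reasons, mbuf and DMA-map failures).
 */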
static void
gve_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_tx_ring *txq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *tx_list;
	struct gve_txq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->com.id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
	tx_list = SYSCTL_CHILDREN(node);

	stats = &txq->stats;

	SYSCTL_ADD_U32(ctx, tx_list, OID_AUTO,
	    "tx_posted_desc", CTLFLAG_RD,
	    &txq->req, 0, "Number of descriptors posted by NIC");
	SYSCTL_ADD_U32(ctx, tx_list, OID_AUTO,
	    "tx_completed_desc", CTLFLAG_RD,
	    &txq->done, 0, "Number of descriptors completed");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_packets", CTLFLAG_RD,
	    &stats->tpackets, "Packets transmitted");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_tso_packets", CTLFLAG_RD,
	    &stats->tso_packet_cnt, "TSO Packets transmitted");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_bytes", CTLFLAG_RD,
	    &stats->tbytes, "Bytes transmitted");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_delayed_pkt_nospace_device", CTLFLAG_RD,
	    &stats->tx_delayed_pkt_nospace_device,
	    "Packets delayed due to no space in device");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_dropped_pkt_nospace_bufring", CTLFLAG_RD,
	    &stats->tx_dropped_pkt_nospace_bufring,
	    "Packets dropped due to no space in the buf ring");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_dropped_pkt_vlan", CTLFLAG_RD,
	    &stats->tx_dropped_pkt_vlan,
	    "Dropped VLAN packets");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_delayed_pkt_nospace_descring", CTLFLAG_RD,
	    &stats->tx_delayed_pkt_nospace_descring,
	    "Packets delayed due to no space in desc ring");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_delayed_pkt_nospace_compring", CTLFLAG_RD,
	    &stats->tx_delayed_pkt_nospace_compring,
	    "Packets delayed due to no space in comp ring");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_delayed_pkt_nospace_qpl_bufs", CTLFLAG_RD,
	    &stats->tx_delayed_pkt_nospace_qpl_bufs,
	    "Packets delayed due to not enough qpl bufs");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_delayed_pkt_tsoerr", CTLFLAG_RD,
	    &stats->tx_delayed_pkt_tsoerr,
	    "TSO packets delayed due to errors during TSO prep");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_mbuf_collapse", CTLFLAG_RD,
	    &stats->tx_mbuf_collapse,
	    "tx mbufs that had to be collapsed");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_mbuf_defrag", CTLFLAG_RD,
	    &stats->tx_mbuf_defrag,
	    "tx mbufs that had to be defragged");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_mbuf_defrag_err", CTLFLAG_RD,
	    &stats->tx_mbuf_defrag_err,
	    "tx mbufs that failed defrag");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_mbuf_dmamap_enomem_err", CTLFLAG_RD,
	    &stats->tx_mbuf_dmamap_enomem_err,
	    "tx mbufs that could not be dma-mapped due to low mem");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_mbuf_dmamap_err", CTLFLAG_RD,
	    &stats->tx_mbuf_dmamap_err,
	    "tx mbufs that could not be dma-mapped");
	SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
	    "tx_timeout", CTLFLAG_RD,
	    &stats->tx_timeout,
	    "detections of timed out packets on tx queues");
}

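/* Register the per-queue nodes for every configured RX and TX queue. */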
static void
gve_setup_queue_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child,
    struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		gve_setup_rxq_sysctl(ctx, child, &priv->rx[i]);
	}
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		gve_setup_txq_sysctl(ctx, child, &priv->tx[i]);
	}
}

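/*
 * Export admin-queue statistics under an "adminq_stats" node: totals of
 * issued, failed, and timed-out commands, plus a counter per admin
 * command type.
 */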
static void
gve_setup_adminq_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_priv *priv)
{
	struct sysctl_oid *admin_node;
	struct sysctl_oid_list *admin_list;

	/* Admin queue stats */
	admin_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "adminq_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue statistics");
	admin_list = SYSCTL_CHILDREN(admin_node);

	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_prod_cnt", CTLFLAG_RD,
	    &priv->adminq_prod_cnt, 0, "Adminq Commands issued");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_cmd_fail", CTLFLAG_RD,
	    &priv->adminq_cmd_fail, 0, "Adminq Failed commands");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_timeouts", CTLFLAG_RD,
	    &priv->adminq_timeouts, 0, "Adminq Timed-out commands");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_describe_device_cnt",
	    CTLFLAG_RD, &priv->adminq_describe_device_cnt, 0,
	    "adminq_describe_device_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_cfg_device_resources_cnt", CTLFLAG_RD,
	    &priv->adminq_cfg_device_resources_cnt, 0,
	    "adminq_cfg_device_resources_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_register_page_list_cnt", CTLFLAG_RD,
	    &priv->adminq_register_page_list_cnt, 0,
	    "adminq_register_page_list_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_unregister_page_list_cnt", CTLFLAG_RD,
	    &priv->adminq_unregister_page_list_cnt, 0,
	    "adminq_unregister_page_list_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_create_tx_queue_cnt",
	    CTLFLAG_RD, &priv->adminq_create_tx_queue_cnt, 0,
	    "adminq_create_tx_queue_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_create_rx_queue_cnt",
	    CTLFLAG_RD, &priv->adminq_create_rx_queue_cnt, 0,
	    "adminq_create_rx_queue_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_destroy_tx_queue_cnt",
	    CTLFLAG_RD, &priv->adminq_destroy_tx_queue_cnt, 0,
	    "adminq_destroy_tx_queue_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_destroy_rx_queue_cnt",
	    CTLFLAG_RD, &priv->adminq_destroy_rx_queue_cnt, 0,
	    "adminq_destroy_rx_queue_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_get_ptype_map_cnt",
	    CTLFLAG_RD, &priv->adminq_get_ptype_map_cnt, 0,
	    "adminq_get_ptype_map_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_dcfg_device_resources_cnt", CTLFLAG_RD,
	    &priv->adminq_dcfg_device_resources_cnt, 0,
	    "adminq_dcfg_device_resources_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_set_driver_parameter_cnt", CTLFLAG_RD,
	    &priv->adminq_set_driver_parameter_cnt, 0,
	    "adminq_set_driver_parameter_cnt");
	SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
	    "adminq_verify_driver_compatibility_cnt", CTLFLAG_RD,
	    &priv->adminq_verify_driver_compatibility_cnt, 0,
	    "adminq_verify_driver_compatibility_cnt");
}

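/*
 * Export interface-level counters under a "main_stats" node: how many
 * times the interface was brought up or down and how many resets the
 * driver has performed.
 */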
static void
gve_setup_main_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_priv *priv)
{
	struct sysctl_oid *main_node;
	struct sysctl_oid_list *main_list;

	/* Main stats */
	main_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "main_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Main statistics");
	main_list = SYSCTL_CHILDREN(main_node);

	SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "interface_up_cnt", CTLFLAG_RD,
	    &priv->interface_up_cnt, 0, "Times interface was set to up");
	SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "interface_down_cnt", CTLFLAG_RD,
	    &priv->interface_down_cnt, 0, "Times interface was set to down");
	SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "reset_cnt", CTLFLAG_RD,
	    &priv->reset_cnt, 0, "Times reset");
}

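/*
 * Validate a requested queue count: it must be at least 1 and no larger
 * than the device-advertised maximum for the RX or TX direction.
 */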
static int
gve_check_num_queues(struct gve_priv *priv, int val, bool is_rx)
{
	if (val < 1) {
		device_printf(priv->dev,
		    "Requested num queues (%d) must be a positive integer\n", val);
		return (EINVAL);
	}

	if (val > (is_rx ? priv->rx_cfg.max_queues : priv->tx_cfg.max_queues)) {
		device_printf(priv->dev,
		    "Requested num queues (%d) is too large\n", val);
		return (EINVAL);
	}

	return (0);
}

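/*
 * Handler for the writable num_tx_queues sysctl: report the current
 * count on read; on write, validate the new value and resize the TX
 * queue set under the interface lock.
 */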
static int
gve_sysctl_num_tx_queues(SYSCTL_HANDLER_ARGS)
{
	struct gve_priv *priv = arg1;
	int val;
	int err;

	val = priv->tx_cfg.num_queues;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	err = gve_check_num_queues(priv, val, /*is_rx=*/false);
	if (err != 0)
		return (err);

	if (val != priv->tx_cfg.num_queues) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		err = gve_adjust_tx_queues(priv, val);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}

	return (err);
}

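/* Same as above, but for the RX queue count. */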
static int
gve_sysctl_num_rx_queues(SYSCTL_HANDLER_ARGS)
{
	struct gve_priv *priv = arg1;
	int val;
	int err;

	val = priv->rx_cfg.num_queues;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	err = gve_check_num_queues(priv, val, /*is_rx=*/true);
	if (err != 0)
		return (err);

	if (val != priv->rx_cfg.num_queues) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		err = gve_adjust_rx_queues(priv, val);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}

	return (err);
}

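/*
 * Validate a requested descriptor ring size: it must be a power of two
 * and fall within the device-advertised minimum and maximum for the RX
 * or TX direction.
 */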
static int
gve_check_ring_size(struct gve_priv *priv, int val, bool is_rx)
{
	if (!powerof2(val) || val == 0) {
		device_printf(priv->dev,
		    "Requested ring size (%d) must be a power of 2\n", val);
		return (EINVAL);
	}

	if (val < (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt)) {
		device_printf(priv->dev,
		    "Requested ring size (%d) cannot be less than %d\n", val,
		    (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt));
		return (EINVAL);
	}

	if (val > (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt)) {
		device_printf(priv->dev,
		    "Requested ring size (%d) cannot be greater than %d\n", val,
		    (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt));
		return (EINVAL);
	}

	return (0);
}

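/*
 * Handler for the writable tx_ring_size sysctl: report the current TX
 * descriptor count on read; on write, validate the new size and resize
 * the TX rings under the interface lock.
 */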
static int
gve_sysctl_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct gve_priv *priv = arg1;
	int val;
	int err;

	val = priv->tx_desc_cnt;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	err = gve_check_ring_size(priv, val, /*is_rx=*/false);
	if (err != 0)
		return (err);

	if (val != priv->tx_desc_cnt) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/false);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}

	return (err);
}

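/* Same as above, but for the RX ring size. */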
static int
gve_sysctl_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct gve_priv *priv = arg1;
	int val;
	int err;

	val = priv->rx_desc_cnt;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	err = gve_check_ring_size(priv, val, /*is_rx=*/true);
	if (err != 0)
		return (err);

	if (val != priv->rx_desc_cnt) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/true);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}

	return (err);
}

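/*
 * Register the writable knobs under the device's sysctl tree. The queue
 * count sysctls are always created; the ring size sysctls are created
 * only if priv->modify_ringsize_enabled is set. With unit 0, for
 * example, the queue count could be changed with something like
 * "sysctl dev.gve.0.num_tx_queues=8".
 */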
static void
gve_setup_sysctl_writables(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_priv *priv)
{
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_tx_queues",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    gve_sysctl_num_tx_queues, "I", "Number of TX queues");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_rx_queues",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    gve_sysctl_num_rx_queues, "I", "Number of RX queues");

	if (priv->modify_ringsize_enabled) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_ring_size",
		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
		    gve_sysctl_tx_ring_size, "I", "TX ring size");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_ring_size",
		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
		    gve_sysctl_rx_ring_size, "I", "RX ring size");
	}
}

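/*
 * Attach all of the driver's sysctl nodes under the device's sysctl
 * tree: per-queue stats, adminq stats, main stats, and writable knobs.
 */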
void
gve_setup_sysctl(struct gve_priv *priv)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = priv->dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	gve_setup_queue_stat_sysctl(ctx, child, priv);
	gve_setup_adminq_stat_sysctl(ctx, child, priv);
	gve_setup_main_stat_sysctl(ctx, child, priv);
	gve_setup_sysctl_writables(ctx, child, priv);
}

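/*
 * Sum the per-queue RX and TX counters into the caller-provided
 * accumulators. Values are added to the accumulators, so the caller is
 * expected to have initialized them beforehand.
 */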
void
gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt)
{
	struct gve_rxq_stats *rxqstats;
	struct gve_txq_stats *txqstats;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		rxqstats = &priv->rx[i].stats;
		*rpackets += counter_u64_fetch(rxqstats->rpackets);
		*rbytes += counter_u64_fetch(rxqstats->rbytes);
		*rx_dropped_pkt += counter_u64_fetch(rxqstats->rx_dropped_pkt);
	}

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		txqstats = &priv->tx[i].stats;
		*tpackets += counter_u64_fetch(txqstats->tpackets);
		*tbytes += counter_u64_fetch(txqstats->tbytes);
		*tx_dropped_pkt += counter_u64_fetch(txqstats->tx_dropped_pkt);
	}
}