/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"

static SYSCTL_NODE(_hw, OID_AUTO, gve, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "GVE driver parameters");

bool gve_disable_hw_lro = false;
SYSCTL_BOOL(_hw_gve, OID_AUTO, disable_hw_lro, CTLFLAG_RDTUN,
    &gve_disable_hw_lro, 0, "Controls if hardware LRO is used");

bool gve_allow_4k_rx_buffers = false;
SYSCTL_BOOL(_hw_gve, OID_AUTO, allow_4k_rx_buffers, CTLFLAG_RDTUN,
    &gve_allow_4k_rx_buffers, 0, "Controls if 4K RX Buffers are allowed");

char gve_queue_format[8];
SYSCTL_STRING(_hw_gve, OID_AUTO, queue_format, CTLFLAG_RD,
    &gve_queue_format, 0, "Queue format being used by the iface");

char gve_version[8];
SYSCTL_STRING(_hw_gve, OID_AUTO, driver_version, CTLFLAG_RD,
    &gve_version, 0, "Driver version");
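
/*
 * Illustrative usage of the hw.gve knobs registered above (values shown are
 * examples only):
 *
 *   Loader tunables (CTLFLAG_RDTUN), typically set in /boot/loader.conf
 *   before the driver attaches:
 *     hw.gve.disable_hw_lro="1"
 *     hw.gve.allow_4k_rx_buffers="1"
 *
 *   Read-only reporting nodes, queried at runtime:
 *     $ sysctl hw.gve.queue_format hw.gve.driver_version
 */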
static void
gve_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_rx_ring *rxq)
{
        struct sysctl_oid *node;
        struct sysctl_oid_list *list;
        struct gve_rxq_stats *stats;
        char namebuf[16];

        snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->com.id);
        node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
        list = SYSCTL_CHILDREN(node);

        stats = &rxq->stats;

        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_bytes", CTLFLAG_RD,
            &stats->rbytes, "Bytes received");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_packets", CTLFLAG_RD,
            &stats->rpackets, "Packets received");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_copybreak_cnt",
            CTLFLAG_RD, &stats->rx_copybreak_cnt,
            "Total frags with mbufs allocated for copybreak");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_frag_flip_cnt",
            CTLFLAG_RD, &stats->rx_frag_flip_cnt,
            "Total frags that allocated mbuf with page flip");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_frag_copy_cnt",
            CTLFLAG_RD, &stats->rx_frag_copy_cnt,
            "Total frags with mbuf that copied payload into mbuf");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO, "rx_dropped_pkt",
            CTLFLAG_RD, &stats->rx_dropped_pkt,
            "Total rx packets dropped");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_dropped_pkt_desc_err", CTLFLAG_RD,
            &stats->rx_dropped_pkt_desc_err,
            "Packets dropped due to descriptor error");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_dropped_pkt_buf_post_fail", CTLFLAG_RD,
            &stats->rx_dropped_pkt_buf_post_fail,
            "Packets dropped due to failure to post enough buffers");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_dropped_pkt_mbuf_alloc_fail", CTLFLAG_RD,
            &stats->rx_dropped_pkt_mbuf_alloc_fail,
            "Packets dropped due to failed mbuf allocation");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_mbuf_dmamap_err", CTLFLAG_RD,
            &stats->rx_mbuf_dmamap_err,
            "Number of rx mbufs which could not be dma mapped");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_mbuf_mclget_null", CTLFLAG_RD,
            &stats->rx_mbuf_mclget_null,
            "Number of times when there were no cluster mbufs");
        SYSCTL_ADD_U32(ctx, list, OID_AUTO,
            "rx_completed_desc", CTLFLAG_RD,
            &rxq->cnt, 0, "Number of descriptors completed");
        SYSCTL_ADD_U32(ctx, list, OID_AUTO,
            "num_desc_posted", CTLFLAG_RD,
            &rxq->fill_cnt, rxq->fill_cnt,
            "Total number of descriptors posted");
}

static void
gve_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_tx_ring *txq)
{
        struct sysctl_oid *node;
        struct sysctl_oid_list *tx_list;
        struct gve_txq_stats *stats;
        char namebuf[16];

        snprintf(namebuf, sizeof(namebuf), "txq%d", txq->com.id);
        node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
        tx_list = SYSCTL_CHILDREN(node);

        stats = &txq->stats;

        SYSCTL_ADD_U32(ctx, tx_list, OID_AUTO,
            "tx_posted_desc", CTLFLAG_RD,
            &txq->req, 0, "Number of descriptors posted");
        SYSCTL_ADD_U32(ctx, tx_list, OID_AUTO,
            "tx_completed_desc", CTLFLAG_RD,
            &txq->done, 0, "Number of descriptors completed");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_packets", CTLFLAG_RD,
            &stats->tpackets, "Packets transmitted");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_tso_packets", CTLFLAG_RD,
            &stats->tso_packet_cnt, "TSO Packets transmitted");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_bytes", CTLFLAG_RD,
            &stats->tbytes, "Bytes transmitted");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_delayed_pkt_nospace_device", CTLFLAG_RD,
            &stats->tx_delayed_pkt_nospace_device,
            "Packets delayed due to no space in device");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_dropped_pkt_nospace_bufring", CTLFLAG_RD,
            &stats->tx_dropped_pkt_nospace_bufring,
            "Packets dropped due to no space in br ring");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_dropped_pkt_vlan", CTLFLAG_RD,
            &stats->tx_dropped_pkt_vlan,
            "Dropped VLAN packets");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_delayed_pkt_nospace_descring", CTLFLAG_RD,
            &stats->tx_delayed_pkt_nospace_descring,
            "Packets delayed due to no space in desc ring");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_delayed_pkt_nospace_compring", CTLFLAG_RD,
            &stats->tx_delayed_pkt_nospace_compring,
            "Packets delayed due to no space in comp ring");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_delayed_pkt_nospace_qpl_bufs", CTLFLAG_RD,
            &stats->tx_delayed_pkt_nospace_qpl_bufs,
            "Packets delayed due to not enough qpl bufs");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_delayed_pkt_tsoerr", CTLFLAG_RD,
            &stats->tx_delayed_pkt_tsoerr,
            "TSO packets delayed due to errors in TSO prep");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_collapse", CTLFLAG_RD,
            &stats->tx_mbuf_collapse,
            "tx mbufs that had to be collapsed");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_defrag", CTLFLAG_RD,
            &stats->tx_mbuf_defrag,
            "tx mbufs that had to be defragged");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_defrag_err", CTLFLAG_RD,
            &stats->tx_mbuf_defrag_err,
            "tx mbufs that failed defrag");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_dmamap_enomem_err", CTLFLAG_RD,
            &stats->tx_mbuf_dmamap_enomem_err,
            "tx mbufs that could not be dma-mapped due to low mem");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_dmamap_err", CTLFLAG_RD,
            &stats->tx_mbuf_dmamap_err,
            "tx mbufs that could not be dma-mapped");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_timeout", CTLFLAG_RD,
            &stats->tx_timeout,
            "detections of timed out packets on tx queues");
}

static void
gve_setup_queue_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child,
    struct gve_priv *priv)
{
        int i;

        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                gve_setup_rxq_sysctl(ctx, child, &priv->rx[i]);
        }
        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                gve_setup_txq_sysctl(ctx, child, &priv->tx[i]);
        }
}

static void
gve_setup_adminq_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_priv *priv)
{
        struct sysctl_oid *admin_node;
        struct sysctl_oid_list *admin_list;

        /* Admin queue stats */
        admin_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "adminq_stats",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue statistics");
        admin_list = SYSCTL_CHILDREN(admin_node);

        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_prod_cnt", CTLFLAG_RD,
            &priv->adminq_prod_cnt, 0, "Adminq Commands issued");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_cmd_fail", CTLFLAG_RD,
            &priv->adminq_cmd_fail, 0, "Adminq Failed commands");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_timeouts", CTLFLAG_RD,
            &priv->adminq_timeouts, 0, "Adminq Timed-out commands");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_describe_device_cnt",
            CTLFLAG_RD, &priv->adminq_describe_device_cnt, 0,
            "adminq_describe_device_cnt");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
            "adminq_cfg_device_resources_cnt", CTLFLAG_RD,
            &priv->adminq_cfg_device_resources_cnt, 0,
            "adminq_cfg_device_resources_cnt");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
            "adminq_register_page_list_cnt", CTLFLAG_RD,
            &priv->adminq_register_page_list_cnt, 0,
            "adminq_register_page_list_cnt");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
            "adminq_unregister_page_list_cnt", CTLFLAG_RD,
            &priv->adminq_unregister_page_list_cnt, 0,
            "adminq_unregister_page_list_cnt");
        SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_create_tx_queue_cnt",
            CTLFLAG_RD, &priv->adminq_create_tx_queue_cnt, 0,
            "adminq_create_tx_queue_cnt");
"adminq_create_rx_queue_cnt", 251 CTLFLAG_RD, &priv->adminq_create_rx_queue_cnt, 0, 252 "adminq_create_rx_queue_cnt"); 253 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_destroy_tx_queue_cnt", 254 CTLFLAG_RD, &priv->adminq_destroy_tx_queue_cnt, 0, 255 "adminq_destroy_tx_queue_cnt"); 256 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_destroy_rx_queue_cnt", 257 CTLFLAG_RD, &priv->adminq_destroy_rx_queue_cnt, 0, 258 "adminq_destroy_rx_queue_cnt"); 259 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_get_ptype_map_cnt", 260 CTLFLAG_RD, &priv->adminq_get_ptype_map_cnt, 0, 261 "adminq_get_ptype_map_cnt"); 262 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, 263 "adminq_dcfg_device_resources_cnt", CTLFLAG_RD, 264 &priv->adminq_dcfg_device_resources_cnt, 0, 265 "adminq_dcfg_device_resources_cnt"); 266 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, 267 "adminq_set_driver_parameter_cnt", CTLFLAG_RD, 268 &priv->adminq_set_driver_parameter_cnt, 0, 269 "adminq_set_driver_parameter_cnt"); 270 SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, 271 "adminq_verify_driver_compatibility_cnt", CTLFLAG_RD, 272 &priv->adminq_verify_driver_compatibility_cnt, 0, 273 "adminq_verify_driver_compatibility_cnt"); 274 } 275 276 static void 277 gve_setup_main_stat_sysctl(struct sysctl_ctx_list *ctx, 278 struct sysctl_oid_list *child, struct gve_priv *priv) 279 { 280 struct sysctl_oid *main_node; 281 struct sysctl_oid_list *main_list; 282 283 /* Main stats */ 284 main_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "main_stats", 285 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Main statistics"); 286 main_list = SYSCTL_CHILDREN(main_node); 287 288 SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "interface_up_cnt", CTLFLAG_RD, 289 &priv->interface_up_cnt, 0, "Times interface was set to up"); 290 SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "interface_down_cnt", CTLFLAG_RD, 291 &priv->interface_down_cnt, 0, "Times interface was set to down"); 292 SYSCTL_ADD_U32(ctx, main_list, OID_AUTO, "reset_cnt", CTLFLAG_RD, 293 &priv->reset_cnt, 0, "Times reset"); 294 } 295 296 static int 297 gve_check_num_queues(struct gve_priv *priv, int val, bool is_rx) 298 { 299 if (val < 1) { 300 device_printf(priv->dev, 301 "Requested num queues (%u) must be a positive integer\n", val); 302 return (EINVAL); 303 } 304 305 if (val > (is_rx ? 
        if (val > (is_rx ? priv->rx_cfg.max_queues : priv->tx_cfg.max_queues)) {
                device_printf(priv->dev,
                    "Requested num queues (%d) is too large\n", val);
                return (EINVAL);
        }

        return (0);
}

static int
gve_sysctl_num_tx_queues(SYSCTL_HANDLER_ARGS)
{
        struct gve_priv *priv = arg1;
        int val;
        int err;

        val = priv->tx_cfg.num_queues;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        err = gve_check_num_queues(priv, val, /*is_rx=*/false);
        if (err != 0)
                return (err);

        if (val != priv->tx_cfg.num_queues) {
                GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
                err = gve_adjust_tx_queues(priv, val);
                GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
        }

        return (err);
}

static int
gve_sysctl_num_rx_queues(SYSCTL_HANDLER_ARGS)
{
        struct gve_priv *priv = arg1;
        int val;
        int err;

        val = priv->rx_cfg.num_queues;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        err = gve_check_num_queues(priv, val, /*is_rx=*/true);
        if (err != 0)
                return (err);

        if (val != priv->rx_cfg.num_queues) {
                GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
                err = gve_adjust_rx_queues(priv, val);
                GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
        }

        return (err);
}

static int
gve_check_ring_size(struct gve_priv *priv, int val, bool is_rx)
{
        if (!powerof2(val) || val == 0) {
                device_printf(priv->dev,
                    "Requested ring size (%d) must be a power of 2\n", val);
                return (EINVAL);
        }

        if (val < (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt)) {
                device_printf(priv->dev,
                    "Requested ring size (%d) cannot be less than %d\n", val,
                    (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt));
                return (EINVAL);
        }

        if (val > (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt)) {
                device_printf(priv->dev,
                    "Requested ring size (%d) cannot be greater than %d\n", val,
                    (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt));
                return (EINVAL);
        }

        return (0);
}

static int
gve_sysctl_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
        struct gve_priv *priv = arg1;
        int val;
        int err;

        val = priv->tx_desc_cnt;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        err = gve_check_ring_size(priv, val, /*is_rx=*/false);
        if (err != 0)
                return (err);

        if (val != priv->tx_desc_cnt) {
                GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
                err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/false);
                GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
        }

        return (err);
}

static int
gve_sysctl_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
        struct gve_priv *priv = arg1;
        int val;
        int err;

        val = priv->rx_desc_cnt;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        err = gve_check_ring_size(priv, val, /*is_rx=*/true);
        if (err != 0)
                return (err);

        if (val != priv->rx_desc_cnt) {
                GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
                err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/true);
                GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
        }

        return (err);
}
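
/*
 * The writable nodes registered below live under the per-device tree
 * (dev.gve.<unit>). Illustrative example only; the unit number and values
 * are hypothetical, requests are validated against device limits, and the
 * ring-size nodes exist only when the device supports ring resizing:
 *
 *   # sysctl dev.gve.0.num_rx_queues=4
 *   # sysctl dev.gve.0.tx_ring_size=1024
 */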

static void
gve_setup_sysctl_writables(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct gve_priv *priv)
{
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_tx_queues",
            CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
            gve_sysctl_num_tx_queues, "I", "Number of TX queues");

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_rx_queues",
            CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
            gve_sysctl_num_rx_queues, "I", "Number of RX queues");

        if (priv->modify_ringsize_enabled) {
                SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_ring_size",
                    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
                    gve_sysctl_tx_ring_size, "I", "TX ring size");

                SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_ring_size",
                    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
                    gve_sysctl_rx_ring_size, "I", "RX ring size");
        }
}

void
gve_setup_sysctl(struct gve_priv *priv)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = priv->dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        gve_setup_queue_stat_sysctl(ctx, child, priv);
        gve_setup_adminq_stat_sysctl(ctx, child, priv);
        gve_setup_main_stat_sysctl(ctx, child, priv);
        gve_setup_sysctl_writables(ctx, child, priv);
}

/* Accumulate the per-queue counters into interface-level totals. */
void
gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt)
{
        struct gve_rxq_stats *rxqstats;
        struct gve_txq_stats *txqstats;
        int i;

        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                rxqstats = &priv->rx[i].stats;
                *rpackets += counter_u64_fetch(rxqstats->rpackets);
                *rbytes += counter_u64_fetch(rxqstats->rbytes);
                *rx_dropped_pkt += counter_u64_fetch(rxqstats->rx_dropped_pkt);
        }

        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                txqstats = &priv->tx[i].stats;
                *tpackets += counter_u64_fetch(txqstats->tpackets);
                *tbytes += counter_u64_fetch(txqstats->tbytes);
                *tx_dropped_pkt += counter_u64_fetch(txqstats->tx_dropped_pkt);
        }
}