/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 30 #include <sys/cdefs.h> 31 #include "mana_sysctl.h" 32 33 static int mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS); 34 35 int mana_log_level = MANA_ALERT | MANA_WARNING | MANA_INFO; 36 37 unsigned int mana_tx_req_size; 38 unsigned int mana_rx_req_size; 39 unsigned int mana_rx_refill_threshold; 40 41 SYSCTL_NODE(_hw, OID_AUTO, mana, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 42 "MANA driver parameters"); 43 44 SYSCTL_UINT(_hw_mana, OID_AUTO, tx_req_size, CTLFLAG_RWTUN, 45 &mana_tx_req_size, 0, "requested number of unit of tx queue"); 46 SYSCTL_UINT(_hw_mana, OID_AUTO, rx_req_size, CTLFLAG_RWTUN, 47 &mana_rx_req_size, 0, "requested number of unit of rx queue"); 48 SYSCTL_UINT(_hw_mana, OID_AUTO, rx_refill_thresh, CTLFLAG_RWTUN, 49 &mana_rx_refill_threshold, 0, 50 "number of rx slots before starting the refill"); 51 52 /* 53 * Logging level for changing verbosity of the output 54 */ 55 SYSCTL_INT(_hw_mana, OID_AUTO, log_level, CTLFLAG_RWTUN, 56 &mana_log_level, 0, "Logging level indicating verbosity of the logs"); 57 58 SYSCTL_CONST_STRING(_hw_mana, OID_AUTO, driver_version, CTLFLAG_RD, 59 DRV_MODULE_VERSION, "MANA driver version"); 60 61 static int 62 mana_sysctl_rx_stat_agg_u64(SYSCTL_HANDLER_ARGS) 63 { 64 struct mana_port_context *apc = arg1; 65 int offset = arg2, i, err; 66 struct mana_rxq *rxq; 67 uint64_t stat; 68 69 stat = 0; 70 for (i = 0; i < apc->num_queues; i++) { 71 rxq = apc->rxqs[i]; 72 stat += *((uint64_t *)((uint8_t *)rxq + offset)); 73 } 74 75 err = sysctl_handle_64(oidp, &stat, 0, req); 76 if (err || req->newptr == NULL) 77 return err; 78 79 for (i = 0; i < apc->num_queues; i++) { 80 rxq = apc->rxqs[i]; 81 *((uint64_t *)((uint8_t *)rxq + offset)) = 0; 82 } 83 return 0; 84 } 85 86 static int 87 mana_sysctl_rx_stat_u16(SYSCTL_HANDLER_ARGS) 88 { 89 struct mana_port_context *apc = arg1; 90 int offset = arg2, err; 91 struct mana_rxq *rxq; 92 uint64_t stat; 93 uint16_t val; 94 95 rxq = apc->rxqs[0]; 96 val = *((uint16_t *)((uint8_t *)rxq + offset)); 97 
stat = val; 98 99 err = sysctl_handle_64(oidp, &stat, 0, req); 100 if (err || req->newptr == NULL) 101 return err; 102 else 103 return 0; 104 } 105 106 static int 107 mana_sysctl_rx_stat_u32(SYSCTL_HANDLER_ARGS) 108 { 109 struct mana_port_context *apc = arg1; 110 int offset = arg2, err; 111 struct mana_rxq *rxq; 112 uint64_t stat; 113 uint32_t val; 114 115 rxq = apc->rxqs[0]; 116 val = *((uint32_t *)((uint8_t *)rxq + offset)); 117 stat = val; 118 119 err = sysctl_handle_64(oidp, &stat, 0, req); 120 if (err || req->newptr == NULL) 121 return err; 122 else 123 return 0; 124 } 125 126 static int 127 mana_sysctl_tx_stat_agg_u64(SYSCTL_HANDLER_ARGS) 128 { 129 struct mana_port_context *apc = arg1; 130 int offset = arg2, i, err; 131 struct mana_txq *txq; 132 uint64_t stat; 133 134 stat = 0; 135 for (i = 0; i < apc->num_queues; i++) { 136 txq = &apc->tx_qp[i].txq; 137 stat += *((uint64_t *)((uint8_t *)txq + offset)); 138 } 139 140 err = sysctl_handle_64(oidp, &stat, 0, req); 141 if (err || req->newptr == NULL) 142 return err; 143 144 for (i = 0; i < apc->num_queues; i++) { 145 txq = &apc->tx_qp[i].txq; 146 *((uint64_t *)((uint8_t *)txq + offset)) = 0; 147 } 148 return 0; 149 } 150 151 void 152 mana_sysctl_add_port(struct mana_port_context *apc) 153 { 154 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; 155 device_t dev = gc->dev; 156 struct sysctl_ctx_list *ctx; 157 struct sysctl_oid *tree; 158 struct sysctl_oid_list *child; 159 struct mana_port_stats *port_stats; 160 char node_name[32]; 161 162 struct sysctl_oid *port_node, *stats_node; 163 struct sysctl_oid_list *stats_list; 164 165 ctx = device_get_sysctl_ctx(dev); 166 tree = device_get_sysctl_tree(dev); 167 child = SYSCTL_CHILDREN(tree); 168 169 port_stats = &apc->port_stats; 170 171 snprintf(node_name, 32, "port%d", apc->port_idx); 172 173 port_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 174 node_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Port Name"); 175 apc->port_list = SYSCTL_CHILDREN(port_node); 176 177 
SYSCTL_ADD_BOOL(ctx, apc->port_list, OID_AUTO, 178 "enable_altq", CTLFLAG_RW, &apc->enable_tx_altq, 0, 179 "Choose alternative txq under heavy load"); 180 181 SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO, 182 "tx_queue_size", CTLFLAG_RD, &apc->tx_queue_size, 0, 183 "number of unit of tx queue"); 184 185 SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO, 186 "rx_queue_size", CTLFLAG_RD, &apc->rx_queue_size, 0, 187 "number of unit of rx queue"); 188 189 SYSCTL_ADD_PROC(ctx, apc->port_list, OID_AUTO, 190 "bind_cleanup_thread_cpu", 191 CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, 192 apc, 0, mana_sysctl_cleanup_thread_cpu, "I", 193 "Bind cleanup thread to a cpu. 0 disables it."); 194 195 stats_node = SYSCTL_ADD_NODE(ctx, apc->port_list, OID_AUTO, 196 "port_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 197 "Statistics of port"); 198 stats_list = SYSCTL_CHILDREN(stats_node); 199 200 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_packets", 201 CTLFLAG_RD, &port_stats->rx_packets, "Packets received"); 202 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_packets", 203 CTLFLAG_RD, &port_stats->tx_packets, "Packets transmitted"); 204 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_bytes", 205 CTLFLAG_RD, &port_stats->rx_bytes, "Bytes received"); 206 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_bytes", 207 CTLFLAG_RD, &port_stats->tx_bytes, "Bytes transmitted"); 208 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_drops", 209 CTLFLAG_RD, &port_stats->rx_drops, "Receive packet drops"); 210 SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_drops", 211 CTLFLAG_RD, &port_stats->tx_drops, "Transmit packet drops"); 212 213 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_queued", 214 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc, 215 __offsetof(struct mana_rxq, lro.lro_queued), 216 mana_sysctl_rx_stat_agg_u64, "LU", "LRO queued"); 217 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_flushed", 218 CTLTYPE_U64 | CTLFLAG_RD | 
CTLFLAG_MPSAFE | CTLFLAG_STATS, apc, 219 __offsetof(struct mana_rxq, lro.lro_flushed), 220 mana_sysctl_rx_stat_agg_u64, "LU", "LRO flushed"); 221 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_bad_csum", 222 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc, 223 __offsetof(struct mana_rxq, lro.lro_bad_csum), 224 mana_sysctl_rx_stat_agg_u64, "LU", "LRO bad checksum"); 225 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_tried", 226 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 227 __offsetof(struct mana_rxq, lro_tried), 228 mana_sysctl_rx_stat_agg_u64, "LU", "LRO tried"); 229 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_failed", 230 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 231 __offsetof(struct mana_rxq, lro_failed), 232 mana_sysctl_rx_stat_agg_u64, "LU", "LRO failed"); 233 234 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_ackcnt_lim", 235 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 236 __offsetof(struct mana_rxq, lro.lro_ackcnt_lim), 237 mana_sysctl_rx_stat_u16, 238 "LU", "Max # of ACKs to be aggregated by LRO"); 239 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_length_lim", 240 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 241 __offsetof(struct mana_rxq, lro.lro_length_lim), 242 mana_sysctl_rx_stat_u32, 243 "LU", "Max len of aggregated data in byte by LRO"); 244 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_cnt", 245 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 246 __offsetof(struct mana_rxq, lro.lro_cnt), 247 mana_sysctl_rx_stat_u32, 248 "LU", "Max # or LRO packet count"); 249 250 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_packets", 251 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 252 __offsetof(struct mana_txq, tso_pkts), 253 mana_sysctl_tx_stat_agg_u64, "LU", "TSO packets"); 254 SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_bytes", 255 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc, 256 __offsetof(struct mana_txq, tso_bytes), 257 mana_sysctl_tx_stat_agg_u64, "LU", "TSO bytes"); 258 } 259 260 void 261 
mana_sysctl_add_queues(struct mana_port_context *apc) 262 { 263 struct sysctl_ctx_list *ctx = &apc->que_sysctl_ctx; 264 struct sysctl_oid_list *child = apc->port_list; 265 266 struct sysctl_oid *queue_node, *tx_node, *rx_node; 267 struct sysctl_oid_list *queue_list, *tx_list, *rx_list; 268 struct mana_txq *txq; 269 struct mana_rxq *rxq; 270 struct mana_stats *tx_stats, *rx_stats; 271 char que_name[32]; 272 int i; 273 274 sysctl_ctx_init(ctx); 275 276 for (i = 0; i < apc->num_queues; i++) { 277 rxq = apc->rxqs[i]; 278 txq = &apc->tx_qp[i].txq; 279 280 snprintf(que_name, 32, "queue%d", i); 281 282 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 283 que_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 284 queue_list = SYSCTL_CHILDREN(queue_node); 285 286 /* TX stats */ 287 tx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO, 288 "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX queue"); 289 tx_list = SYSCTL_CHILDREN(tx_node); 290 291 tx_stats = &txq->stats; 292 293 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "count", 294 CTLFLAG_RD, &tx_stats->packets, "Packets sent"); 295 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "bytes", 296 CTLFLAG_RD, &tx_stats->bytes, "Bytes sent"); 297 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_wakeups", 298 CTLFLAG_RD, &tx_stats->wakeup, "Queue wakeups"); 299 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_stops", 300 CTLFLAG_RD, &tx_stats->stop, "Queue stops"); 301 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "mbuf_collapse", 302 CTLFLAG_RD, &tx_stats->collapse, "Mbuf collapse count"); 303 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 304 "mbuf_collapse_err", CTLFLAG_RD, 305 &tx_stats->collapse_err, "Mbuf collapse failures"); 306 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 307 "dma_mapping_err", CTLFLAG_RD, 308 &tx_stats->dma_mapping_err, "DMA mapping failures"); 309 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 310 "alt_chg", CTLFLAG_RD, 311 &tx_stats->alt_chg, "Switch to alternative txq"); 312 
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 313 "alt_reset", CTLFLAG_RD, 314 &tx_stats->alt_reset, "Reset to self txq"); 315 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 316 "cqe_err", CTLFLAG_RD, 317 &tx_stats->cqe_err, "Error CQE count"); 318 SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, 319 "cqe_unknown_type", CTLFLAG_RD, 320 &tx_stats->cqe_unknown_type, "Unknown CQE count"); 321 322 /* RX stats */ 323 rx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO, 324 "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX queue"); 325 rx_list = SYSCTL_CHILDREN(rx_node); 326 327 rx_stats = &rxq->stats; 328 329 SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "count", 330 CTLFLAG_RD, &rx_stats->packets, "Packets received"); 331 SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bytes", 332 CTLFLAG_RD, &rx_stats->bytes, "Bytes received"); 333 SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, 334 "mbuf_alloc_fail", CTLFLAG_RD, 335 &rx_stats->mbuf_alloc_fail, "Failed mbuf allocs"); 336 SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, 337 "partial_refill", CTLFLAG_RD, 338 &rx_stats->partial_refill, "Partially refilled mbuf"); 339 SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, 340 "dma_mapping_err", CTLFLAG_RD, 341 &rx_stats->dma_mapping_err, "DMA mapping errors"); 342 } 343 } 344 345 /* 346 * Free all queues' sysctl trees attached to the port's tree. 
347 */ 348 void 349 mana_sysctl_free_queues(struct mana_port_context *apc) 350 { 351 sysctl_ctx_free(&apc->que_sysctl_ctx); 352 } 353 354 static int 355 mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS) 356 { 357 struct mana_port_context *apc = arg1; 358 bool bind_cpu = false; 359 uint8_t val; 360 int err; 361 362 val = 0; 363 err = sysctl_wire_old_buffer(req, sizeof(val)); 364 if (err == 0) { 365 val = apc->bind_cleanup_thread_cpu; 366 err = sysctl_handle_8(oidp, &val, 0, req); 367 } 368 369 if (err != 0 || req->newptr == NULL) 370 return (err); 371 372 if (val != 0) 373 bind_cpu = true; 374 375 if (bind_cpu != apc->bind_cleanup_thread_cpu) { 376 apc->bind_cleanup_thread_cpu = bind_cpu; 377 err = mana_restart(apc); 378 } 379 380 return (err); 381 } 382