1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2021 Microsoft Corp. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>

#include "mana_sysctl.h"

/* Forward declaration: handler defined at the bottom of this file. */
static int mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS);

/* Default log verbosity: alerts, warnings and info enabled; tunable below. */
int mana_log_level = MANA_ALERT | MANA_WARNING | MANA_INFO;

/*
 * Requested TX/RX ring sizes (loader tunables / sysctls). Zero means
 * "use driver default"; actual sizes are exposed read-only per port.
 */
unsigned int mana_tx_req_size;
unsigned int mana_rx_req_size;

SYSCTL_NODE(_hw, OID_AUTO, mana, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MANA driver parameters");

SYSCTL_UINT(_hw_mana, OID_AUTO, tx_req_size, CTLFLAG_RWTUN,
    &mana_tx_req_size, 0, "requested number of unit of tx queue");
SYSCTL_UINT(_hw_mana, OID_AUTO, rx_req_size, CTLFLAG_RWTUN,
    &mana_rx_req_size, 0, "requested number of unit of rx queue");

/*
 * Logging level for changing verbosity of the output
 */
SYSCTL_INT(_hw_mana, OID_AUTO, log_level, CTLFLAG_RWTUN,
    &mana_log_level, 0, "Logging level indicating verbosity of the logs");

SYSCTL_CONST_STRING(_hw_mana, OID_AUTO, driver_version, CTLFLAG_RD,
    DRV_MODULE_VERSION, "MANA driver version");

/*
 * Sysctl handler: sum a 64-bit RX statistic across all RX queues.
 *
 * arg1 is the port context, arg2 is the byte offset of a uint64_t field
 * inside struct mana_rxq. Reading returns the sum over all queues;
 * writing any value resets the field to 0 on every queue.
 */
static int
mana_sysctl_rx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	int offset = arg2, i, err;
	struct mana_rxq *rxq;
	uint64_t stat;

	/* Aggregate the per-queue counters at the given field offset. */
	stat = 0;
	for (i = 0; i < apc->num_queues; i++) {
		rxq = apc->rxqs[i];
		stat += *((uint64_t *)((uint8_t *)rxq + offset));
	}

	err = sysctl_handle_64(oidp, &stat, 0, req);
	if (err || req->newptr == NULL)
		return err;

	/* A write (newptr != NULL) clears the counter on every queue. */
	for (i = 0; i < apc->num_queues; i++) {
		rxq = apc->rxqs[i];
		*((uint64_t *)((uint8_t *)rxq + offset)) = 0;
	}
	return 0;
}

/*
 * Sysctl handler: report a 16-bit RX-queue field widened to 64 bits.
 *
 * Only queue 0 is sampled — presumably the value is identical across
 * queues (an LRO configuration limit rather than a counter); confirm
 * against the registration sites below. Registered CTLFLAG_RD, so the
 * newptr write path should not normally be taken.
 */
static int
mana_sysctl_rx_stat_u16(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	int offset = arg2, err;
	struct mana_rxq *rxq;
	uint64_t stat;
	uint16_t val;

	rxq = apc->rxqs[0];
	val = *((uint16_t *)((uint8_t *)rxq + offset));
	stat = val;

	err = sysctl_handle_64(oidp, &stat, 0, req);
	if (err || req->newptr == NULL)
		return err;
	else
		return 0;
}

/*
 * Sysctl handler: report a 32-bit RX-queue field widened to 64 bits.
 * Same single-queue sampling and read-only semantics as the u16 variant.
 */
static int
mana_sysctl_rx_stat_u32(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	int offset = arg2, err;
	struct mana_rxq *rxq;
	uint64_t stat;
	uint32_t val;

	rxq = apc->rxqs[0];
	val = *((uint32_t *)((uint8_t *)rxq + offset));
	stat = val;

	err = sysctl_handle_64(oidp, &stat, 0, req);
	if (err || req->newptr == NULL)
		return err;
	else
		return 0;
}

/*
 * Sysctl handler: sum a 64-bit TX statistic across all TX queues.
 * Mirrors mana_sysctl_rx_stat_agg_u64, but walks apc->tx_qp[i].txq;
 * a write resets the field on every TX queue.
 */
static int
mana_sysctl_tx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	int offset = arg2, i, err;
	struct mana_txq *txq;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		stat += *((uint64_t *)((uint8_t *)txq + offset));
	}

	err = sysctl_handle_64(oidp, &stat, 0, req);
	if (err || req->newptr == NULL)
		return err;

	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		*((uint64_t *)((uint8_t *)txq + offset)) = 0;
	}
	return 0;
}

/*
 * Create the per-port sysctl subtree ("portN" under the device's tree):
 * tunables (altq enable, CPU binding), actual queue sizes, the aggregate
 * port_stats counters, and the LRO/TSO proc-backed statistics.
 *
 * Nodes are created in the device's own sysctl context, so they live for
 * the lifetime of the device. apc->port_list is saved for the queue
 * subtrees added later by mana_sysctl_add_queues().
 */
void
mana_sysctl_add_port(struct mana_port_context *apc)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	device_t dev = gc->dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct mana_port_stats *port_stats;
	char node_name[32];

	struct sysctl_oid *port_node, *stats_node;
	struct sysctl_oid_list *stats_list;

	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	port_stats = &apc->port_stats;

	snprintf(node_name, 32, "port%d", apc->port_idx);

	port_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    node_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Port Name");
	apc->port_list = SYSCTL_CHILDREN(port_node);

	SYSCTL_ADD_BOOL(ctx, apc->port_list, OID_AUTO,
	    "enable_altq", CTLFLAG_RW, &apc->enable_tx_altq, 0,
	    "Choose alternative txq under heavy load");

	/* Effective ring sizes (read-only; requested sizes are hw.mana.*). */
	SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO,
	    "tx_queue_size", CTLFLAG_RD, &apc->tx_queue_size, 0,
	    "number of unit of tx queue");

	SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO,
	    "rx_queue_size", CTLFLAG_RD, &apc->rx_queue_size, 0,
	    "number of unit of rx queue");

	SYSCTL_ADD_PROC(ctx, apc->port_list, OID_AUTO,
	    "bind_cleanup_thread_cpu",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    apc, 0, mana_sysctl_cleanup_thread_cpu, "I",
	    "Bind cleanup thread to a cpu. 0 disables it.");

	stats_node = SYSCTL_ADD_NODE(ctx, apc->port_list, OID_AUTO,
	    "port_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Statistics of port");
	stats_list = SYSCTL_CHILDREN(stats_node);

	/* Aggregate counter(9)-backed port counters. */
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_packets",
	    CTLFLAG_RD, &port_stats->rx_packets, "Packets received");
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_packets",
	    CTLFLAG_RD, &port_stats->tx_packets, "Packets transmitted");
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_bytes",
	    CTLFLAG_RD, &port_stats->rx_bytes, "Bytes received");
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_bytes",
	    CTLFLAG_RD, &port_stats->tx_bytes, "Bytes transmitted");
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "rx_drops",
	    CTLFLAG_RD, &port_stats->rx_drops, "Receive packet drops");
	SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_drops",
	    CTLFLAG_RD, &port_stats->tx_drops, "Transmit packet drops");

	/*
	 * LRO counters summed over all RX queues; field offsets into
	 * struct mana_rxq are passed as arg2 to the handler.
	 */
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_queued),
	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_flushed),
	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_bad_csum",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_bad_csum),
	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO bad checksum");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_tried",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro_tried),
	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO tried");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_failed",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro_failed),
	    mana_sysctl_rx_stat_agg_u64, "LU", "LRO failed");

	/* LRO limits: sampled from queue 0 only (u16/u32 handlers). */
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_ackcnt_lim),
	    mana_sysctl_rx_stat_u16,
	    "LU", "Max # of ACKs to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_length_lim",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_length_lim),
	    mana_sysctl_rx_stat_u32,
	    "LU", "Max len of aggregated data in byte by LRO");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_cnt",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_rxq, lro.lro_cnt),
	    mana_sysctl_rx_stat_u32,
	    "LU", "Max # or LRO packet count");

	/* TSO counters summed over all TX queues. */
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_packets",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_txq, tso_pkts),
	    mana_sysctl_tx_stat_agg_u64, "LU", "TSO packets");
	SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_bytes",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
	    __offsetof(struct mana_txq, tso_bytes),
	    mana_sysctl_tx_stat_agg_u64, "LU", "TSO bytes");
}

/*
 * Create per-queue statistics subtrees ("queueN/txq", "queueN/rxq")
 * under the port node built by mana_sysctl_add_port().
 *
 * These use the separate context apc->que_sysctl_ctx (initialized here)
 * rather than the device context, so they can be torn down independently
 * via mana_sysctl_free_queues() — e.g. when queue count changes — without
 * destroying the port-level nodes.
 */
void
mana_sysctl_add_queues(struct mana_port_context *apc)
{
	struct sysctl_ctx_list *ctx = &apc->que_sysctl_ctx;
	struct sysctl_oid_list *child = apc->port_list;

	struct sysctl_oid *queue_node, *tx_node, *rx_node;
	struct sysctl_oid_list *queue_list, *tx_list, *rx_list;
	struct mana_txq *txq;
	struct mana_rxq *rxq;
	struct mana_stats *tx_stats, *rx_stats;
	char que_name[32];
	int i;

	sysctl_ctx_init(ctx);

	for (i = 0; i < apc->num_queues; i++) {
		rxq = apc->rxqs[i];
		txq = &apc->tx_qp[i].txq;

		snprintf(que_name, 32, "queue%d", i);

		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
		    que_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		/* TX stats */
		tx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
		    "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX queue");
		tx_list = SYSCTL_CHILDREN(tx_node);

		tx_stats = &txq->stats;

		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "count",
		    CTLFLAG_RD, &tx_stats->packets, "Packets sent");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &tx_stats->bytes, "Bytes sent");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_wakeups",
		    CTLFLAG_RD, &tx_stats->wakeup, "Queue wakeups");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "queue_stops",
		    CTLFLAG_RD, &tx_stats->stop, "Queue stops");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO, "mbuf_collapse",
		    CTLFLAG_RD, &tx_stats->collapse, "Mbuf collapse count");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "mbuf_collapse_err", CTLFLAG_RD,
		    &tx_stats->collapse_err, "Mbuf collapse failures");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "dma_mapping_err", CTLFLAG_RD,
		    &tx_stats->dma_mapping_err, "DMA mapping failures");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "alt_chg", CTLFLAG_RD,
		    &tx_stats->alt_chg, "Switch to alternative txq");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "alt_reset", CTLFLAG_RD,
		    &tx_stats->alt_reset, "Reset to self txq");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "cqe_err", CTLFLAG_RD,
		    &tx_stats->cqe_err, "Error CQE count");
		SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
		    "cqe_unknown_type", CTLFLAG_RD,
		    &tx_stats->cqe_unknown_type, "Unknown CQE count");

		/* RX stats */
		rx_node = SYSCTL_ADD_NODE(ctx, queue_list, OID_AUTO,
		    "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX queue");
		rx_list = SYSCTL_CHILDREN(rx_node);

		rx_stats = &rxq->stats;

		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "count",
		    CTLFLAG_RD, &rx_stats->packets, "Packets received");
		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_stats->bytes, "Bytes received");
		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
		    "mbuf_alloc_fail", CTLFLAG_RD,
		    &rx_stats->mbuf_alloc_fail, "Failed mbuf allocs");
		SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
		    "dma_mapping_err", CTLFLAG_RD,
		    &rx_stats->dma_mapping_err, "DMA mapping errors");
	}
}

/*
 * Free all queues' sysctl trees attached to the port's tree.
 */
void
mana_sysctl_free_queues(struct mana_port_context *apc)
{
	sysctl_ctx_free(&apc->que_sysctl_ctx);
}

/*
 * Sysctl handler for "bind_cleanup_thread_cpu".
 *
 * Reads report the current apc->bind_cleanup_thread_cpu value. A write
 * of any nonzero byte enables binding, zero disables it; if the setting
 * actually changes, the port is restarted via mana_restart() to apply it.
 * The old buffer is wired first so the sysctl copyout cannot fault while
 * the value is being handled.
 *
 * NOTE(review): the != comparison against the bool bind_cpu suggests
 * bind_cleanup_thread_cpu is a boolean flag, not a CPU id — confirm
 * against its declaration in the port context.
 */
static int
mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	bool bind_cpu = false;
	uint8_t val;
	int err;

	val = 0;
	err = sysctl_wire_old_buffer(req, sizeof(val));
	if (err == 0) {
		val = apc->bind_cleanup_thread_cpu;
		err = sysctl_handle_8(oidp, &val, 0, req);
	}

	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val != 0)
		bind_cpu = true;

	if (bind_cpu != apc->bind_cleanup_thread_cpu) {
		apc->bind_cleanup_thread_cpu = bind_cpu;
		err = mana_restart(apc);
	}

	return (err);
}