/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

#include "nvme_private.h"

int nvme_use_nvd = 1;

SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD, 0, "NVMe sysctl tunables");
SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
    &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");

/*
 * CTLTYPE_S64 and sysctl_handle_64 were added in r217616.  Define these
 * explicitly here for older kernels that don't include the r217616
 * changeset.
 */
#ifndef CTLTYPE_S64
#define CTLTYPE_S64		CTLTYPE_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#endif

static void
nvme_dump_queue(struct nvme_qpair *qpair)
{
	struct nvme_completion *cpl;
	struct nvme_command *cmd;
	int i;

	printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);

	printf("Completion queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cpl = &qpair->cpl[i];
		printf("%05d: ", i);
		nvme_dump_completion(cpl);
	}

	printf("Submission queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cmd = &qpair->cmd[i];
		printf("%05d: ", i);
		nvme_dump_command(cmd);
	}
}


static int
nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
{
	struct nvme_qpair *qpair = arg1;
	uint32_t val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0)
		nvme_dump_queue(qpair);

	return (0);
}

static int
nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_time;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_time)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

static int
nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_threshold;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_threshold)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

static int
nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->timeout_period;
	int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);

	if (error)
		return (error);

	if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
	    ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
		ctrlr->timeout_period = oldval;
		return (EINVAL);
	}

	return (0);
}

static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
}

static int
nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_cmds = 0;
	int i;

	num_cmds = ctrlr->adminq.num_cmds;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_cmds += ctrlr->ioq[i].num_cmds;

	return (sysctl_handle_64(oidp, &num_cmds, 0, req));
}

static int
nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_intr_handler_calls = 0;
	int i;

	num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;

	return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
}

static int
nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t i, val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0) {
		nvme_qpair_reset_stats(&ctrlr->adminq);

		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_reset_stats(&ctrlr->ioq[i]);
	}

	return (0);
}


static void
nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
    struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
{
	struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
	    CTLFLAG_RD, &qpair->num_entries, 0,
	    "Number of entries in hardware queue");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
	    CTLFLAG_RD, &qpair->num_trackers, 0,
	    "Number of trackers pre-allocated for this queue pair");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
	    CTLFLAG_RD, &qpair->sq_head, 0,
	    "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
	    CTLFLAG_RD, &qpair->sq_tail, 0,
	    "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
	    CTLFLAG_RD, &qpair->cq_head, 0,
	    "Current head of completion queue (as observed by driver)");

	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
	    CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
	    CTLFLAG_RD, &qpair->num_intr_handler_calls,
	    "Number of times interrupt handler was invoked (will typically be "
	    "less than number of actual interrupts generated due to "
	    "coalescing)");

	SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
	    "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
	    nvme_sysctl_dump_debug, "IU", "Dump debug data");
}

void
nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
{
	struct sysctl_ctx_list *ctrlr_ctx;
	struct sysctl_oid *ctrlr_tree, *que_tree;
	struct sysctl_oid_list *ctrlr_list;
#define QUEUE_NAME_LENGTH	16
	char queue_name[QUEUE_NAME_LENGTH];
	int i;

	ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
	ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
	ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cpus_per_ioq",
	    CTLFLAG_RD, &ctrlr->num_cpus_per_ioq, 0,
	    "Number of CPUs assigned per I/O queue pair");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
	    nvme_sysctl_int_coal_time, "IU",
	    "Interrupt coalescing timeout (in microseconds)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
	    nvme_sysctl_int_coal_threshold, "IU",
	    "Interrupt coalescing threshold");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
	    nvme_sysctl_timeout_period, "IU",
	    "Timeout period (in seconds)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
	    ctrlr, 0, nvme_sysctl_num_cmds, "IU",
	    "Number of commands submitted");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
	    ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
	    "Number of times interrupt handler was invoked (will "
	    "typically be less than number of actual interrupts "
	    "generated due to coalescing)");

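	/*
	 * Writing any non-zero value to reset_stats clears the num_cmds and
	 * num_intr_handler_calls counters on the admin queue and on every
	 * I/O queue pair (see nvme_sysctl_reset_stats above).
	 */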
	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
	    nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");

	que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
	    CTLFLAG_RD, NULL, "Admin Queue");

	nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
		que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    queue_name, CTLFLAG_RD, NULL, "IO Queue");
		nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
		    que_tree);
	}
}
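
/*
 * Example usage (illustrative only, not part of the driver): the
 * per-controller OIDs created above hang off the device's sysctl tree,
 * which on FreeBSD appears as dev.nvme.<unit>.* (the unit number shown
 * below is an assumption; it depends on probe order), while
 * hw.nvme.use_nvd is a boot-time tunable.  For example:
 *
 *	# read the command timeout (seconds)
 *	sysctl dev.nvme.0.timeout_period
 *
 *	# change the interrupt coalescing time (microseconds)
 *	sysctl dev.nvme.0.int_coal_time=100
 *
 *	# dump the admin queue contents to the console
 *	sysctl dev.nvme.0.adminq.dump_debug=1
 *
 *	# expose namespaces via nda(4) instead of nvd(4); set in
 *	# /boot/loader.conf
 *	hw.nvme.use_nvd="0"
 */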