/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in the Admin Queue Attributes section of the spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O commands
 * that we will allow to be outstanding on an I/O qpair at any time.  The only
 * advantage in having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 * when dumping the contents of the submission and completion queues, it will
 * show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified by CAP.MQES
 * for each controller.
 */
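
/*
 * Example (a sketch, not driver code): CAP.MQES occupies bits 15:0 of the
 * CAP_LO register and is 0's based, so a per-controller ceiling on I/O
 * queue entries could be derived roughly as follows, using the
 * nvme_mmio_read_4() accessor defined later in this file:
 *
 *	uint32_t mqes, num_entries;
 *
 *	mqes = nvme_mmio_read_4(ctrlr, cap_lo) & 0xffff;
 *	num_entries = min(NVME_IO_ENTRIES, mqes + 1);
 */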
#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_ADMIN_TIMEOUT_PERIOD	(60)	/* in seconds */
#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	struct memdesc			payload;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	bool				payload_valid;
	bool				timeout;
	bool				spare[2];	/* Future use */
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	sbintime_t			deadline;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,	/* Normal operations */
	RECOVERY_WAITING,	/* waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	struct callout		timer;		/* protected by recovery lock */
	bool			timer_armed;	/* protected by recovery lock */
	enum nvme_recovery	recovery_state;	/* protected by recovery lock */

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;
	int64_t			num_recovery_nolock;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx_padalign	lock;
	struct mtx_padalign	recovery;
} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller	*ctrlr;
	struct nvme_namespace_data data;
	uint32_t		id;
	uint32_t		flags;
	struct cdev		*cdev;
	void			*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t		boundary;
	struct mtx		lock;
};
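
/*
 * Sketch of how the phase/cq_head fields of struct nvme_qpair above are
 * intended to interact (an illustration, not the actual completion path):
 * a completion entry is fresh only while its phase bit matches the qpair's
 * current phase, and the phase flips each time cq_head wraps around:
 *
 *	struct nvme_completion *cpl = &qpair->cpl[qpair->cq_head];
 *
 *	while (NVME_STATUS_GET_P(cpl->status) == qpair->phase) {
 *		... complete the tracker in qpair->act_tr[cpl->cid] ...
 *		if (++qpair->cq_head == qpair->num_entries) {
 *			qpair->cq_head = 0;
 *			qpair->phase = !qpair->phase;
 *		}
 *		cpl = &qpair->cpl[qpair->cq_head];
 *	}
 */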
/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */
#define QUIRK_INTEL_ALIGNMENT	4	/* Pre NVMe 1.3 performance alignment */
#define QUIRK_AHCI		8	/* Attached via AHCI redirect */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** cached values of the CAP_LO and CAP_HI capability registers */
	uint32_t		cap_lo;
	uint32_t		cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t		page_size;
	uint32_t		mps;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		admin_timeout_period;
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	bool			is_dying;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t	hmbc_map;
		void		*hmbc_vaddr;
		uint64_t	hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)
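
/*
 * These accessors take a register name from struct nvme_registers.  A
 * typical usage sketch (illustrative, not a definitive sequence) - reading
 * the low capabilities word and programming the admin queue base addresses:
 *
 *	uint32_t cap_lo;
 *
 *	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 */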
#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
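
/*
 * The admin commands above are asynchronous; a caller that needs
 * synchronous behavior pairs them with nvme_completion_poll_cb and
 * nvme_completion_poll() (defined below).  A sketch of that idiom, with
 * error handling elided:
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify controller failed\n");
 */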
/* ctrlr is passed as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_qpair_print_command(struct nvme_qpair *qpair,
				 struct nvme_command *cmd);
void	nvme_qpair_print_completion(struct nvme_qpair *qpair,
				    struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);
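
/*
 * Request flow in brief (an illustrative sketch): a caller builds a
 * struct nvme_request with one of the inline allocators below, fills in
 * the command, and submits it to the controller.  The NVME_OPC_GET_LOG_PAGE
 * opcode here is just an example:
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(buf, len, cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */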
464 */ 465 static __inline 466 void 467 nvme_completion_poll(struct nvme_completion_poll_status *status) 468 { 469 int timeout = ticks + 10 * hz; 470 sbintime_t delta_t = SBT_1US; 471 472 while (!atomic_load_acq_int(&status->done)) { 473 if (timeout - ticks < 0) 474 panic("NVME polled command failed to complete within 10s."); 475 pause_sbt("nvme", delta_t, 0, C_PREL(1)); 476 delta_t = min(SBT_1MS, delta_t * 3 / 2); 477 } 478 } 479 480 static __inline void 481 nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 482 { 483 uint64_t *bus_addr = (uint64_t *)arg; 484 485 KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg)); 486 if (error != 0) 487 printf("nvme_single_map err %d\n", error); 488 *bus_addr = seg[0].ds_addr; 489 } 490 491 static __inline struct nvme_request * 492 _nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg) 493 { 494 struct nvme_request *req; 495 496 req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO); 497 if (req != NULL) { 498 req->cb_fn = cb_fn; 499 req->cb_arg = cb_arg; 500 req->timeout = true; 501 } 502 return (req); 503 } 504 505 static __inline struct nvme_request * 506 nvme_allocate_request_vaddr(void *payload, uint32_t payload_size, 507 nvme_cb_fn_t cb_fn, void *cb_arg) 508 { 509 struct nvme_request *req; 510 511 req = _nvme_allocate_request(cb_fn, cb_arg); 512 if (req != NULL) { 513 req->payload = memdesc_vaddr(payload, payload_size); 514 req->payload_valid = true; 515 } 516 return (req); 517 } 518 519 static __inline struct nvme_request * 520 nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg) 521 { 522 struct nvme_request *req; 523 524 req = _nvme_allocate_request(cb_fn, cb_arg); 525 return (req); 526 } 527 528 static __inline struct nvme_request * 529 nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg) 530 { 531 struct nvme_request *req; 532 533 req = _nvme_allocate_request(cb_fn, cb_arg); 534 if (req != NULL) { 535 req->payload = memdesc_bio(bio); 536 req->payload_valid = true; 537 } 538 return (req); 539 } 540 541 static __inline struct nvme_request * 542 nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg) 543 { 544 struct nvme_request *req; 545 546 req = _nvme_allocate_request(cb_fn, cb_arg); 547 if (req != NULL) { 548 req->payload = memdesc_ccb(ccb); 549 req->payload_valid = true; 550 } 551 552 return (req); 553 } 554 555 #define nvme_free_request(req) free(req, M_NVME) 556 557 void nvme_notify_async_consumers(struct nvme_controller *ctrlr, 558 const struct nvme_completion *async_cpl, 559 uint32_t log_page_id, void *log_page_buffer, 560 uint32_t log_page_size); 561 void nvme_notify_fail_consumers(struct nvme_controller *ctrlr); 562 void nvme_notify_new_controller(struct nvme_controller *ctrlr); 563 void nvme_notify_ns(struct nvme_controller *ctrlr, int nsid); 564 565 void nvme_ctrlr_shared_handler(void *arg); 566 void nvme_ctrlr_poll(struct nvme_controller *ctrlr); 567 568 int nvme_ctrlr_suspend(struct nvme_controller *ctrlr); 569 int nvme_ctrlr_resume(struct nvme_controller *ctrlr); 570 571 #endif /* __NVME_PRIVATE_H__ */ 572