/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O commands
 * that may be outstanding on an I/O qpair at any time.  The only advantage of
 * having IO_ENTRIES > IO_TRACKERS is for debugging: when dumping the contents
 * of the submission and completion queues, the larger queues show a longer
 * history of commands.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_ADMIN_TIMEOUT_PERIOD	(60)	/* in seconds */
#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE	(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif
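
/*
 * Illustrative compile-time checks, not part of the original header: the
 * comment above implies the invariant NVME_IO_TRACKERS <= NVME_IO_ENTRIES,
 * and the MIN/MAX bounds are meant to bracket the default.  This sketch
 * assumes CTASSERT() is available via the includes above, as it is on
 * supported FreeBSD versions.
 */
CTASSERT(NVME_ADMIN_TRACKERS <= NVME_ADMIN_ENTRIES);
CTASSERT(NVME_IO_TRACKERS <= NVME_IO_ENTRIES);
CTASSERT(NVME_MIN_IO_TRACKERS <= NVME_IO_TRACKERS);
CTASSERT(NVME_IO_TRACKERS <= NVME_MAX_IO_TRACKERS);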

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	struct memdesc			payload;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	bool				payload_valid;
	bool				timeout;
	bool				spare[2];	/* Future use */
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	sbintime_t			deadline;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,	/* Normal operations */
	RECOVERY_WAITING,	/* Waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	struct callout		timer;		/* protected by recovery lock */
	bool			timer_armed;	/* protected by recovery lock */
	enum nvme_recovery	recovery_state;	/* protected by recovery lock */

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;
	int64_t			num_recovery_nolock;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx_padalign	lock;
	struct mtx_padalign	recovery;
} __aligned(CACHE_LINE_SIZE);
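
/*
 * Illustrative sketch, not part of the original header: how the phase,
 * cq_head, and cpl members cooperate when consuming completions.  A
 * completion queue entry is new only while its phase bit matches the
 * qpair's current phase; the phase flips each time cq_head wraps.  This
 * assumes NVME_STATUS_GET_P() from nvme.h and elides the bus_dmamap_sync()
 * and doorbell writes the real completion path performs.
 */
static __inline bool
nvme_example_cpl_is_new(const struct nvme_qpair *qpair)
{
	const struct nvme_completion *cpl = &qpair->cpl[qpair->cq_head];

	return (NVME_STATUS_GET_P(cpl->status) == qpair->phase);
}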

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */
#define QUIRK_INTEL_ALIGNMENT	4	/* Pre-NVMe 1.3 performance alignment */
#define QUIRK_AHCI		8	/* Attached via AHCI redirect */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers, which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** LO and HI capacity mask */
	uint32_t		cap_lo;
	uint32_t		cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t		page_size;
	uint32_t		mps;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		admin_timeout_period;
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	bool			is_dying;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t	hmbc_map;
		void		*hmbc_vaddr;
		uint64_t	hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;

	/* Statistics */
	counter_u64_t		alignment_splits;
};

#define nvme_mmio_offsetof(reg) \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg) + 4, \
		    (val & 0xFFFFFFFF00000000ULL) >> 32); \
	} while (0)

#define nvme_printf(ctrlr, fmt, args...) \
	device_printf((ctrlr)->dev, fmt, ##args)
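
/*
 * Illustrative sketch, not part of the original header: using the MMIO
 * accessors above to poll controller status.  The register name passed to
 * nvme_mmio_read_4() is a field of struct nvme_registers, so "csts" reads
 * the CSTS register; NVME_CSTS_GET_RDY() is assumed from nvme.h.  An
 * all-ones value (NVME_GONE) indicates the device has been unplugged.
 */
static __inline bool
nvme_example_ctrlr_ready(struct nvme_controller *ctrlr)
{
	uint32_t csts;

	csts = nvme_mmio_read_4(ctrlr, csts);
	if (csts == NVME_GONE)
		return (false);
	return (NVME_CSTS_GET_RDY(csts) != 0);
}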

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
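
/*
 * Illustrative sketch, not part of the original header: the completion
 * callback convention shared by the nvme_ctrlr_cmd_*() helpers above.
 * Each helper issues an admin command and later invokes the supplied
 * nvme_cb_fn_t with cb_arg and the command's completion entry.  The
 * function name below is hypothetical; nvme_completion_is_error() is
 * assumed from nvme.h.
 */
static __inline void
nvme_example_identify_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_controller *ctrlr = arg;

	if (nvme_completion_is_error(cpl)) {
		nvme_printf(ctrlr, "identify controller failed\n");
		return;
	}
	/*
	 * On success, the payload buffer passed when the command was
	 * issued (e.g. &ctrlr->cdata) holds the identify data.
	 */
}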

/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_qpair_print_command(struct nvme_qpair *qpair,
				 struct nvme_command *cmd);
void	nvme_qpair_print_completion(struct nvme_qpair *qpair,
				    struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback, which sets status->done to
 * true, usually within microseconds.  If it does not, then after one second
 * the timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If the command still has not
 * completed after ten seconds, something is wrong with the driver, and
 * panicking is the only way to recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media,
 * so they complete within a few microseconds.  Adaptively wait in sub-tick
 * sleeps to catch the vast majority of these without paying for a full tick
 * plus scheduling delays.  Since these commands run at startup, this
 * drastically reduces startup time.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic("NVME polled command failed to complete within 10s.");
		pause_sbt("nvme", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}
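
/*
 * Illustrative sketch, not part of the original header: the canonical
 * synchronous pattern built from nvme_completion_poll_cb() and
 * nvme_completion_poll().  Zero "done", issue a command with the poll
 * callback and the status block as its argument, then wait.  The function
 * name is hypothetical; nvme_completion_is_error() is assumed from nvme.h.
 */
static __inline bool
nvme_example_identify_sync(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	return (!nvme_completion_is_error(&status.cpl));
}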

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_vaddr(payload, payload_size);
		req->payload_valid = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_bio(bio);
		req->payload_valid = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_ccb(ccb);
		req->payload_valid = true;
	}

	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */