/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that the real maximum number of PRP entries we support is 32+1,
 *  which results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/Os that we
 *  will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage in having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 *  when dumping the contents of the submission and completion queues, it
 *  will show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */
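
/*
 * Hypothetical worked example of the PRP sizing above (the actual
 *  values depend on PAGE_SIZE and on how nvme.h derives
 *  NVME_MAX_XFER_SIZE from MAXPHYS): with 4KB pages and a 128KB max
 *  transfer, NVME_MAX_PRP_LIST_ENTRIES is 128KB / 4KB = 32, so a
 *  maximal transfer consumes prp1 plus a 32-entry list referenced by
 *  prp2 - the 32+1 entries noted above.
 */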

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2	/* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	bool			is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};
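
/*
 * Illustrative sketch (not the driver's literal completion path) of how
 *  the phase, cq_head and num_entries fields of struct nvme_qpair above
 *  cooperate to detect new completions; everything other than the
 *  struct fields and the NVME_STATUS_GET_P accessor from nvme.h is
 *  hypothetical:
 *
 *	struct nvme_completion *cpl = &qpair->cpl[qpair->cq_head];
 *
 *	if (NVME_STATUS_GET_P(le16toh(cpl->status)) != qpair->phase)
 *		return;	// entry not yet written by the controller
 *	// ...process the completion, then advance the head, flipping
 *	//  the expected phase tag on wrap-around...
 *	if (++qpair->cq_head == qpair->num_entries) {
 *		qpair->cq_head = 0;
 *		qpair->phase = !qpair->phase;
 *	}
 */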

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t	hmbc_map;
		void		*hmbc_vaddr;
		uint64_t	hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)

#define nvme_printf(ctrlr, fmt, args...)				\
	device_printf(ctrlr->dev, fmt, ##args)
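
/*
 * Example usage of the MMIO accessors above (a sketch, not code from
 *  this driver; the register field names csts/asq/acq come from
 *  struct nvme_registers in nvme.h, and qpair is assumed to be a
 *  constructed admin queue):
 *
 *	uint32_t v = nvme_mmio_read_4(ctrlr, csts);
 *
 *	nvme_mmio_write_8(ctrlr, asq, qpair->cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, qpair->cpl_bus_addr);
 */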

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
	    void *payload, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
	    uint32_t nsid, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
	    uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn,
	    void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
	    struct nvme_error_information_entry *payload,
	    uint32_t num_entries, /* 0 = max */
	    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(
	    struct nvme_controller *ctrlr, uint32_t nsid,
	    struct nvme_health_information_page *payload,
	    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
	    struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn,
	    void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
	    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
	    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
	    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
	    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
	    uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
	    uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
	    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
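
/*
 * Typical synchronous pattern for the admin command wrappers above (a
 *  hedged sketch; nvme_completion_poll_cb and nvme_completion_poll
 *  appear later in this file, and nvme_completion_is_error() comes
 *  from nvme.h):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify controller failed!\n");
 */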

/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
	    struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
	    struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
	    struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t num_entries,
	    uint32_t num_trackers, struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
	    struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
	    struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
	    struct nvme_request *req, uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
	    struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);
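
/*
 * Sketch of the expected I/O request flow through the submit functions
 *  above (a hypothetical example; the nvme_allocate_request_*
 *  constructors are defined below, and NVME_OPC_FLUSH comes from
 *  nvme.h):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_FLUSH;
 *	req->cmd.nsid = htole32(ns->id);
 *	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 */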

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.
 *  Used in limited contexts where the caller knows it's OK to block
 *  briefly while the command runs.  The ISR will run the callback which
 *  will set status->done to true, usually within microseconds.  A 1s
 *  pause means something is seriously wrong, so we panic to provide the
 *  proper context for diagnosis.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 1;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 1s.");
}

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
	    const struct nvme_completion *async_cpl,
	    uint32_t log_page_id, void *log_page_buffer,
	    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */