1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2017 Shunsuke Mie 5 * Copyright (c) 2018 Leon Dang 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * bhyve PCIe-NVMe device emulation. 31 * 32 * options: 33 * -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z 34 * 35 * accepted devpath: 36 * /dev/blockdev 37 * /path/to/image 38 * ram=size_in_MiB 39 * 40 * maxq = max number of queues 41 * qsz = max elements in each queue 42 * ioslots = max number of concurrent io requests 43 * sectsz = sector size (defaults to blockif sector size) 44 * ser = serial number (20-chars max) 45 * 46 */ 47 48 /* TODO: 49 - create async event for smart and log 50 - intr coalesce 51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include <sys/types.h> 57 58 #include <assert.h> 59 #include <pthread.h> 60 #include <semaphore.h> 61 #include <stdbool.h> 62 #include <stddef.h> 63 #include <stdint.h> 64 #include <stdio.h> 65 #include <stdlib.h> 66 #include <string.h> 67 68 #include <machine/atomic.h> 69 #include <machine/vmm.h> 70 #include <vmmapi.h> 71 72 #include <dev/nvme/nvme.h> 73 74 #include "bhyverun.h" 75 #include "block_if.h" 76 #include "pci_emul.h" 77 78 79 static int nvme_debug = 0; 80 #define DPRINTF(params) if (nvme_debug) printf params 81 #define WPRINTF(params) printf params 82 83 /* defaults; can be overridden */ 84 #define NVME_MSIX_BAR 4 85 86 #define NVME_IOSLOTS 8 87 88 /* The NVMe spec defines bits 13:4 in BAR0 as reserved */ 89 #define NVME_MMIO_SPACE_MIN (1 << 14) 90 91 #define NVME_QUEUES 16 92 #define NVME_MAX_QENTRIES 2048 93 94 #define NVME_PRP2_ITEMS (PAGE_SIZE/sizeof(uint64_t)) 95 #define NVME_MAX_BLOCKIOVS 512 96 97 /* helpers */ 98 99 /* Convert a zero-based value into a one-based value */ 100 #define ONE_BASED(zero) ((zero) + 1) 101 /* Convert a one-based value into a zero-based value */ 102 #define ZERO_BASED(one) ((one) - 1) 103 104 /* Encode number of SQ's and CQ's for Set/Get Features */ 105 #define NVME_FEATURE_NUM_QUEUES(sc) \ 106 (ZERO_BASED((sc)->num_squeues) & 0xffff) | \ 107 (ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16; 108 109 #define NVME_DOORBELL_OFFSET offsetof(struct nvme_registers, doorbell) 110 111 enum nvme_controller_register_offsets { 112 NVME_CR_CAP_LOW = 0x00, 113 NVME_CR_CAP_HI = 0x04, 114 NVME_CR_VS = 0x08, 115 NVME_CR_INTMS = 0x0c, 116 NVME_CR_INTMC = 0x10, 117 NVME_CR_CC = 0x14, 118 NVME_CR_CSTS = 0x1c, 119 NVME_CR_NSSR = 0x20, 120 NVME_CR_AQA = 0x24, 121 NVME_CR_ASQ_LOW = 0x28, 122 NVME_CR_ASQ_HI = 0x2c, 123 NVME_CR_ACQ_LOW = 0x30, 124 NVME_CR_ACQ_HI = 0x34, 125 }; 126 127 enum nvme_cmd_cdw11 { 128 NVME_CMD_CDW11_PC = 0x0001, 129 NVME_CMD_CDW11_IEN = 0x0002, 130 NVME_CMD_CDW11_IV = 0xFFFF0000, 131 }; 132 133 #define NVME_CQ_INTEN 0x01 134 #define NVME_CQ_INTCOAL 0x02 135 136 struct nvme_completion_queue { 137 struct nvme_completion *qbase; 138 uint32_t size; 139 uint16_t tail; /* nvme progress */ 140 uint16_t head; /* guest progress */ 141 uint16_t intr_vec; 142 uint32_t intr_en; 143 pthread_mutex_t mtx; 144 }; 145 146 struct nvme_submission_queue { 147 struct nvme_command *qbase; 148 uint32_t size; 149 uint16_t head; /* nvme progress */ 150 uint16_t tail; /* guest progress */ 151 uint16_t cqid; /* completion queue id */ 152 int busy; /* queue is being processed */ 153 int qpriority; 154 }; 155 156 enum nvme_storage_type { 157 NVME_STOR_BLOCKIF = 0, 158 NVME_STOR_RAM = 1, 159 }; 160 161 struct pci_nvme_blockstore { 162 enum nvme_storage_type type; 163 void *ctx; 164 uint64_t size; 165 uint32_t sectsz; 166 uint32_t sectsz_bits; 167 }; 168 169 struct pci_nvme_ioreq { 170 struct pci_nvme_softc *sc; 171 struct pci_nvme_ioreq *next; 172 struct nvme_submission_queue *nvme_sq; 173 uint16_t sqid; 174 175 /* command information */ 176 uint16_t opc; 177 uint16_t cid; 178 uint32_t nsid; 179 180 uint64_t prev_gpaddr; 181 size_t prev_size; 182 183 /* 184 * lock if all iovs consumed (big IO); 185 * complete transaction before continuing 186 */ 187 pthread_mutex_t mtx; 188 pthread_cond_t cv; 189 190 struct blockif_req io_req; 191 192 /* pad to fit up to 512 page descriptors from guest IO request */ 193 struct iovec iovpadding[NVME_MAX_BLOCKIOVS-BLOCKIF_IOV_MAX]; 194 }; 195 196 struct pci_nvme_softc { 197 struct pci_devinst *nsc_pi; 198 199 pthread_mutex_t mtx; 200 201 struct nvme_registers regs; 202 203 struct nvme_namespace_data nsdata; 204 struct nvme_controller_data ctrldata; 205 struct nvme_error_information_entry err_log; 206 struct nvme_health_information_page health_log; 207 struct nvme_firmware_page fw_log; 208 209 struct pci_nvme_blockstore nvstore; 210 211 uint16_t max_qentries; /* max entries per queue */ 212 uint32_t max_queues; /* max number of IO SQ's or CQ's */ 213 uint32_t num_cqueues; 214 uint32_t num_squeues; 215 216 struct pci_nvme_ioreq *ioreqs; 217 struct pci_nvme_ioreq *ioreqs_free; /* free list of ioreqs */ 218 uint32_t pending_ios; 219 uint32_t ioslots; 220 sem_t iosemlock; 221 222 /* 223 * Memory mapped Submission and Completion queues 224 * Each array includes both Admin and IO queues 225 */ 226 struct nvme_completion_queue *compl_queues; 227 struct nvme_submission_queue *submit_queues; 228 229 /* controller features */ 230 uint32_t intr_coales_aggr_time; /* 0x08: uS to delay intr */ 231 uint32_t intr_coales_aggr_thresh; /* 0x08: compl-Q entries */ 232 uint32_t async_ev_config; /* 0x0B: async event config */ 233 }; 234 235 236 static void pci_nvme_io_partial(struct blockif_req *br, int err); 237 238 /* Controller Configuration utils */ 239 #define NVME_CC_GET_EN(cc) \ 240 ((cc) >> NVME_CC_REG_EN_SHIFT & NVME_CC_REG_EN_MASK) 241 #define NVME_CC_GET_CSS(cc) \ 242 ((cc) >> NVME_CC_REG_CSS_SHIFT & NVME_CC_REG_CSS_MASK) 243 #define NVME_CC_GET_SHN(cc) \ 244 ((cc) >> NVME_CC_REG_SHN_SHIFT & NVME_CC_REG_SHN_MASK) 245 #define NVME_CC_GET_IOSQES(cc) \ 246 ((cc) >> NVME_CC_REG_IOSQES_SHIFT & NVME_CC_REG_IOSQES_MASK) 247 #define NVME_CC_GET_IOCQES(cc) \ 248 ((cc) >> NVME_CC_REG_IOCQES_SHIFT & NVME_CC_REG_IOCQES_MASK) 249 250 #define NVME_CC_WRITE_MASK \ 251 ((NVME_CC_REG_EN_MASK << NVME_CC_REG_EN_SHIFT) | \ 252 (NVME_CC_REG_IOSQES_MASK << NVME_CC_REG_IOSQES_SHIFT) | \ 253 (NVME_CC_REG_IOCQES_MASK << NVME_CC_REG_IOCQES_SHIFT)) 254 255 #define NVME_CC_NEN_WRITE_MASK \ 256 ((NVME_CC_REG_CSS_MASK << NVME_CC_REG_CSS_SHIFT) | \ 257 (NVME_CC_REG_MPS_MASK << NVME_CC_REG_MPS_SHIFT) | \ 258 (NVME_CC_REG_AMS_MASK << NVME_CC_REG_AMS_SHIFT)) 259 260 /* Controller Status utils */ 261 #define NVME_CSTS_GET_RDY(sts) \ 262 ((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK) 263 264 #define NVME_CSTS_RDY (1 << NVME_CSTS_REG_RDY_SHIFT) 265 266 /* Completion Queue status word utils */ 267 #define NVME_STATUS_P (1 << NVME_STATUS_P_SHIFT) 268 #define NVME_STATUS_MASK \ 269 ((NVME_STATUS_SCT_MASK << NVME_STATUS_SCT_SHIFT) |\ 270 (NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT)) 271 272 static __inline void 273 cpywithpad(char *dst, size_t dst_size, const char *src, char pad) 274 { 275 size_t len; 276 277 len = strnlen(src, dst_size); 278 memset(dst, pad, dst_size); 279 memcpy(dst, src, len); 280 } 281 282 static __inline void 283 pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code) 284 { 285 286 *status &= ~NVME_STATUS_MASK; 287 *status |= (type & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT | 288 (code & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT; 289 } 290 291 static __inline void 292 pci_nvme_status_genc(uint16_t *status, uint16_t code) 293 { 294 295 pci_nvme_status_tc(status, NVME_SCT_GENERIC, code); 296 } 297 298 static __inline void 299 pci_nvme_toggle_phase(uint16_t *status, int prev) 300 { 301 302 if (prev) 303 *status &= ~NVME_STATUS_P; 304 else 305 *status |= NVME_STATUS_P; 306 } 307 308 static void 309 pci_nvme_init_ctrldata(struct pci_nvme_softc *sc) 310 { 311 struct nvme_controller_data *cd = &sc->ctrldata; 312 313 cd->vid = 0xFB5D; 314 cd->ssvid = 0x0000; 315 316 cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' '); 317 cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' '); 318 319 /* Num of submission commands that we can handle at a time (2^rab) */ 320 cd->rab = 4; 321 322 /* FreeBSD OUI */ 323 cd->ieee[0] = 0x58; 324 cd->ieee[1] = 0x9c; 325 cd->ieee[2] = 0xfc; 326 327 cd->mic = 0; 328 329 cd->mdts = 9; /* max data transfer size (2^mdts * CAP.MPSMIN) */ 330 331 cd->ver = 0x00010300; 332 333 cd->oacs = 1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT; 334 cd->acl = 2; 335 cd->aerl = 4; 336 337 cd->lpa = 0; /* TODO: support some simple things like SMART */ 338 cd->elpe = 0; /* max error log page entries */ 339 cd->npss = 1; /* number of power states support */ 340 341 /* Warning Composite Temperature Threshold */ 342 cd->wctemp = 0x0157; 343 344 cd->sqes = (6 << NVME_CTRLR_DATA_SQES_MAX_SHIFT) | 345 (6 << NVME_CTRLR_DATA_SQES_MIN_SHIFT); 346 cd->cqes = (4 << NVME_CTRLR_DATA_CQES_MAX_SHIFT) | 347 (4 << NVME_CTRLR_DATA_CQES_MIN_SHIFT); 348 cd->nn = 1; /* number of namespaces */ 349 350 cd->fna = 0x03; 351 352 cd->power_state[0].mp = 10; 353 } 354 355 static void 356 pci_nvme_init_nsdata(struct pci_nvme_softc *sc) 357 { 358 struct nvme_namespace_data *nd; 359 360 nd = &sc->nsdata; 361 362 nd->nsze = sc->nvstore.size / sc->nvstore.sectsz; 363 nd->ncap = nd->nsze; 364 nd->nuse = nd->nsze; 365 366 /* Get LBA and backstore information from backing store */ 367 nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */ 368 /* LBA data-sz = 2^lbads */ 369 nd->lbaf[0] = sc->nvstore.sectsz_bits << NVME_NS_DATA_LBAF_LBADS_SHIFT; 370 371 nd->flbas = 0; 372 } 373 374 static void 375 pci_nvme_init_logpages(struct pci_nvme_softc *sc) 376 { 377 378 memset(&sc->err_log, 0, sizeof(sc->err_log)); 379 memset(&sc->health_log, 0, sizeof(sc->health_log)); 380 memset(&sc->fw_log, 0, sizeof(sc->fw_log)); 381 } 382 383 static void 384 pci_nvme_reset_locked(struct pci_nvme_softc *sc) 385 { 386 DPRINTF(("%s\r\n", __func__)); 387 388 sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) | 389 (1 << NVME_CAP_LO_REG_CQR_SHIFT) | 390 (60 << NVME_CAP_LO_REG_TO_SHIFT); 391 392 sc->regs.cap_hi = 1 << NVME_CAP_HI_REG_CSS_NVM_SHIFT; 393 394 sc->regs.vs = 0x00010300; /* NVMe v1.3 */ 395 396 sc->regs.cc = 0; 397 sc->regs.csts = 0; 398 399 sc->num_cqueues = sc->num_squeues = sc->max_queues; 400 if (sc->submit_queues != NULL) { 401 for (int i = 0; i < sc->num_squeues + 1; i++) { 402 /* 403 * The Admin Submission Queue is at index 0. 404 * It must not be changed at reset otherwise the 405 * emulation will be out of sync with the guest. 406 */ 407 if (i != 0) { 408 sc->submit_queues[i].qbase = NULL; 409 sc->submit_queues[i].size = 0; 410 sc->submit_queues[i].cqid = 0; 411 } 412 sc->submit_queues[i].tail = 0; 413 sc->submit_queues[i].head = 0; 414 sc->submit_queues[i].busy = 0; 415 } 416 } else 417 sc->submit_queues = calloc(sc->num_squeues + 1, 418 sizeof(struct nvme_submission_queue)); 419 420 if (sc->compl_queues != NULL) { 421 for (int i = 0; i < sc->num_cqueues + 1; i++) { 422 /* See Admin Submission Queue note above */ 423 if (i != 0) { 424 sc->compl_queues[i].qbase = NULL; 425 sc->compl_queues[i].size = 0; 426 } 427 428 sc->compl_queues[i].tail = 0; 429 sc->compl_queues[i].head = 0; 430 } 431 } else { 432 sc->compl_queues = calloc(sc->num_cqueues + 1, 433 sizeof(struct nvme_completion_queue)); 434 435 for (int i = 0; i < sc->num_cqueues + 1; i++) 436 pthread_mutex_init(&sc->compl_queues[i].mtx, NULL); 437 } 438 } 439 440 static void 441 pci_nvme_reset(struct pci_nvme_softc *sc) 442 { 443 pthread_mutex_lock(&sc->mtx); 444 pci_nvme_reset_locked(sc); 445 pthread_mutex_unlock(&sc->mtx); 446 } 447 448 static void 449 pci_nvme_init_controller(struct vmctx *ctx, struct pci_nvme_softc *sc) 450 { 451 uint16_t acqs, asqs; 452 453 DPRINTF(("%s\r\n", __func__)); 454 455 asqs = (sc->regs.aqa & NVME_AQA_REG_ASQS_MASK) + 1; 456 sc->submit_queues[0].size = asqs; 457 sc->submit_queues[0].qbase = vm_map_gpa(ctx, sc->regs.asq, 458 sizeof(struct nvme_command) * asqs); 459 460 DPRINTF(("%s mapping Admin-SQ guest 0x%lx, host: %p\r\n", 461 __func__, sc->regs.asq, sc->submit_queues[0].qbase)); 462 463 acqs = ((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) & 464 NVME_AQA_REG_ACQS_MASK) + 1; 465 sc->compl_queues[0].size = acqs; 466 sc->compl_queues[0].qbase = vm_map_gpa(ctx, sc->regs.acq, 467 sizeof(struct nvme_completion) * acqs); 468 DPRINTF(("%s mapping Admin-CQ guest 0x%lx, host: %p\r\n", 469 __func__, sc->regs.acq, sc->compl_queues[0].qbase)); 470 } 471 472 static int 473 nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *src, 474 size_t len) 475 { 476 uint8_t *dst; 477 size_t bytes; 478 479 if (len > (8 * 1024)) { 480 return (-1); 481 } 482 483 /* Copy from the start of prp1 to the end of the physical page */ 484 bytes = PAGE_SIZE - (prp1 & PAGE_MASK); 485 bytes = MIN(bytes, len); 486 487 dst = vm_map_gpa(ctx, prp1, bytes); 488 if (dst == NULL) { 489 return (-1); 490 } 491 492 memcpy(dst, src, bytes); 493 494 src += bytes; 495 496 len -= bytes; 497 if (len == 0) { 498 return (0); 499 } 500 501 len = MIN(len, PAGE_SIZE); 502 503 dst = vm_map_gpa(ctx, prp2, len); 504 if (dst == NULL) { 505 return (-1); 506 } 507 508 memcpy(dst, src, len); 509 510 return (0); 511 } 512 513 static int 514 nvme_opc_delete_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command, 515 struct nvme_completion* compl) 516 { 517 uint16_t qid = command->cdw10 & 0xffff; 518 519 DPRINTF(("%s DELETE_IO_SQ %u\r\n", __func__, qid)); 520 if (qid == 0 || qid > sc->num_squeues) { 521 WPRINTF(("%s NOT PERMITTED queue id %u / num_squeues %u\r\n", 522 __func__, qid, sc->num_squeues)); 523 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, 524 NVME_SC_INVALID_QUEUE_IDENTIFIER); 525 return (1); 526 } 527 528 sc->submit_queues[qid].qbase = NULL; 529 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 530 return (1); 531 } 532 533 static int 534 nvme_opc_create_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command, 535 struct nvme_completion* compl) 536 { 537 if (command->cdw11 & NVME_CMD_CDW11_PC) { 538 uint16_t qid = command->cdw10 & 0xffff; 539 struct nvme_submission_queue *nsq; 540 541 if ((qid == 0) || (qid > sc->num_squeues)) { 542 WPRINTF(("%s queue index %u > num_squeues %u\r\n", 543 __func__, qid, sc->num_squeues)); 544 pci_nvme_status_tc(&compl->status, 545 NVME_SCT_COMMAND_SPECIFIC, 546 NVME_SC_INVALID_QUEUE_IDENTIFIER); 547 return (1); 548 } 549 550 nsq = &sc->submit_queues[qid]; 551 nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff); 552 553 nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, 554 sizeof(struct nvme_command) * (size_t)nsq->size); 555 nsq->cqid = (command->cdw11 >> 16) & 0xffff; 556 nsq->qpriority = (command->cdw11 >> 1) & 0x03; 557 558 DPRINTF(("%s sq %u size %u gaddr %p cqid %u\r\n", __func__, 559 qid, nsq->size, nsq->qbase, nsq->cqid)); 560 561 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 562 563 DPRINTF(("%s completed creating IOSQ qid %u\r\n", 564 __func__, qid)); 565 } else { 566 /* 567 * Guest sent non-cont submission queue request. 568 * This setting is unsupported by this emulation. 569 */ 570 WPRINTF(("%s unsupported non-contig (list-based) " 571 "create i/o submission queue\r\n", __func__)); 572 573 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 574 } 575 return (1); 576 } 577 578 static int 579 nvme_opc_delete_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command, 580 struct nvme_completion* compl) 581 { 582 uint16_t qid = command->cdw10 & 0xffff; 583 584 DPRINTF(("%s DELETE_IO_CQ %u\r\n", __func__, qid)); 585 if (qid == 0 || qid > sc->num_cqueues) { 586 WPRINTF(("%s queue index %u / num_cqueues %u\r\n", 587 __func__, qid, sc->num_cqueues)); 588 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, 589 NVME_SC_INVALID_QUEUE_IDENTIFIER); 590 return (1); 591 } 592 593 sc->compl_queues[qid].qbase = NULL; 594 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 595 return (1); 596 } 597 598 static int 599 nvme_opc_create_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command, 600 struct nvme_completion* compl) 601 { 602 if (command->cdw11 & NVME_CMD_CDW11_PC) { 603 uint16_t qid = command->cdw10 & 0xffff; 604 struct nvme_completion_queue *ncq; 605 606 if ((qid == 0) || (qid > sc->num_cqueues)) { 607 WPRINTF(("%s queue index %u > num_cqueues %u\r\n", 608 __func__, qid, sc->num_cqueues)); 609 pci_nvme_status_tc(&compl->status, 610 NVME_SCT_COMMAND_SPECIFIC, 611 NVME_SC_INVALID_QUEUE_IDENTIFIER); 612 return (1); 613 } 614 615 ncq = &sc->compl_queues[qid]; 616 ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1; 617 ncq->intr_vec = (command->cdw11 >> 16) & 0xffff; 618 ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff); 619 620 ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, 621 command->prp1, 622 sizeof(struct nvme_command) * (size_t)ncq->size); 623 624 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 625 } else { 626 /* 627 * Non-contig completion queue unsupported. 628 */ 629 WPRINTF(("%s unsupported non-contig (list-based) " 630 "create i/o completion queue\r\n", 631 __func__)); 632 633 /* 0x12 = Invalid Use of Controller Memory Buffer */ 634 pci_nvme_status_genc(&compl->status, 0x12); 635 } 636 637 return (1); 638 } 639 640 static int 641 nvme_opc_get_log_page(struct pci_nvme_softc* sc, struct nvme_command* command, 642 struct nvme_completion* compl) 643 { 644 uint32_t logsize = (1 + ((command->cdw10 >> 16) & 0xFFF)) * 2; 645 uint8_t logpage = command->cdw10 & 0xFF; 646 647 DPRINTF(("%s log page %u len %u\r\n", __func__, logpage, logsize)); 648 649 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 650 651 switch (logpage) { 652 case NVME_LOG_ERROR: 653 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, 654 command->prp2, (uint8_t *)&sc->err_log, logsize); 655 break; 656 case NVME_LOG_HEALTH_INFORMATION: 657 /* TODO: present some smart info */ 658 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, 659 command->prp2, (uint8_t *)&sc->health_log, logsize); 660 break; 661 case NVME_LOG_FIRMWARE_SLOT: 662 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, 663 command->prp2, (uint8_t *)&sc->fw_log, logsize); 664 break; 665 default: 666 WPRINTF(("%s get log page %x command not supported\r\n", 667 __func__, logpage)); 668 669 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, 670 NVME_SC_INVALID_LOG_PAGE); 671 } 672 673 return (1); 674 } 675 676 static int 677 nvme_opc_identify(struct pci_nvme_softc* sc, struct nvme_command* command, 678 struct nvme_completion* compl) 679 { 680 void *dest; 681 682 DPRINTF(("%s identify 0x%x nsid 0x%x\r\n", __func__, 683 command->cdw10 & 0xFF, command->nsid)); 684 685 switch (command->cdw10 & 0xFF) { 686 case 0x00: /* return Identify Namespace data structure */ 687 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, 688 command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata)); 689 break; 690 case 0x01: /* return Identify Controller data structure */ 691 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, 692 command->prp2, (uint8_t *)&sc->ctrldata, 693 sizeof(sc->ctrldata)); 694 break; 695 case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */ 696 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, 697 sizeof(uint32_t) * 1024); 698 ((uint32_t *)dest)[0] = 1; 699 ((uint32_t *)dest)[1] = 0; 700 break; 701 case 0x11: 702 pci_nvme_status_genc(&compl->status, 703 NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 704 return (1); 705 case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */ 706 case 0x10: 707 case 0x12: 708 case 0x13: 709 case 0x14: 710 case 0x15: 711 default: 712 DPRINTF(("%s unsupported identify command requested 0x%x\r\n", 713 __func__, command->cdw10 & 0xFF)); 714 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 715 return (1); 716 } 717 718 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 719 return (1); 720 } 721 722 static int 723 nvme_set_feature_queues(struct pci_nvme_softc* sc, struct nvme_command* command, 724 struct nvme_completion* compl) 725 { 726 uint16_t nqr; /* Number of Queues Requested */ 727 728 nqr = command->cdw11 & 0xFFFF; 729 if (nqr == 0xffff) { 730 WPRINTF(("%s: Illegal NSQR value %#x\n", __func__, nqr)); 731 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 732 return (-1); 733 } 734 735 sc->num_squeues = ONE_BASED(nqr); 736 if (sc->num_squeues > sc->max_queues) { 737 DPRINTF(("NSQR=%u is greater than max %u\n", sc->num_squeues, 738 sc->max_queues)); 739 sc->num_squeues = sc->max_queues; 740 } 741 742 nqr = (command->cdw11 >> 16) & 0xFFFF; 743 if (nqr == 0xffff) { 744 WPRINTF(("%s: Illegal NCQR value %#x\n", __func__, nqr)); 745 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 746 return (-1); 747 } 748 749 sc->num_cqueues = ONE_BASED(nqr); 750 if (sc->num_cqueues > sc->max_queues) { 751 DPRINTF(("NCQR=%u is greater than max %u\n", sc->num_cqueues, 752 sc->max_queues)); 753 sc->num_cqueues = sc->max_queues; 754 } 755 756 compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc); 757 758 return (0); 759 } 760 761 static int 762 nvme_opc_set_features(struct pci_nvme_softc* sc, struct nvme_command* command, 763 struct nvme_completion* compl) 764 { 765 int feature = command->cdw10 & 0xFF; 766 uint32_t iv; 767 768 DPRINTF(("%s feature 0x%x\r\n", __func__, feature)); 769 compl->cdw0 = 0; 770 771 switch (feature) { 772 case NVME_FEAT_ARBITRATION: 773 DPRINTF((" arbitration 0x%x\r\n", command->cdw11)); 774 break; 775 case NVME_FEAT_POWER_MANAGEMENT: 776 DPRINTF((" power management 0x%x\r\n", command->cdw11)); 777 break; 778 case NVME_FEAT_LBA_RANGE_TYPE: 779 DPRINTF((" lba range 0x%x\r\n", command->cdw11)); 780 break; 781 case NVME_FEAT_TEMPERATURE_THRESHOLD: 782 DPRINTF((" temperature threshold 0x%x\r\n", command->cdw11)); 783 break; 784 case NVME_FEAT_ERROR_RECOVERY: 785 DPRINTF((" error recovery 0x%x\r\n", command->cdw11)); 786 break; 787 case NVME_FEAT_VOLATILE_WRITE_CACHE: 788 DPRINTF((" volatile write cache 0x%x\r\n", command->cdw11)); 789 break; 790 case NVME_FEAT_NUMBER_OF_QUEUES: 791 nvme_set_feature_queues(sc, command, compl); 792 break; 793 case NVME_FEAT_INTERRUPT_COALESCING: 794 DPRINTF((" interrupt coalescing 0x%x\r\n", command->cdw11)); 795 796 /* in uS */ 797 sc->intr_coales_aggr_time = ((command->cdw11 >> 8) & 0xFF)*100; 798 799 sc->intr_coales_aggr_thresh = command->cdw11 & 0xFF; 800 break; 801 case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION: 802 iv = command->cdw11 & 0xFFFF; 803 804 DPRINTF((" interrupt vector configuration 0x%x\r\n", 805 command->cdw11)); 806 807 for (uint32_t i = 0; i < sc->num_cqueues + 1; i++) { 808 if (sc->compl_queues[i].intr_vec == iv) { 809 if (command->cdw11 & (1 << 16)) 810 sc->compl_queues[i].intr_en |= 811 NVME_CQ_INTCOAL; 812 else 813 sc->compl_queues[i].intr_en &= 814 ~NVME_CQ_INTCOAL; 815 } 816 } 817 break; 818 case NVME_FEAT_WRITE_ATOMICITY: 819 DPRINTF((" write atomicity 0x%x\r\n", command->cdw11)); 820 break; 821 case NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 822 DPRINTF((" async event configuration 0x%x\r\n", 823 command->cdw11)); 824 sc->async_ev_config = command->cdw11; 825 break; 826 case NVME_FEAT_SOFTWARE_PROGRESS_MARKER: 827 DPRINTF((" software progress marker 0x%x\r\n", 828 command->cdw11)); 829 break; 830 case 0x0C: 831 DPRINTF((" autonomous power state transition 0x%x\r\n", 832 command->cdw11)); 833 break; 834 default: 835 WPRINTF(("%s invalid feature\r\n", __func__)); 836 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 837 return (1); 838 } 839 840 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 841 return (1); 842 } 843 844 static int 845 nvme_opc_get_features(struct pci_nvme_softc* sc, struct nvme_command* command, 846 struct nvme_completion* compl) 847 { 848 int feature = command->cdw10 & 0xFF; 849 850 DPRINTF(("%s feature 0x%x\r\n", __func__, feature)); 851 852 compl->cdw0 = 0; 853 854 switch (feature) { 855 case NVME_FEAT_ARBITRATION: 856 DPRINTF((" arbitration\r\n")); 857 break; 858 case NVME_FEAT_POWER_MANAGEMENT: 859 DPRINTF((" power management\r\n")); 860 break; 861 case NVME_FEAT_LBA_RANGE_TYPE: 862 DPRINTF((" lba range\r\n")); 863 break; 864 case NVME_FEAT_TEMPERATURE_THRESHOLD: 865 DPRINTF((" temperature threshold\r\n")); 866 switch ((command->cdw11 >> 20) & 0x3) { 867 case 0: 868 /* Over temp threshold */ 869 compl->cdw0 = 0xFFFF; 870 break; 871 case 1: 872 /* Under temp threshold */ 873 compl->cdw0 = 0; 874 break; 875 default: 876 WPRINTF((" invalid threshold type select\r\n")); 877 pci_nvme_status_genc(&compl->status, 878 NVME_SC_INVALID_FIELD); 879 return (1); 880 } 881 break; 882 case NVME_FEAT_ERROR_RECOVERY: 883 DPRINTF((" error recovery\r\n")); 884 break; 885 case NVME_FEAT_VOLATILE_WRITE_CACHE: 886 DPRINTF((" volatile write cache\r\n")); 887 break; 888 case NVME_FEAT_NUMBER_OF_QUEUES: 889 compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc); 890 891 DPRINTF((" number of queues (submit %u, completion %u)\r\n", 892 compl->cdw0 & 0xFFFF, 893 (compl->cdw0 >> 16) & 0xFFFF)); 894 895 break; 896 case NVME_FEAT_INTERRUPT_COALESCING: 897 DPRINTF((" interrupt coalescing\r\n")); 898 break; 899 case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION: 900 DPRINTF((" interrupt vector configuration\r\n")); 901 break; 902 case NVME_FEAT_WRITE_ATOMICITY: 903 DPRINTF((" write atomicity\r\n")); 904 break; 905 case NVME_FEAT_ASYNC_EVENT_CONFIGURATION: 906 DPRINTF((" async event configuration\r\n")); 907 sc->async_ev_config = command->cdw11; 908 break; 909 case NVME_FEAT_SOFTWARE_PROGRESS_MARKER: 910 DPRINTF((" software progress marker\r\n")); 911 break; 912 case 0x0C: 913 DPRINTF((" autonomous power state transition\r\n")); 914 break; 915 default: 916 WPRINTF(("%s invalid feature 0x%x\r\n", __func__, feature)); 917 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); 918 return (1); 919 } 920 921 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 922 return (1); 923 } 924 925 static int 926 nvme_opc_abort(struct pci_nvme_softc* sc, struct nvme_command* command, 927 struct nvme_completion* compl) 928 { 929 DPRINTF(("%s submission queue %u, command ID 0x%x\r\n", __func__, 930 command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF)); 931 932 /* TODO: search for the command ID and abort it */ 933 934 compl->cdw0 = 1; 935 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); 936 return (1); 937 } 938 939 #ifdef __FreeBSD__ 940 static int 941 nvme_opc_async_event_req(struct pci_nvme_softc* sc, 942 struct nvme_command* command, struct nvme_completion* compl) 943 { 944 DPRINTF(("%s async event request 0x%x\r\n", __func__, command->cdw11)); 945 946 /* 947 * TODO: raise events when they happen based on the Set Features cmd. 948 * These events happen async, so only set completion successful if 949 * there is an event reflective of the request to get event. 950 */ 951 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, 952 NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 953 return (0); 954 } 955 #else 956 /* This is kept behind an ifdef while it's unused to appease the compiler. */ 957 #endif /* __FreeBSD__ */ 958 959 static void 960 pci_nvme_handle_admin_cmd(struct pci_nvme_softc* sc, uint64_t value) 961 { 962 struct nvme_completion compl; 963 struct nvme_command *cmd; 964 struct nvme_submission_queue *sq; 965 struct nvme_completion_queue *cq; 966 int do_intr = 0; 967 uint16_t sqhead; 968 969 DPRINTF(("%s index %u\r\n", __func__, (uint32_t)value)); 970 971 sq = &sc->submit_queues[0]; 972 973 sqhead = atomic_load_acq_short(&sq->head); 974 975 if (atomic_testandset_int(&sq->busy, 1)) { 976 DPRINTF(("%s SQ busy, head %u, tail %u\r\n", 977 __func__, sqhead, sq->tail)); 978 return; 979 } 980 981 DPRINTF(("sqhead %u, tail %u\r\n", sqhead, sq->tail)); 982 983 while (sqhead != atomic_load_acq_short(&sq->tail)) { 984 cmd = &(sq->qbase)[sqhead]; 985 compl.status = 0; 986 987 switch (cmd->opc) { 988 case NVME_OPC_DELETE_IO_SQ: 989 DPRINTF(("%s command DELETE_IO_SQ\r\n", __func__)); 990 do_intr |= nvme_opc_delete_io_sq(sc, cmd, &compl); 991 break; 992 case NVME_OPC_CREATE_IO_SQ: 993 DPRINTF(("%s command CREATE_IO_SQ\r\n", __func__)); 994 do_intr |= nvme_opc_create_io_sq(sc, cmd, &compl); 995 break; 996 case NVME_OPC_DELETE_IO_CQ: 997 DPRINTF(("%s command DELETE_IO_CQ\r\n", __func__)); 998 do_intr |= nvme_opc_delete_io_cq(sc, cmd, &compl); 999 break; 1000 case NVME_OPC_CREATE_IO_CQ: 1001 DPRINTF(("%s command CREATE_IO_CQ\r\n", __func__)); 1002 do_intr |= nvme_opc_create_io_cq(sc, cmd, &compl); 1003 break; 1004 case NVME_OPC_GET_LOG_PAGE: 1005 DPRINTF(("%s command GET_LOG_PAGE\r\n", __func__)); 1006 do_intr |= nvme_opc_get_log_page(sc, cmd, &compl); 1007 break; 1008 case NVME_OPC_IDENTIFY: 1009 DPRINTF(("%s command IDENTIFY\r\n", __func__)); 1010 do_intr |= nvme_opc_identify(sc, cmd, &compl); 1011 break; 1012 case NVME_OPC_ABORT: 1013 DPRINTF(("%s command ABORT\r\n", __func__)); 1014 do_intr |= nvme_opc_abort(sc, cmd, &compl); 1015 break; 1016 case NVME_OPC_SET_FEATURES: 1017 DPRINTF(("%s command SET_FEATURES\r\n", __func__)); 1018 do_intr |= nvme_opc_set_features(sc, cmd, &compl); 1019 break; 1020 case NVME_OPC_GET_FEATURES: 1021 DPRINTF(("%s command GET_FEATURES\r\n", __func__)); 1022 do_intr |= nvme_opc_get_features(sc, cmd, &compl); 1023 break; 1024 case NVME_OPC_ASYNC_EVENT_REQUEST: 1025 DPRINTF(("%s command ASYNC_EVENT_REQ\r\n", __func__)); 1026 /* XXX dont care, unhandled for now 1027 do_intr |= nvme_opc_async_event_req(sc, cmd, &compl); 1028 */ 1029 break; 1030 default: 1031 WPRINTF(("0x%x command is not implemented\r\n", 1032 cmd->opc)); 1033 } 1034 1035 /* for now skip async event generation */ 1036 if (cmd->opc != NVME_OPC_ASYNC_EVENT_REQUEST) { 1037 struct nvme_completion *cp; 1038 int phase; 1039 1040 cq = &sc->compl_queues[0]; 1041 1042 cp = &(cq->qbase)[cq->tail]; 1043 cp->cdw0 = compl.cdw0; 1044 cp->sqid = 0; 1045 cp->sqhd = sqhead; 1046 cp->cid = cmd->cid; 1047 1048 phase = NVME_STATUS_GET_P(cp->status); 1049 cp->status = compl.status; 1050 pci_nvme_toggle_phase(&cp->status, phase); 1051 1052 cq->tail = (cq->tail + 1) % cq->size; 1053 } 1054 sqhead = (sqhead + 1) % sq->size; 1055 } 1056 1057 DPRINTF(("setting sqhead %u\r\n", sqhead)); 1058 atomic_store_short(&sq->head, sqhead); 1059 atomic_store_int(&sq->busy, 0); 1060 1061 if (do_intr) 1062 pci_generate_msix(sc->nsc_pi, 0); 1063 1064 } 1065 1066 static int 1067 pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req, 1068 uint64_t gpaddr, size_t size, int do_write, uint64_t lba) 1069 { 1070 int iovidx; 1071 1072 if (req != NULL) { 1073 /* concatenate contig block-iovs to minimize number of iovs */ 1074 if ((req->prev_gpaddr + req->prev_size) == gpaddr) { 1075 iovidx = req->io_req.br_iovcnt - 1; 1076 1077 req->io_req.br_iov[iovidx].iov_base = 1078 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, 1079 req->prev_gpaddr, size); 1080 1081 req->prev_size += size; 1082 req->io_req.br_resid += size; 1083 1084 req->io_req.br_iov[iovidx].iov_len = req->prev_size; 1085 } else { 1086 pthread_mutex_lock(&req->mtx); 1087 1088 iovidx = req->io_req.br_iovcnt; 1089 if (iovidx == NVME_MAX_BLOCKIOVS) { 1090 int err = 0; 1091 1092 DPRINTF(("large I/O, doing partial req\r\n")); 1093 1094 iovidx = 0; 1095 req->io_req.br_iovcnt = 0; 1096 1097 req->io_req.br_callback = pci_nvme_io_partial; 1098 1099 if (!do_write) 1100 err = blockif_read(sc->nvstore.ctx, 1101 &req->io_req); 1102 else 1103 err = blockif_write(sc->nvstore.ctx, 1104 &req->io_req); 1105 1106 /* wait until req completes before cont */ 1107 if (err == 0) 1108 pthread_cond_wait(&req->cv, &req->mtx); 1109 } 1110 if (iovidx == 0) { 1111 req->io_req.br_offset = lba; 1112 req->io_req.br_resid = 0; 1113 req->io_req.br_param = req; 1114 } 1115 1116 req->io_req.br_iov[iovidx].iov_base = 1117 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, 1118 gpaddr, size); 1119 1120 req->io_req.br_iov[iovidx].iov_len = size; 1121 1122 req->prev_gpaddr = gpaddr; 1123 req->prev_size = size; 1124 req->io_req.br_resid += size; 1125 1126 req->io_req.br_iovcnt++; 1127 1128 pthread_mutex_unlock(&req->mtx); 1129 } 1130 } else { 1131 /* RAM buffer: read/write directly */ 1132 void *p = sc->nvstore.ctx; 1133 void *gptr; 1134 1135 if ((lba + size) > sc->nvstore.size) { 1136 WPRINTF(("%s write would overflow RAM\r\n", __func__)); 1137 return (-1); 1138 } 1139 1140 p = (void *)((uintptr_t)p + (uintptr_t)lba); 1141 gptr = paddr_guest2host(sc->nsc_pi->pi_vmctx, gpaddr, size); 1142 if (do_write) 1143 memcpy(p, gptr, size); 1144 else 1145 memcpy(gptr, p, size); 1146 } 1147 return (0); 1148 } 1149 1150 static void 1151 pci_nvme_set_completion(struct pci_nvme_softc *sc, 1152 struct nvme_submission_queue *sq, int sqid, uint16_t cid, 1153 uint32_t cdw0, uint16_t status, int ignore_busy) 1154 { 1155 struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid]; 1156 struct nvme_completion *compl; 1157 int do_intr = 0; 1158 int phase; 1159 1160 DPRINTF(("%s sqid %d cqid %u cid %u status: 0x%x 0x%x\r\n", 1161 __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status), 1162 NVME_STATUS_GET_SC(status))); 1163 1164 pthread_mutex_lock(&cq->mtx); 1165 1166 assert(cq->qbase != NULL); 1167 1168 compl = &cq->qbase[cq->tail]; 1169 1170 compl->sqhd = atomic_load_acq_short(&sq->head); 1171 compl->sqid = sqid; 1172 compl->cid = cid; 1173 1174 // toggle phase 1175 phase = NVME_STATUS_GET_P(compl->status); 1176 compl->status = status; 1177 pci_nvme_toggle_phase(&compl->status, phase); 1178 1179 cq->tail = (cq->tail + 1) % cq->size; 1180 1181 if (cq->intr_en & NVME_CQ_INTEN) 1182 do_intr = 1; 1183 1184 pthread_mutex_unlock(&cq->mtx); 1185 1186 if (ignore_busy || !atomic_load_acq_int(&sq->busy)) 1187 if (do_intr) 1188 pci_generate_msix(sc->nsc_pi, cq->intr_vec); 1189 } 1190 1191 static void 1192 pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req) 1193 { 1194 req->sc = NULL; 1195 req->nvme_sq = NULL; 1196 req->sqid = 0; 1197 1198 pthread_mutex_lock(&sc->mtx); 1199 1200 req->next = sc->ioreqs_free; 1201 sc->ioreqs_free = req; 1202 sc->pending_ios--; 1203 1204 /* when no more IO pending, can set to ready if device reset/enabled */ 1205 if (sc->pending_ios == 0 && 1206 NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts))) 1207 sc->regs.csts |= NVME_CSTS_RDY; 1208 1209 pthread_mutex_unlock(&sc->mtx); 1210 1211 sem_post(&sc->iosemlock); 1212 } 1213 1214 static struct pci_nvme_ioreq * 1215 pci_nvme_get_ioreq(struct pci_nvme_softc *sc) 1216 { 1217 struct pci_nvme_ioreq *req = NULL;; 1218 1219 sem_wait(&sc->iosemlock); 1220 pthread_mutex_lock(&sc->mtx); 1221 1222 req = sc->ioreqs_free; 1223 assert(req != NULL); 1224 1225 sc->ioreqs_free = req->next; 1226 1227 req->next = NULL; 1228 req->sc = sc; 1229 1230 sc->pending_ios++; 1231 1232 pthread_mutex_unlock(&sc->mtx); 1233 1234 req->io_req.br_iovcnt = 0; 1235 req->io_req.br_offset = 0; 1236 req->io_req.br_resid = 0; 1237 req->io_req.br_param = req; 1238 req->prev_gpaddr = 0; 1239 req->prev_size = 0; 1240 1241 return req; 1242 } 1243 1244 static void 1245 pci_nvme_io_done(struct blockif_req *br, int err) 1246 { 1247 struct pci_nvme_ioreq *req = br->br_param; 1248 struct nvme_submission_queue *sq = req->nvme_sq; 1249 uint16_t code, status = 0; 1250 1251 DPRINTF(("%s error %d %s\r\n", __func__, err, strerror(err))); 1252 1253 /* TODO return correct error */ 1254 code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS; 1255 pci_nvme_status_genc(&status, code); 1256 1257 pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, 0, status, 0); 1258 pci_nvme_release_ioreq(req->sc, req); 1259 } 1260 1261 static void 1262 pci_nvme_io_partial(struct blockif_req *br, int err) 1263 { 1264 struct pci_nvme_ioreq *req = br->br_param; 1265 1266 DPRINTF(("%s error %d %s\r\n", __func__, err, strerror(err))); 1267 1268 pthread_cond_signal(&req->cv); 1269 } 1270 1271 1272 static void 1273 pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx) 1274 { 1275 struct nvme_submission_queue *sq; 1276 uint16_t status = 0; 1277 uint16_t sqhead; 1278 int err; 1279 1280 /* handle all submissions up to sq->tail index */ 1281 sq = &sc->submit_queues[idx]; 1282 1283 if (atomic_testandset_int(&sq->busy, 1)) { 1284 DPRINTF(("%s sqid %u busy\r\n", __func__, idx)); 1285 return; 1286 } 1287 1288 sqhead = atomic_load_acq_short(&sq->head); 1289 1290 DPRINTF(("nvme_handle_io qid %u head %u tail %u cmdlist %p\r\n", 1291 idx, sqhead, sq->tail, sq->qbase)); 1292 1293 while (sqhead != atomic_load_acq_short(&sq->tail)) { 1294 struct nvme_command *cmd; 1295 struct pci_nvme_ioreq *req = NULL; 1296 uint64_t lba; 1297 uint64_t nblocks, bytes, size, cpsz; 1298 1299 /* TODO: support scatter gather list handling */ 1300 1301 cmd = &sq->qbase[sqhead]; 1302 sqhead = (sqhead + 1) % sq->size; 1303 1304 lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10; 1305 1306 if (cmd->opc == NVME_OPC_FLUSH) { 1307 pci_nvme_status_genc(&status, NVME_SC_SUCCESS); 1308 pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0, 1309 status, 1); 1310 1311 continue; 1312 } else if (cmd->opc == 0x08) { 1313 /* TODO: write zeroes */ 1314 WPRINTF(("%s write zeroes lba 0x%lx blocks %u\r\n", 1315 __func__, lba, cmd->cdw12 & 0xFFFF)); 1316 pci_nvme_status_genc(&status, NVME_SC_SUCCESS); 1317 pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0, 1318 status, 1); 1319 1320 continue; 1321 } 1322 1323 nblocks = (cmd->cdw12 & 0xFFFF) + 1; 1324 1325 bytes = nblocks * sc->nvstore.sectsz; 1326 1327 if (sc->nvstore.type == NVME_STOR_BLOCKIF) { 1328 req = pci_nvme_get_ioreq(sc); 1329 req->nvme_sq = sq; 1330 req->sqid = idx; 1331 } 1332 1333 /* 1334 * If data starts mid-page and flows into the next page, then 1335 * increase page count 1336 */ 1337 1338 DPRINTF(("[h%u:t%u:n%u] %s starting LBA 0x%lx blocks %lu " 1339 "(%lu-bytes)\r\n", 1340 sqhead==0 ? sq->size-1 : sqhead-1, sq->tail, sq->size, 1341 cmd->opc == NVME_OPC_WRITE ? 1342 "WRITE" : "READ", 1343 lba, nblocks, bytes)); 1344 1345 cmd->prp1 &= ~(0x03UL); 1346 cmd->prp2 &= ~(0x03UL); 1347 1348 DPRINTF((" prp1 0x%lx prp2 0x%lx\r\n", cmd->prp1, cmd->prp2)); 1349 1350 size = bytes; 1351 lba *= sc->nvstore.sectsz; 1352 1353 cpsz = PAGE_SIZE - (cmd->prp1 % PAGE_SIZE); 1354 1355 if (cpsz > bytes) 1356 cpsz = bytes; 1357 1358 if (req != NULL) { 1359 req->io_req.br_offset = ((uint64_t)cmd->cdw11 << 32) | 1360 cmd->cdw10; 1361 req->opc = cmd->opc; 1362 req->cid = cmd->cid; 1363 req->nsid = cmd->nsid; 1364 } 1365 1366 err = pci_nvme_append_iov_req(sc, req, cmd->prp1, cpsz, 1367 cmd->opc == NVME_OPC_WRITE, lba); 1368 lba += cpsz; 1369 size -= cpsz; 1370 1371 if (size == 0) 1372 goto iodone; 1373 1374 if (size <= PAGE_SIZE) { 1375 /* prp2 is second (and final) page in transfer */ 1376 1377 err = pci_nvme_append_iov_req(sc, req, cmd->prp2, 1378 size, 1379 cmd->opc == NVME_OPC_WRITE, 1380 lba); 1381 } else { 1382 uint64_t *prp_list; 1383 int i; 1384 1385 /* prp2 is pointer to a physical region page list */ 1386 prp_list = paddr_guest2host(sc->nsc_pi->pi_vmctx, 1387 cmd->prp2, PAGE_SIZE); 1388 1389 i = 0; 1390 while (size != 0) { 1391 cpsz = MIN(size, PAGE_SIZE); 1392 1393 /* 1394 * Move to linked physical region page list 1395 * in last item. 1396 */ 1397 if (i == (NVME_PRP2_ITEMS-1) && 1398 size > PAGE_SIZE) { 1399 assert((prp_list[i] & (PAGE_SIZE-1)) == 0); 1400 prp_list = paddr_guest2host( 1401 sc->nsc_pi->pi_vmctx, 1402 prp_list[i], PAGE_SIZE); 1403 i = 0; 1404 } 1405 if (prp_list[i] == 0) { 1406 WPRINTF(("PRP2[%d] = 0 !!!\r\n", i)); 1407 err = 1; 1408 break; 1409 } 1410 1411 err = pci_nvme_append_iov_req(sc, req, 1412 prp_list[i], cpsz, 1413 cmd->opc == NVME_OPC_WRITE, lba); 1414 if (err) 1415 break; 1416 1417 lba += cpsz; 1418 size -= cpsz; 1419 i++; 1420 } 1421 } 1422 1423 iodone: 1424 if (sc->nvstore.type == NVME_STOR_RAM) { 1425 uint16_t code, status = 0; 1426 1427 code = err ? NVME_SC_LBA_OUT_OF_RANGE : 1428 NVME_SC_SUCCESS; 1429 pci_nvme_status_genc(&status, code); 1430 1431 pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0, 1432 status, 1); 1433 1434 continue; 1435 } 1436 1437 1438 if (err) 1439 goto do_error; 1440 1441 req->io_req.br_callback = pci_nvme_io_done; 1442 1443 err = 0; 1444 switch (cmd->opc) { 1445 case NVME_OPC_READ: 1446 err = blockif_read(sc->nvstore.ctx, &req->io_req); 1447 break; 1448 case NVME_OPC_WRITE: 1449 err = blockif_write(sc->nvstore.ctx, &req->io_req); 1450 break; 1451 default: 1452 WPRINTF(("%s unhandled io command 0x%x\r\n", 1453 __func__, cmd->opc)); 1454 err = 1; 1455 } 1456 1457 do_error: 1458 if (err) { 1459 uint16_t status = 0; 1460 1461 pci_nvme_status_genc(&status, 1462 NVME_SC_DATA_TRANSFER_ERROR); 1463 1464 pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0, 1465 status, 1); 1466 pci_nvme_release_ioreq(sc, req); 1467 } 1468 } 1469 1470 atomic_store_short(&sq->head, sqhead); 1471 atomic_store_int(&sq->busy, 0); 1472 } 1473 1474 static void 1475 pci_nvme_handle_doorbell(struct vmctx *ctx, struct pci_nvme_softc* sc, 1476 uint64_t idx, int is_sq, uint64_t value) 1477 { 1478 DPRINTF(("nvme doorbell %lu, %s, val 0x%lx\r\n", 1479 idx, is_sq ? "SQ" : "CQ", value & 0xFFFF)); 1480 1481 if (is_sq) { 1482 atomic_store_short(&sc->submit_queues[idx].tail, 1483 (uint16_t)value); 1484 1485 if (idx == 0) { 1486 pci_nvme_handle_admin_cmd(sc, value); 1487 } else { 1488 /* submission queue; handle new entries in SQ */ 1489 if (idx > sc->num_squeues) { 1490 WPRINTF(("%s SQ index %lu overflow from " 1491 "guest (max %u)\r\n", 1492 __func__, idx, sc->num_squeues)); 1493 return; 1494 } 1495 pci_nvme_handle_io_cmd(sc, (uint16_t)idx); 1496 } 1497 } else { 1498 if (idx > sc->num_cqueues) { 1499 WPRINTF(("%s queue index %lu overflow from " 1500 "guest (max %u)\r\n", 1501 __func__, idx, sc->num_cqueues)); 1502 return; 1503 } 1504 1505 sc->compl_queues[idx].head = (uint16_t)value; 1506 } 1507 } 1508 1509 static void 1510 pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite) 1511 { 1512 const char *s = iswrite ? "WRITE" : "READ"; 1513 1514 switch (offset) { 1515 case NVME_CR_CAP_LOW: 1516 DPRINTF(("%s %s NVME_CR_CAP_LOW\r\n", func, s)); 1517 break; 1518 case NVME_CR_CAP_HI: 1519 DPRINTF(("%s %s NVME_CR_CAP_HI\r\n", func, s)); 1520 break; 1521 case NVME_CR_VS: 1522 DPRINTF(("%s %s NVME_CR_VS\r\n", func, s)); 1523 break; 1524 case NVME_CR_INTMS: 1525 DPRINTF(("%s %s NVME_CR_INTMS\r\n", func, s)); 1526 break; 1527 case NVME_CR_INTMC: 1528 DPRINTF(("%s %s NVME_CR_INTMC\r\n", func, s)); 1529 break; 1530 case NVME_CR_CC: 1531 DPRINTF(("%s %s NVME_CR_CC\r\n", func, s)); 1532 break; 1533 case NVME_CR_CSTS: 1534 DPRINTF(("%s %s NVME_CR_CSTS\r\n", func, s)); 1535 break; 1536 case NVME_CR_NSSR: 1537 DPRINTF(("%s %s NVME_CR_NSSR\r\n", func, s)); 1538 break; 1539 case NVME_CR_AQA: 1540 DPRINTF(("%s %s NVME_CR_AQA\r\n", func, s)); 1541 break; 1542 case NVME_CR_ASQ_LOW: 1543 DPRINTF(("%s %s NVME_CR_ASQ_LOW\r\n", func, s)); 1544 break; 1545 case NVME_CR_ASQ_HI: 1546 DPRINTF(("%s %s NVME_CR_ASQ_HI\r\n", func, s)); 1547 break; 1548 case NVME_CR_ACQ_LOW: 1549 DPRINTF(("%s %s NVME_CR_ACQ_LOW\r\n", func, s)); 1550 break; 1551 case NVME_CR_ACQ_HI: 1552 DPRINTF(("%s %s NVME_CR_ACQ_HI\r\n", func, s)); 1553 break; 1554 default: 1555 DPRINTF(("unknown nvme bar-0 offset 0x%lx\r\n", offset)); 1556 } 1557 1558 } 1559 1560 static void 1561 pci_nvme_write_bar_0(struct vmctx *ctx, struct pci_nvme_softc* sc, 1562 uint64_t offset, int size, uint64_t value) 1563 { 1564 uint32_t ccreg; 1565 1566 if (offset >= NVME_DOORBELL_OFFSET) { 1567 uint64_t belloffset = offset - NVME_DOORBELL_OFFSET; 1568 uint64_t idx = belloffset / 8; /* door bell size = 2*int */ 1569 int is_sq = (belloffset % 8) < 4; 1570 1571 if (belloffset > ((sc->max_queues+1) * 8 - 4)) { 1572 WPRINTF(("guest attempted an overflow write offset " 1573 "0x%lx, val 0x%lx in %s", 1574 offset, value, __func__)); 1575 return; 1576 } 1577 1578 pci_nvme_handle_doorbell(ctx, sc, idx, is_sq, value); 1579 return; 1580 } 1581 1582 DPRINTF(("nvme-write offset 0x%lx, size %d, value 0x%lx\r\n", 1583 offset, size, value)); 1584 1585 if (size != 4) { 1586 WPRINTF(("guest wrote invalid size %d (offset 0x%lx, " 1587 "val 0x%lx) to bar0 in %s", 1588 size, offset, value, __func__)); 1589 /* TODO: shutdown device */ 1590 return; 1591 } 1592 1593 pci_nvme_bar0_reg_dumps(__func__, offset, 1); 1594 1595 pthread_mutex_lock(&sc->mtx); 1596 1597 switch (offset) { 1598 case NVME_CR_CAP_LOW: 1599 case NVME_CR_CAP_HI: 1600 /* readonly */ 1601 break; 1602 case NVME_CR_VS: 1603 /* readonly */ 1604 break; 1605 case NVME_CR_INTMS: 1606 /* MSI-X, so ignore */ 1607 break; 1608 case NVME_CR_INTMC: 1609 /* MSI-X, so ignore */ 1610 break; 1611 case NVME_CR_CC: 1612 ccreg = (uint32_t)value; 1613 1614 DPRINTF(("%s NVME_CR_CC en %x css %x shn %x iosqes %u " 1615 "iocqes %u\r\n", 1616 __func__, 1617 NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg), 1618 NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg), 1619 NVME_CC_GET_IOCQES(ccreg))); 1620 1621 if (NVME_CC_GET_SHN(ccreg)) { 1622 /* perform shutdown - flush out data to backend */ 1623 sc->regs.csts &= ~(NVME_CSTS_REG_SHST_MASK << 1624 NVME_CSTS_REG_SHST_SHIFT); 1625 sc->regs.csts |= NVME_SHST_COMPLETE << 1626 NVME_CSTS_REG_SHST_SHIFT; 1627 } 1628 if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) { 1629 if (NVME_CC_GET_EN(ccreg) == 0) 1630 /* transition 1-> causes controller reset */ 1631 pci_nvme_reset_locked(sc); 1632 else 1633 pci_nvme_init_controller(ctx, sc); 1634 } 1635 1636 /* Insert the iocqes, iosqes and en bits from the write */ 1637 sc->regs.cc &= ~NVME_CC_WRITE_MASK; 1638 sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK; 1639 if (NVME_CC_GET_EN(ccreg) == 0) { 1640 /* Insert the ams, mps and css bit fields */ 1641 sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK; 1642 sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK; 1643 sc->regs.csts &= ~NVME_CSTS_RDY; 1644 } else if (sc->pending_ios == 0) { 1645 sc->regs.csts |= NVME_CSTS_RDY; 1646 } 1647 break; 1648 case NVME_CR_CSTS: 1649 break; 1650 case NVME_CR_NSSR: 1651 /* ignore writes; don't support subsystem reset */ 1652 break; 1653 case NVME_CR_AQA: 1654 sc->regs.aqa = (uint32_t)value; 1655 break; 1656 case NVME_CR_ASQ_LOW: 1657 sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) | 1658 (0xFFFFF000 & value); 1659 break; 1660 case NVME_CR_ASQ_HI: 1661 sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) | 1662 (value << 32); 1663 break; 1664 case NVME_CR_ACQ_LOW: 1665 sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) | 1666 (0xFFFFF000 & value); 1667 break; 1668 case NVME_CR_ACQ_HI: 1669 sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) | 1670 (value << 32); 1671 break; 1672 default: 1673 DPRINTF(("%s unknown offset 0x%lx, value 0x%lx size %d\r\n", 1674 __func__, offset, value, size)); 1675 } 1676 pthread_mutex_unlock(&sc->mtx); 1677 } 1678 1679 static void 1680 pci_nvme_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, 1681 int baridx, uint64_t offset, int size, uint64_t value) 1682 { 1683 struct pci_nvme_softc* sc = pi->pi_arg; 1684 1685 if (baridx == pci_msix_table_bar(pi) || 1686 baridx == pci_msix_pba_bar(pi)) { 1687 DPRINTF(("nvme-write baridx %d, msix: off 0x%lx, size %d, " 1688 " value 0x%lx\r\n", baridx, offset, size, value)); 1689 1690 pci_emul_msix_twrite(pi, offset, size, value); 1691 return; 1692 } 1693 1694 switch (baridx) { 1695 case 0: 1696 pci_nvme_write_bar_0(ctx, sc, offset, size, value); 1697 break; 1698 1699 default: 1700 DPRINTF(("%s unknown baridx %d, val 0x%lx\r\n", 1701 __func__, baridx, value)); 1702 } 1703 } 1704 1705 static uint64_t pci_nvme_read_bar_0(struct pci_nvme_softc* sc, 1706 uint64_t offset, int size) 1707 { 1708 uint64_t value; 1709 1710 pci_nvme_bar0_reg_dumps(__func__, offset, 0); 1711 1712 if (offset < NVME_DOORBELL_OFFSET) { 1713 void *p = &(sc->regs); 1714 pthread_mutex_lock(&sc->mtx); 1715 memcpy(&value, (void *)((uintptr_t)p + offset), size); 1716 pthread_mutex_unlock(&sc->mtx); 1717 } else { 1718 value = 0; 1719 WPRINTF(("pci_nvme: read invalid offset %ld\r\n", offset)); 1720 } 1721 1722 switch (size) { 1723 case 1: 1724 value &= 0xFF; 1725 break; 1726 case 2: 1727 value &= 0xFFFF; 1728 break; 1729 case 4: 1730 value &= 0xFFFFFFFF; 1731 break; 1732 } 1733 1734 DPRINTF((" nvme-read offset 0x%lx, size %d -> value 0x%x\r\n", 1735 offset, size, (uint32_t)value)); 1736 1737 return (value); 1738 } 1739 1740 1741 1742 static uint64_t 1743 pci_nvme_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1744 uint64_t offset, int size) 1745 { 1746 struct pci_nvme_softc* sc = pi->pi_arg; 1747 1748 if (baridx == pci_msix_table_bar(pi) || 1749 baridx == pci_msix_pba_bar(pi)) { 1750 DPRINTF(("nvme-read bar: %d, msix: regoff 0x%lx, size %d\r\n", 1751 baridx, offset, size)); 1752 1753 return pci_emul_msix_tread(pi, offset, size); 1754 } 1755 1756 switch (baridx) { 1757 case 0: 1758 return pci_nvme_read_bar_0(sc, offset, size); 1759 1760 default: 1761 DPRINTF(("unknown bar %d, 0x%lx\r\n", baridx, offset)); 1762 } 1763 1764 return (0); 1765 } 1766 1767 1768 static int 1769 pci_nvme_parse_opts(struct pci_nvme_softc *sc, char *opts) 1770 { 1771 char bident[sizeof("XX:X:X")]; 1772 char *uopt, *xopts, *config; 1773 uint32_t sectsz; 1774 int optidx; 1775 1776 sc->max_queues = NVME_QUEUES; 1777 sc->max_qentries = NVME_MAX_QENTRIES; 1778 sc->ioslots = NVME_IOSLOTS; 1779 sc->num_squeues = sc->max_queues; 1780 sc->num_cqueues = sc->max_queues; 1781 sectsz = 0; 1782 1783 uopt = strdup(opts); 1784 optidx = 0; 1785 snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn), 1786 "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func); 1787 for (xopts = strtok(uopt, ","); 1788 xopts != NULL; 1789 xopts = strtok(NULL, ",")) { 1790 1791 if ((config = strchr(xopts, '=')) != NULL) 1792 *config++ = '\0'; 1793 1794 if (!strcmp("maxq", xopts)) { 1795 sc->max_queues = atoi(config); 1796 } else if (!strcmp("qsz", xopts)) { 1797 sc->max_qentries = atoi(config); 1798 } else if (!strcmp("ioslots", xopts)) { 1799 sc->ioslots = atoi(config); 1800 } else if (!strcmp("sectsz", xopts)) { 1801 sectsz = atoi(config); 1802 } else if (!strcmp("ser", xopts)) { 1803 /* 1804 * This field indicates the Product Serial Number in 1805 * 7-bit ASCII, unused bytes should be space characters. 1806 * Ref: NVMe v1.3c. 1807 */ 1808 cpywithpad((char *)sc->ctrldata.sn, 1809 sizeof(sc->ctrldata.sn), config, ' '); 1810 } else if (!strcmp("ram", xopts)) { 1811 uint64_t sz = strtoull(&xopts[4], NULL, 10); 1812 1813 sc->nvstore.type = NVME_STOR_RAM; 1814 sc->nvstore.size = sz * 1024 * 1024; 1815 sc->nvstore.ctx = calloc(1, sc->nvstore.size); 1816 sc->nvstore.sectsz = 4096; 1817 sc->nvstore.sectsz_bits = 12; 1818 if (sc->nvstore.ctx == NULL) { 1819 perror("Unable to allocate RAM"); 1820 free(uopt); 1821 return (-1); 1822 } 1823 } else if (optidx == 0) { 1824 snprintf(bident, sizeof(bident), "%d:%d", 1825 sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func); 1826 sc->nvstore.ctx = blockif_open(xopts, bident); 1827 if (sc->nvstore.ctx == NULL) { 1828 perror("Could not open backing file"); 1829 free(uopt); 1830 return (-1); 1831 } 1832 sc->nvstore.type = NVME_STOR_BLOCKIF; 1833 sc->nvstore.size = blockif_size(sc->nvstore.ctx); 1834 } else { 1835 fprintf(stderr, "Invalid option %s\n", xopts); 1836 free(uopt); 1837 return (-1); 1838 } 1839 1840 optidx++; 1841 } 1842 free(uopt); 1843 1844 if (sc->nvstore.ctx == NULL || sc->nvstore.size == 0) { 1845 fprintf(stderr, "backing store not specified\n"); 1846 return (-1); 1847 } 1848 if (sectsz == 512 || sectsz == 4096 || sectsz == 8192) 1849 sc->nvstore.sectsz = sectsz; 1850 else if (sc->nvstore.type != NVME_STOR_RAM) 1851 sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx); 1852 for (sc->nvstore.sectsz_bits = 9; 1853 (1 << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz; 1854 sc->nvstore.sectsz_bits++); 1855 1856 if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES) 1857 sc->max_queues = NVME_QUEUES; 1858 1859 if (sc->max_qentries <= 0) { 1860 fprintf(stderr, "Invalid qsz option\n"); 1861 return (-1); 1862 } 1863 if (sc->ioslots <= 0) { 1864 fprintf(stderr, "Invalid ioslots option\n"); 1865 return (-1); 1866 } 1867 1868 return (0); 1869 } 1870 1871 static int 1872 pci_nvme_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 1873 { 1874 struct pci_nvme_softc *sc; 1875 uint32_t pci_membar_sz; 1876 int error; 1877 1878 error = 0; 1879 1880 sc = calloc(1, sizeof(struct pci_nvme_softc)); 1881 pi->pi_arg = sc; 1882 sc->nsc_pi = pi; 1883 1884 error = pci_nvme_parse_opts(sc, opts); 1885 if (error < 0) 1886 goto done; 1887 else 1888 error = 0; 1889 1890 sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq)); 1891 for (int i = 0; i < sc->ioslots; i++) { 1892 if (i < (sc->ioslots-1)) 1893 sc->ioreqs[i].next = &sc->ioreqs[i+1]; 1894 pthread_mutex_init(&sc->ioreqs[i].mtx, NULL); 1895 pthread_cond_init(&sc->ioreqs[i].cv, NULL); 1896 } 1897 sc->ioreqs_free = sc->ioreqs; 1898 sc->intr_coales_aggr_thresh = 1; 1899 1900 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A); 1901 pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D); 1902 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE); 1903 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM); 1904 pci_set_cfgdata8(pi, PCIR_PROGIF, 1905 PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0); 1906 1907 /* 1908 * Allocate size of NVMe registers + doorbell space for all queues. 1909 * 1910 * The specification requires a minimum memory I/O window size of 16K. 1911 * The Windows driver will refuse to start a device with a smaller 1912 * window. 1913 */ 1914 pci_membar_sz = sizeof(struct nvme_registers) + 1915 2 * sizeof(uint32_t) * (sc->max_queues + 1); 1916 pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN); 1917 1918 DPRINTF(("nvme membar size: %u\r\n", pci_membar_sz)); 1919 1920 error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz); 1921 if (error) { 1922 WPRINTF(("%s pci alloc mem bar failed\r\n", __func__)); 1923 goto done; 1924 } 1925 1926 error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR); 1927 if (error) { 1928 WPRINTF(("%s pci add msixcap failed\r\n", __func__)); 1929 goto done; 1930 } 1931 1932 pthread_mutex_init(&sc->mtx, NULL); 1933 sem_init(&sc->iosemlock, 0, sc->ioslots); 1934 1935 pci_nvme_reset(sc); 1936 pci_nvme_init_ctrldata(sc); 1937 pci_nvme_init_nsdata(sc); 1938 pci_nvme_init_logpages(sc); 1939 1940 pci_lintr_request(pi); 1941 1942 done: 1943 return (error); 1944 } 1945 1946 1947 struct pci_devemu pci_de_nvme = { 1948 .pe_emu = "nvme", 1949 .pe_init = pci_nvme_init, 1950 .pe_barwrite = pci_nvme_write, 1951 .pe_barread = pci_nvme_read 1952 }; 1953 PCI_EMUL_SET(pci_de_nvme); 1954