1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * bhyve PCIe-NVMe device emulation.
34 * -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z,eui64=#,dsm=<opt>
45 * ser = serial number (20-chars max)
52 - create async event for smart and log
53 - intr coalesce
120 /* Convert a zero-based value into a one-based value */
122 /* Convert a one-based value into a zero-based value */
123 #define ZERO_BASED(one) ((one) - 1)
127 (ZERO_BASED((sc)->num_squeues) & 0xffff) | \
128 (ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16
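Both macros encode the NVMe convention that many counts are "0's based" (a stored 0 means one). A minimal standalone sketch, with an illustrative helper name, of how the NVME_FEATURE_NUM_QUEUES packing above composes them:

        #include <stdint.h>

        #define ONE_BASED(zero)  ((zero) + 1)
        #define ZERO_BASED(one)  ((one) - 1)

        /*
         * Pack the allocated queue counts as the zero-based NSQA
         * (bits 15:0) and NCQA (bits 31:16) fields of a dword.
         */
        static uint32_t
        num_queues_cdw0(uint16_t num_squeues, uint16_t num_cqueues)
        {
                return ((uint32_t)(ZERO_BASED(num_squeues) & 0xffff)) |
                    ((uint32_t)(ZERO_BASED(num_cqueues) & 0xffff) << 16);
        }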
199 * based on the advertised Max Data Transfer (MDTS) and given the number of
204 NVME_MAX_IOVEC - BLOCKIF_IOV_MAX : \
268 /** Asynchronous Event Information - Error */
279 /** Asynchronous Event Information - Notice */
484 sc->num_squeues = nsq; in pci_nvme_init_queues()
486 sc->submit_queues = calloc(sc->num_squeues + 1, in pci_nvme_init_queues()
488 if (sc->submit_queues == NULL) { in pci_nvme_init_queues()
490 sc->num_squeues = 0; in pci_nvme_init_queues()
492 struct nvme_submission_queue *sq = sc->submit_queues; in pci_nvme_init_queues()
494 for (i = 0; i < sc->num_squeues + 1; i++) in pci_nvme_init_queues()
507 sc->num_cqueues = ncq; in pci_nvme_init_queues()
509 sc->compl_queues = calloc(sc->num_cqueues + 1, in pci_nvme_init_queues()
511 if (sc->compl_queues == NULL) { in pci_nvme_init_queues()
513 sc->num_cqueues = 0; in pci_nvme_init_queues()
515 struct nvme_completion_queue *cq = sc->compl_queues; in pci_nvme_init_queues()
517 for (i = 0; i < sc->num_cqueues + 1; i++) in pci_nvme_init_queues()
525 struct nvme_controller_data *cd = &sc->ctrldata; in pci_nvme_init_ctrldata()
528 cd->vid = 0xFB5D; in pci_nvme_init_ctrldata()
529 cd->ssvid = 0x0000; in pci_nvme_init_ctrldata()
531 cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' '); in pci_nvme_init_ctrldata()
532 cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' '); in pci_nvme_init_ctrldata()
535 cd->rab = 4; in pci_nvme_init_ctrldata()
538 cd->ieee[0] = 0xfc; in pci_nvme_init_ctrldata()
539 cd->ieee[1] = 0x9c; in pci_nvme_init_ctrldata()
540 cd->ieee[2] = 0x58; in pci_nvme_init_ctrldata()
542 cd->mic = 0; in pci_nvme_init_ctrldata()
544 cd->mdts = NVME_MDTS; /* max data transfer size (2^mdts * CAP.MPSMIN) */ in pci_nvme_init_ctrldata()
546 cd->ver = NVME_REV(1,4); in pci_nvme_init_ctrldata()
548 cd->cntrltype = NVME_CNTRLTYPE_IO; in pci_nvme_init_ctrldata()
549 cd->oacs = NVMEF(NVME_CTRLR_DATA_OACS_FORMAT, 1); in pci_nvme_init_ctrldata()
550 cd->oaes = NVMEM(NVME_CTRLR_DATA_OAES_NS_ATTR); in pci_nvme_init_ctrldata()
551 cd->acl = 2; in pci_nvme_init_ctrldata()
552 cd->aerl = 4; in pci_nvme_init_ctrldata()
554 /* Advertise 1, Read-only firmware slot */ in pci_nvme_init_ctrldata()
555 cd->frmw = NVMEM(NVME_CTRLR_DATA_FRMW_SLOT1_RO) | in pci_nvme_init_ctrldata()
557 cd->lpa = 0; /* TODO: support some simple things like SMART */ in pci_nvme_init_ctrldata()
558 cd->elpe = 0; /* max error log page entries */ in pci_nvme_init_ctrldata()
560 * Report a single power state (zero-based value) in pci_nvme_init_ctrldata()
563 cd->npss = 0; in pci_nvme_init_ctrldata()
566 cd->wctemp = 0x0157; in pci_nvme_init_ctrldata()
567 cd->cctemp = 0x0157; in pci_nvme_init_ctrldata()
570 cd->sanicap = NVMEF(NVME_CTRLR_DATA_SANICAP_NODMMAS, in pci_nvme_init_ctrldata()
573 cd->sqes = NVMEF(NVME_CTRLR_DATA_SQES_MAX, 6) | in pci_nvme_init_ctrldata()
575 cd->cqes = NVMEF(NVME_CTRLR_DATA_CQES_MAX, 4) | in pci_nvme_init_ctrldata()
577 cd->nn = 1; /* number of namespaces */ in pci_nvme_init_ctrldata()
579 cd->oncs = 0; in pci_nvme_init_ctrldata()
580 switch (sc->dataset_management) { in pci_nvme_init_ctrldata()
582 if (sc->nvstore.deallocate) in pci_nvme_init_ctrldata()
583 cd->oncs |= NVME_ONCS_DSM; in pci_nvme_init_ctrldata()
586 cd->oncs |= NVME_ONCS_DSM; in pci_nvme_init_ctrldata()
592 cd->fna = NVMEM(NVME_CTRLR_DATA_FNA_FORMAT_ALL); in pci_nvme_init_ctrldata()
594 cd->vwc = NVMEF(NVME_CTRLR_DATA_VWC_ALL, NVME_CTRLR_DATA_VWC_ALL_NO); in pci_nvme_init_ctrldata()
596 ret = snprintf(cd->subnqn, sizeof(cd->subnqn), in pci_nvme_init_ctrldata()
597 "nqn.2013-12.org.freebsd:bhyve-%s-%u-%u-%u", in pci_nvme_init_ctrldata()
598 get_config_value("name"), sc->nsc_pi->pi_bus, in pci_nvme_init_ctrldata()
599 sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func); in pci_nvme_init_ctrldata()
600 if ((ret < 0) || ((unsigned)ret > sizeof(cd->subnqn))) in pci_nvme_init_ctrldata()
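The subnqn check relies on snprintf() semantics: a negative return signals an output error, and a return value larger than the buffer signals truncation. A self-contained sketch of the same pattern; build_subnqn() is an illustrative name, not a function in the source:

        #include <stdio.h>

        /* Returns 0 on success, -1 if the NQN would not fit in buf. */
        static int
        build_subnqn(char *buf, size_t buflen, const char *vmname,
            unsigned bus, unsigned slot, unsigned func)
        {
                int ret;

                ret = snprintf(buf, buflen,
                    "nqn.2013-12.org.freebsd:bhyve-%s-%u-%u-%u",
                    vmname, bus, slot, func);
                if ((ret < 0) || ((unsigned)ret > buflen))
                        return (-1);
                return (0);
        }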
610 nd->nsze = nvstore->size / nvstore->sectsz; in pci_nvme_init_nsdata_size()
611 nd->ncap = nd->nsze; in pci_nvme_init_nsdata_size()
612 nd->nuse = nd->nsze; in pci_nvme_init_nsdata_size()
623 if (nvstore->type == NVME_STOR_BLOCKIF) in pci_nvme_init_nsdata()
624 nvstore->deallocate = blockif_candelete(nvstore->ctx); in pci_nvme_init_nsdata()
626 nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */ in pci_nvme_init_nsdata()
627 nd->flbas = 0; in pci_nvme_init_nsdata()
629 /* Create an EUI-64 if user did not provide one */ in pci_nvme_init_nsdata()
630 if (nvstore->eui64 == 0) { in pci_nvme_init_nsdata()
632 uint64_t eui64 = nvstore->eui64; in pci_nvme_init_nsdata()
635 sc->nsc_pi->pi_bus, sc->nsc_pi->pi_slot, in pci_nvme_init_nsdata()
636 sc->nsc_pi->pi_func); in pci_nvme_init_nsdata()
642 nvstore->eui64 = (eui64 << 16) | (nsid & 0xffff); in pci_nvme_init_nsdata()
644 be64enc(nd->eui64, nvstore->eui64); in pci_nvme_init_nsdata()
646 /* LBA data-sz = 2^lbads */ in pci_nvme_init_nsdata()
647 nd->lbaf[0] = NVMEF(NVME_NS_DATA_LBAF_LBADS, nvstore->sectsz_bits); in pci_nvme_init_nsdata()
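The excerpt derives a default EUI-64 from a device-unique prefix (the source mixes an OUI of 58-9c-fc with a hash of the device's PCI address) and folds the NSID into the low 16 bits before big-endian encoding. A simplified sketch under those assumptions; set_eui64() is illustrative and omits the hashing step:

        #include <stdint.h>

        /*
         * Combine a 48-bit device-unique prefix with the 16-bit NSID
         * and store the result big-endian, mirroring what be64enc()
         * does in the source.
         */
        static void
        set_eui64(uint8_t out[8], uint64_t prefix48, uint32_t nsid)
        {
                uint64_t eui64 = (prefix48 << 16) | (nsid & 0xffff);
                int i;

                for (i = 7; i >= 0; i--) {
                        out[i] = eui64 & 0xff;
                        eui64 >>= 8;
                }
        }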
655 memset(&sc->err_log, 0, sizeof(sc->err_log)); in pci_nvme_init_logpages()
656 memset(&sc->health_log, 0, sizeof(sc->health_log)); in pci_nvme_init_logpages()
657 memset(&sc->fw_log, 0, sizeof(sc->fw_log)); in pci_nvme_init_logpages()
658 memset(&sc->ns_log, 0, sizeof(sc->ns_log)); in pci_nvme_init_logpages()
661 sc->read_dunits_remainder = 999; in pci_nvme_init_logpages()
662 sc->write_dunits_remainder = 999; in pci_nvme_init_logpages()
665 sc->health_log.temperature = NVME_TEMPERATURE; in pci_nvme_init_logpages()
666 sc->health_log.available_spare = 100; in pci_nvme_init_logpages()
667 sc->health_log.available_spare_threshold = 10; in pci_nvme_init_logpages()
670 sc->fw_log.afi = NVMEF(NVME_FIRMWARE_PAGE_AFI_SLOT, 1); in pci_nvme_init_logpages()
671 memcpy(&sc->fw_log.revision[0], sc->ctrldata.fr, in pci_nvme_init_logpages()
672 sizeof(sc->fw_log.revision[0])); in pci_nvme_init_logpages()
674 memcpy(&sc->health_log.power_cycles, &power_cycles, in pci_nvme_init_logpages()
675 sizeof(sc->health_log.power_cycles)); in pci_nvme_init_logpages()
690 //XXX hang - case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG: in pci_nvme_init_features()
691 //XXX hang - case NVME_FEAT_HOST_BEHAVIOR_SUPPORT: in pci_nvme_init_features()
695 sc->feat[fid].set = nvme_feature_temperature; in pci_nvme_init_features()
698 sc->feat[fid].namespace_specific = true; in pci_nvme_init_features()
701 sc->feat[fid].set = nvme_feature_num_queues; in pci_nvme_init_features()
704 sc->feat[fid].set = nvme_feature_iv_config; in pci_nvme_init_features()
707 sc->feat[fid].set = nvme_feature_async_event; in pci_nvme_init_features()
709 sc->feat[fid].cdw11 = PCI_NVME_AEN_DEFAULT_MASK; in pci_nvme_init_features()
712 sc->feat[fid].set = nvme_feature_invalid_cb; in pci_nvme_init_features()
713 sc->feat[fid].get = nvme_feature_invalid_cb; in pci_nvme_init_features()
722 STAILQ_INIT(&sc->aer_list); in pci_nvme_aer_reset()
723 sc->aer_count = 0; in pci_nvme_aer_reset()
730 pthread_mutex_init(&sc->aer_mtx, NULL); in pci_nvme_aer_init()
739 pthread_mutex_lock(&sc->aer_mtx); in pci_nvme_aer_destroy()
740 while (!STAILQ_EMPTY(&sc->aer_list)) { in pci_nvme_aer_destroy()
741 aer = STAILQ_FIRST(&sc->aer_list); in pci_nvme_aer_destroy()
742 STAILQ_REMOVE_HEAD(&sc->aer_list, link); in pci_nvme_aer_destroy()
745 pthread_mutex_unlock(&sc->aer_mtx); in pci_nvme_aer_destroy()
754 return (sc->aer_count != 0); in pci_nvme_aer_available()
760 struct nvme_controller_data *cd = &sc->ctrldata; in pci_nvme_aer_limit_reached()
763 return (sc->aer_count == (cd->aerl + 1U)); in pci_nvme_aer_limit_reached()
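cd->aerl is itself a zero-based value, so a controller advertising aerl = 4 accepts five outstanding Asynchronous Event Requests; the comparison against aerl + 1 implements exactly that. A sketch:

        #include <stdbool.h>
        #include <stdint.h>

        /*
         * aerl is the zero-based Asynchronous Event Request Limit from
         * the controller data, so (aerl + 1) is the true maximum.
         */
        static bool
        aer_limit_reached(uint32_t aer_count, uint8_t aerl)
        {
                return (aer_count == (uint32_t)aerl + 1);
        }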
781 return (-1); in pci_nvme_aer_add()
784 aer->cid = cid; in pci_nvme_aer_add()
786 pthread_mutex_lock(&sc->aer_mtx); in pci_nvme_aer_add()
787 sc->aer_count++; in pci_nvme_aer_add()
788 STAILQ_INSERT_TAIL(&sc->aer_list, aer, link); in pci_nvme_aer_add()
789 pthread_mutex_unlock(&sc->aer_mtx); in pci_nvme_aer_add()
805 pthread_mutex_lock(&sc->aer_mtx); in pci_nvme_aer_get()
806 aer = STAILQ_FIRST(&sc->aer_list); in pci_nvme_aer_get()
808 STAILQ_REMOVE_HEAD(&sc->aer_list, link); in pci_nvme_aer_get()
809 sc->aer_count--; in pci_nvme_aer_get()
811 pthread_mutex_unlock(&sc->aer_mtx); in pci_nvme_aer_get()
821 memset(sc->aen, 0, PCI_NVME_AE_TYPE_MAX * sizeof(struct pci_nvme_aen)); in pci_nvme_aen_reset()
824 sc->aen[atype].atype = atype; in pci_nvme_aen_reset()
835 pthread_mutex_init(&sc->aen_mtx, NULL); in pci_nvme_aen_init()
836 pthread_create(&sc->aen_tid, NULL, aen_thr, sc); in pci_nvme_aen_init()
837 snprintf(nstr, sizeof(nstr), "nvme-aen-%d:%d", sc->nsc_pi->pi_slot, in pci_nvme_aen_init()
838 sc->nsc_pi->pi_func); in pci_nvme_aen_init()
839 pthread_set_name_np(sc->aen_tid, nstr); in pci_nvme_aen_init()
854 pthread_cond_signal(&sc->aen_cond); in pci_nvme_aen_notify()
870 pthread_mutex_lock(&sc->aen_mtx); in pci_nvme_aen_post()
871 aen = &sc->aen[atype]; in pci_nvme_aen_post()
874 if (aen->posted) { in pci_nvme_aen_post()
875 pthread_mutex_unlock(&sc->aen_mtx); in pci_nvme_aen_post()
879 aen->event_data = event_data; in pci_nvme_aen_post()
880 aen->posted = true; in pci_nvme_aen_post()
881 pthread_mutex_unlock(&sc->aen_mtx); in pci_nvme_aen_post()
898 assert(pthread_mutex_isowned_np(&sc->aen_mtx)); in pci_nvme_aen_process()
900 aen = &sc->aen[atype]; in pci_nvme_aen_process()
907 if (!aen->posted) { in pci_nvme_aen_process()
916 sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11; in pci_nvme_aen_process()
918 DPRINTF("%s: atype=%#x mask=%#x event_data=%#x", __func__, atype, mask, aen->event_data); in pci_nvme_aen_process()
925 if ((mask & aen->event_data) == 0) in pci_nvme_aen_process()
930 if (aen->event_data >= PCI_NVME_AEI_NOTICE_MAX) { in pci_nvme_aen_process()
932 __func__, aen->event_data); in pci_nvme_aen_process()
937 if ((PCI_NVME_AEI_NOTICE_MASK(aen->event_data) & mask) == 0) in pci_nvme_aen_process()
939 switch (aen->event_data) { in pci_nvme_aen_process()
976 DPRINTF("%s: CID=%#x CDW0=%#x", __func__, aer->cid, (lid << 16) | (aen->event_data << 8) | atype); in pci_nvme_aen_process()
977 pci_nvme_cq_update(sc, &sc->compl_queues[0], in pci_nvme_aen_process()
978 (lid << 16) | (aen->event_data << 8) | atype, /* cdw0 */ in pci_nvme_aen_process()
979 aer->cid, in pci_nvme_aen_process()
983 aen->event_data = 0; in pci_nvme_aen_process()
984 aen->posted = false; in pci_nvme_aen_process()
986 pci_generate_msix(sc->nsc_pi, 0); in pci_nvme_aen_process()
997 pthread_mutex_lock(&sc->aen_mtx); in aen_thr()
1000 pthread_cond_wait(&sc->aen_cond, &sc->aen_mtx); in aen_thr()
1002 pthread_mutex_unlock(&sc->aen_mtx); in aen_thr()
1015 sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) | in pci_nvme_reset_locked()
1019 sc->regs.cap_hi = NVMEF(NVME_CAP_HI_REG_CSS_NVM, 1); in pci_nvme_reset_locked()
1021 sc->regs.vs = NVME_REV(1,4); /* NVMe v1.4 */ in pci_nvme_reset_locked()
1023 sc->regs.cc = 0; in pci_nvme_reset_locked()
1025 assert(sc->submit_queues != NULL); in pci_nvme_reset_locked()
1027 for (i = 0; i < sc->num_squeues + 1; i++) { in pci_nvme_reset_locked()
1028 sc->submit_queues[i].qbase = NULL; in pci_nvme_reset_locked()
1029 sc->submit_queues[i].size = 0; in pci_nvme_reset_locked()
1030 sc->submit_queues[i].cqid = 0; in pci_nvme_reset_locked()
1031 sc->submit_queues[i].tail = 0; in pci_nvme_reset_locked()
1032 sc->submit_queues[i].head = 0; in pci_nvme_reset_locked()
1035 assert(sc->compl_queues != NULL); in pci_nvme_reset_locked()
1037 for (i = 0; i < sc->num_cqueues + 1; i++) { in pci_nvme_reset_locked()
1038 sc->compl_queues[i].qbase = NULL; in pci_nvme_reset_locked()
1039 sc->compl_queues[i].size = 0; in pci_nvme_reset_locked()
1040 sc->compl_queues[i].tail = 0; in pci_nvme_reset_locked()
1041 sc->compl_queues[i].head = 0; in pci_nvme_reset_locked()
1044 sc->num_q_is_set = false; in pci_nvme_reset_locked()
1053 sc->regs.csts = 0; in pci_nvme_reset_locked()
1059 pthread_mutex_lock(&sc->mtx); in pci_nvme_reset()
1061 pthread_mutex_unlock(&sc->mtx); in pci_nvme_reset()
1076 asqs = ONE_BASED(NVMEV(NVME_AQA_REG_ASQS, sc->regs.aqa)); in pci_nvme_init_controller()
1079 asqs - 1, sc->regs.aqa); in pci_nvme_init_controller()
1080 sc->regs.csts |= NVME_CSTS_CFS; in pci_nvme_init_controller()
1081 return (-1); in pci_nvme_init_controller()
1083 sc->submit_queues[0].size = asqs; in pci_nvme_init_controller()
1084 sc->submit_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, in pci_nvme_init_controller()
1085 sc->regs.asq, sizeof(struct nvme_command) * asqs); in pci_nvme_init_controller()
1086 if (sc->submit_queues[0].qbase == NULL) { in pci_nvme_init_controller()
1088 sc->regs.asq); in pci_nvme_init_controller()
1089 sc->regs.csts |= NVME_CSTS_CFS; in pci_nvme_init_controller()
1090 return (-1); in pci_nvme_init_controller()
1093 DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p", in pci_nvme_init_controller()
1094 __func__, sc->regs.asq, sc->submit_queues[0].qbase); in pci_nvme_init_controller()
1096 acqs = ONE_BASED(NVMEV(NVME_AQA_REG_ACQS, sc->regs.aqa)); in pci_nvme_init_controller()
1099 acqs - 1, sc->regs.aqa); in pci_nvme_init_controller()
1100 sc->regs.csts |= NVME_CSTS_CFS; in pci_nvme_init_controller()
1101 return (-1); in pci_nvme_init_controller()
1103 sc->compl_queues[0].size = acqs; in pci_nvme_init_controller()
1104 sc->compl_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, in pci_nvme_init_controller()
1105 sc->regs.acq, sizeof(struct nvme_completion) * acqs); in pci_nvme_init_controller()
1106 if (sc->compl_queues[0].qbase == NULL) { in pci_nvme_init_controller()
1108 sc->regs.acq); in pci_nvme_init_controller()
1109 sc->regs.csts |= NVME_CSTS_CFS; in pci_nvme_init_controller()
1110 return (-1); in pci_nvme_init_controller()
1112 sc->compl_queues[0].intr_en = NVME_CQ_INTEN; in pci_nvme_init_controller()
1114 DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p", in pci_nvme_init_controller()
1115 __func__, sc->regs.acq, sc->compl_queues[0].qbase); in pci_nvme_init_controller()
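Both admin queue sizes come from the AQA register, where ASQS and ACQS are zero-based 12-bit fields. A sketch of the decode, assuming the standard field positions (ASQS in bits 11:0, ACQS in bits 27:16):

        #include <stdint.h>

        /* Admin Submission Queue Size, converted to a one-based count. */
        static inline uint32_t
        aqa_asqs(uint32_t aqa)
        {
                return ((aqa >> 0) & 0xfff) + 1;
        }

        /* Admin Completion Queue Size, converted to a one-based count. */
        static inline uint32_t
        aqa_acqs(uint32_t aqa)
        {
                return ((aqa >> 16) & 0xfff) + 1;
        }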
1128 return (-1); in nvme_prp_memcpy()
1132 bytes = PAGE_SIZE - (prp1 & PAGE_MASK); in nvme_prp_memcpy()
1137 return (-1); in nvme_prp_memcpy()
1147 len -= bytes; in nvme_prp_memcpy()
1156 return (-1); in nvme_prp_memcpy()
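Because PRP1 may point mid-page, the first copy is limited to the remainder of that page; subsequent PRP entries are page-aligned. A sketch of the chunking arithmetic, assuming 4 KiB pages:

        #include <stddef.h>
        #include <stdint.h>

        #define NVME_PAGE_SIZE 4096u
        #define NVME_PAGE_MASK (NVME_PAGE_SIZE - 1)

        /*
         * The first PRP entry may be unaligned: transfer only up to the
         * end of its page, then continue in full-page chunks.
         */
        static size_t
        prp_first_chunk(uint64_t prp1, size_t len)
        {
                size_t bytes = NVME_PAGE_SIZE - (prp1 & NVME_PAGE_MASK);

                return (bytes < len) ? bytes : len;
        }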
1180 struct nvme_submission_queue *sq = &sc->submit_queues[sqid]; in pci_nvme_cq_update()
1183 assert(cq->qbase != NULL); in pci_nvme_cq_update()
1185 pthread_mutex_lock(&cq->mtx); in pci_nvme_cq_update()
1187 cqe = &cq->qbase[cq->tail]; in pci_nvme_cq_update()
1190 status |= (cqe->status ^ NVME_STATUS_P) & NVME_STATUS_P_MASK; in pci_nvme_cq_update()
1192 cqe->cdw0 = cdw0; in pci_nvme_cq_update()
1193 cqe->sqhd = sq->head; in pci_nvme_cq_update()
1194 cqe->sqid = sqid; in pci_nvme_cq_update()
1195 cqe->cid = cid; in pci_nvme_cq_update()
1196 cqe->status = status; in pci_nvme_cq_update()
1198 cq->tail++; in pci_nvme_cq_update()
1199 if (cq->tail >= cq->size) { in pci_nvme_cq_update()
1200 cq->tail = 0; in pci_nvme_cq_update()
1203 pthread_mutex_unlock(&cq->mtx); in pci_nvme_cq_update()
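Each new completion entry must carry the opposite Phase Tag from the stale entry it overwrites, which is what the XOR above computes; the tail then wraps at the queue size. A sketch with a local stand-in for the header's phase-bit mask (the Phase Tag occupies bit 0 of the CQE status word):

        #include <stdint.h>

        #define CQE_PHASE 0x1   /* Phase Tag, bit 0 of the status word */

        /*
         * The fresh entry's phase is the inverse of the stale entry's,
         * so the host can detect new completions without reading head.
         */
        static uint16_t
        new_status(uint16_t stale, uint16_t code)
        {
                return code | ((stale ^ CQE_PHASE) & CQE_PHASE);
        }

        /* Advance the tail index with wraparound. */
        static uint32_t
        advance_tail(uint32_t tail, uint32_t size)
        {
                return (tail + 1 >= size) ? 0 : tail + 1;
        }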
1210 uint16_t qid = command->cdw10 & 0xffff; in nvme_opc_delete_io_sq()
1213 if (qid == 0 || qid > sc->num_squeues || in nvme_opc_delete_io_sq()
1214 (sc->submit_queues[qid].qbase == NULL)) { in nvme_opc_delete_io_sq()
1216 __func__, qid, sc->num_squeues); in nvme_opc_delete_io_sq()
1217 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_delete_io_sq()
1222 sc->submit_queues[qid].qbase = NULL; in nvme_opc_delete_io_sq()
1223 sc->submit_queues[qid].cqid = 0; in nvme_opc_delete_io_sq()
1224 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_delete_io_sq()
1232 if (command->cdw11 & NVME_CMD_CDW11_PC) { in nvme_opc_create_io_sq()
1233 uint16_t qid = command->cdw10 & 0xffff; in nvme_opc_create_io_sq()
1236 if ((qid == 0) || (qid > sc->num_squeues) || in nvme_opc_create_io_sq()
1237 (sc->submit_queues[qid].qbase != NULL)) { in nvme_opc_create_io_sq()
1239 __func__, qid, sc->num_squeues); in nvme_opc_create_io_sq()
1240 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_sq()
1246 nsq = &sc->submit_queues[qid]; in nvme_opc_create_io_sq()
1247 nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff); in nvme_opc_create_io_sq()
1248 DPRINTF("%s size=%u (max=%u)", __func__, nsq->size, sc->max_qentries); in nvme_opc_create_io_sq()
1249 if ((nsq->size < 2) || (nsq->size > sc->max_qentries)) { in nvme_opc_create_io_sq()
1255 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_sq()
1260 nsq->head = nsq->tail = 0; in nvme_opc_create_io_sq()
1262 nsq->cqid = (command->cdw11 >> 16) & 0xffff; in nvme_opc_create_io_sq()
1263 if ((nsq->cqid == 0) || (nsq->cqid > sc->num_cqueues)) { in nvme_opc_create_io_sq()
1264 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_sq()
1270 if (sc->compl_queues[nsq->cqid].qbase == NULL) { in nvme_opc_create_io_sq()
1271 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_sq()
1277 nsq->qpriority = (command->cdw11 >> 1) & 0x03; in nvme_opc_create_io_sq()
1279 nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_create_io_sq()
1280 sizeof(struct nvme_command) * (size_t)nsq->size); in nvme_opc_create_io_sq()
1283 qid, nsq->size, nsq->qbase, nsq->cqid); in nvme_opc_create_io_sq()
1285 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_create_io_sq()
1291 * Guest sent non-cont submission queue request. in nvme_opc_create_io_sq()
1294 WPRINTF("%s unsupported non-contig (list-based) " in nvme_opc_create_io_sq()
1297 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_create_io_sq()
1306 uint16_t qid = command->cdw10 & 0xffff; in nvme_opc_delete_io_cq()
1310 if (qid == 0 || qid > sc->num_cqueues || in nvme_opc_delete_io_cq()
1311 (sc->compl_queues[qid].qbase == NULL)) { in nvme_opc_delete_io_cq()
1313 __func__, qid, sc->num_cqueues); in nvme_opc_delete_io_cq()
1314 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_delete_io_cq()
1320 for (sqid = 1; sqid < sc->num_squeues + 1; sqid++) in nvme_opc_delete_io_cq()
1321 if (sc->submit_queues[sqid].cqid == qid) { in nvme_opc_delete_io_cq()
1322 pci_nvme_status_tc(&compl->status, in nvme_opc_delete_io_cq()
1328 sc->compl_queues[qid].qbase = NULL; in nvme_opc_delete_io_cq()
1329 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_delete_io_cq()
1338 uint16_t qid = command->cdw10 & 0xffff; in nvme_opc_create_io_cq()
1341 if ((command->cdw11 & NVME_CMD_CDW11_PC) == 0) { in nvme_opc_create_io_cq()
1342 WPRINTF("%s unsupported non-contig (list-based) " in nvme_opc_create_io_cq()
1346 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_create_io_cq()
1350 if ((qid == 0) || (qid > sc->num_cqueues) || in nvme_opc_create_io_cq()
1351 (sc->compl_queues[qid].qbase != NULL)) { in nvme_opc_create_io_cq()
1353 __func__, qid, sc->num_cqueues); in nvme_opc_create_io_cq()
1354 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_cq()
1360 ncq = &sc->compl_queues[qid]; in nvme_opc_create_io_cq()
1361 ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1; in nvme_opc_create_io_cq()
1362 ncq->intr_vec = (command->cdw11 >> 16) & 0xffff; in nvme_opc_create_io_cq()
1363 if (ncq->intr_vec > (sc->max_queues + 1)) { in nvme_opc_create_io_cq()
1364 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_cq()
1370 ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff); in nvme_opc_create_io_cq()
1371 if ((ncq->size < 2) || (ncq->size > sc->max_qentries)) { in nvme_opc_create_io_cq()
1377 pci_nvme_status_tc(&compl->status, in nvme_opc_create_io_cq()
1382 ncq->head = ncq->tail = 0; in nvme_opc_create_io_cq()
1383 ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, in nvme_opc_create_io_cq()
1384 command->prp1, in nvme_opc_create_io_cq()
1385 sizeof(struct nvme_command) * (size_t)ncq->size); in nvme_opc_create_io_cq()
1387 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_create_io_cq()
1401 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_get_log_page()
1405 * and NUMDL. This is a zero-based value. in nvme_opc_get_log_page()
1407 logpage = command->cdw10 & 0xFF; in nvme_opc_get_log_page()
1408 logsize = ((command->cdw11 << 16) | (command->cdw10 >> 16)) + 1; in nvme_opc_get_log_page()
1410 logoff = ((uint64_t)(command->cdw13) << 32) | command->cdw12; in nvme_opc_get_log_page()
1416 if (logoff >= sizeof(sc->err_log)) { in nvme_opc_get_log_page()
1417 pci_nvme_status_genc(&compl->status, in nvme_opc_get_log_page()
1422 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_get_log_page()
1423 command->prp2, (uint8_t *)&sc->err_log + logoff, in nvme_opc_get_log_page()
1424 MIN(logsize, sizeof(sc->err_log) - logoff), in nvme_opc_get_log_page()
1428 if (logoff >= sizeof(sc->health_log)) { in nvme_opc_get_log_page()
1429 pci_nvme_status_genc(&compl->status, in nvme_opc_get_log_page()
1434 pthread_mutex_lock(&sc->mtx); in nvme_opc_get_log_page()
1435 memcpy(&sc->health_log.data_units_read, &sc->read_data_units, in nvme_opc_get_log_page()
1436 sizeof(sc->health_log.data_units_read)); in nvme_opc_get_log_page()
1437 memcpy(&sc->health_log.data_units_written, &sc->write_data_units, in nvme_opc_get_log_page()
1438 sizeof(sc->health_log.data_units_written)); in nvme_opc_get_log_page()
1439 memcpy(&sc->health_log.host_read_commands, &sc->read_commands, in nvme_opc_get_log_page()
1440 sizeof(sc->health_log.host_read_commands)); in nvme_opc_get_log_page()
1441 memcpy(&sc->health_log.host_write_commands, &sc->write_commands, in nvme_opc_get_log_page()
1442 sizeof(sc->health_log.host_write_commands)); in nvme_opc_get_log_page()
1443 pthread_mutex_unlock(&sc->mtx); in nvme_opc_get_log_page()
1445 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_get_log_page()
1446 command->prp2, (uint8_t *)&sc->health_log + logoff, in nvme_opc_get_log_page()
1447 MIN(logsize, sizeof(sc->health_log) - logoff), in nvme_opc_get_log_page()
1451 if (logoff >= sizeof(sc->fw_log)) { in nvme_opc_get_log_page()
1452 pci_nvme_status_genc(&compl->status, in nvme_opc_get_log_page()
1457 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_get_log_page()
1458 command->prp2, (uint8_t *)&sc->fw_log + logoff, in nvme_opc_get_log_page()
1459 MIN(logsize, sizeof(sc->fw_log) - logoff), in nvme_opc_get_log_page()
1463 if (logoff >= sizeof(sc->ns_log)) { in nvme_opc_get_log_page()
1464 pci_nvme_status_genc(&compl->status, in nvme_opc_get_log_page()
1469 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_get_log_page()
1470 command->prp2, (uint8_t *)&sc->ns_log + logoff, in nvme_opc_get_log_page()
1471 MIN(logsize, sizeof(sc->ns_log) - logoff), in nvme_opc_get_log_page()
1473 memset(&sc->ns_log, 0, sizeof(sc->ns_log)); in nvme_opc_get_log_page()
1479 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_get_log_page()
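The length decode above stitches NUMDU (CDW11 bits 15:0) and NUMDL (CDW10 bits 31:16) into a zero-based dword count, so the byte size is (NUMD + 1) * 4 (the scaling line is elided from this excerpt); the offset spans CDW12/CDW13. A sketch of the decode, with illustrative helper names:

        #include <stdint.h>

        /* Number of Dwords: NUMDL in CDW10[31:16], NUMDU in CDW11[15:0]. */
        static uint64_t
        log_page_bytes(uint32_t cdw10, uint32_t cdw11)
        {
                uint32_t numd = ((cdw11 & 0xffff) << 16) | (cdw10 >> 16);

                return ((uint64_t)numd + 1) * sizeof(uint32_t);
        }

        /* Log Page Offset: CDW12 holds the low dword, CDW13 the high. */
        static uint64_t
        log_page_offset(uint32_t cdw12, uint32_t cdw13)
        {
                return ((uint64_t)cdw13 << 32) | cdw12;
        }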
1494 command->cdw10 & 0xFF, command->nsid); in nvme_opc_identify()
1499 switch (command->cdw10 & 0xFF) { in nvme_opc_identify()
1502 if (command->nsid == NVME_GLOBAL_NAMESPACE_TAG) { in nvme_opc_identify()
1507 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_identify()
1508 command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata), in nvme_opc_identify()
1512 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_identify()
1513 command->prp2, (uint8_t *)&sc->ctrldata, in nvme_opc_identify()
1514 sizeof(sc->ctrldata), in nvme_opc_identify()
1518 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_identify()
1525 if (command->nsid != 1) { in nvme_opc_identify()
1530 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_identify()
1538 memcpy(((uint8_t *)dest) + 4, sc->nsdata.eui64, sizeof(uint64_t)); in nvme_opc_identify()
1545 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1, in nvme_opc_identify()
1551 __func__, command->cdw10 & 0xFF); in nvme_opc_identify()
1556 compl->status = status; in nvme_opc_identify()
1570 name = "Power Management"; in nvme_fid_to_name()
1600 name = "Autonomous Power State Transition"; in nvme_fid_to_name()
1615 name = "Non-Operation Power State Config"; in nvme_fid_to_name()
1667 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_invalid_cb()
1677 uint32_t cdw11 = command->cdw11; in nvme_feature_iv_config()
1681 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_iv_config()
1686 if (iv > (sc->max_queues + 1)) { in nvme_feature_iv_config()
1695 for (i = 0; i < sc->num_cqueues + 1; i++) { in nvme_feature_iv_config()
1696 if (sc->compl_queues[i].intr_vec == iv) { in nvme_feature_iv_config()
1697 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_feature_iv_config()
1709 if (command->cdw11 & NVME_ASYNC_EVENT_ENDURANCE_GROUP) in nvme_feature_async_event()
1710 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_async_event()
1727 tmpth = command->cdw11 & 0xffff; in nvme_feature_temperature()
1728 tmpsel = (command->cdw11 >> 16) & 0xf; in nvme_feature_temperature()
1729 thsel = (command->cdw11 >> 20) & 0x3; in nvme_feature_temperature()
1736 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_temperature()
1744 pthread_mutex_lock(&sc->mtx); in nvme_feature_temperature()
1746 sc->health_log.critical_warning |= in nvme_feature_temperature()
1749 sc->health_log.critical_warning &= in nvme_feature_temperature()
1751 pthread_mutex_unlock(&sc->mtx); in nvme_feature_temperature()
1753 report_crit = sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11 & in nvme_feature_temperature()
1758 sc->health_log.critical_warning); in nvme_feature_temperature()
1760 DPRINTF("%s: set_crit=%c critical_warning=%#x status=%#x", __func__, set_crit ? 'T':'F', sc->health_log.critical_warning, compl->status); in nvme_feature_temperature()
1771 if (sc->num_q_is_set) { in nvme_feature_num_queues()
1773 pci_nvme_status_genc(&compl->status, in nvme_feature_num_queues()
1778 nqr = command->cdw11 & 0xFFFF; in nvme_feature_num_queues()
1781 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_num_queues()
1785 sc->num_squeues = ONE_BASED(nqr); in nvme_feature_num_queues()
1786 if (sc->num_squeues > sc->max_queues) { in nvme_feature_num_queues()
1787 DPRINTF("NSQR=%u is greater than max %u", sc->num_squeues, in nvme_feature_num_queues()
1788 sc->max_queues); in nvme_feature_num_queues()
1789 sc->num_squeues = sc->max_queues; in nvme_feature_num_queues()
1792 nqr = (command->cdw11 >> 16) & 0xFFFF; in nvme_feature_num_queues()
1795 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_feature_num_queues()
1799 sc->num_cqueues = ONE_BASED(nqr); in nvme_feature_num_queues()
1800 if (sc->num_cqueues > sc->max_queues) { in nvme_feature_num_queues()
1801 DPRINTF("NCQR=%u is greater than max %u", sc->num_cqueues, in nvme_feature_num_queues()
1802 sc->max_queues); in nvme_feature_num_queues()
1803 sc->num_cqueues = sc->max_queues; in nvme_feature_num_queues()
1806 /* Patch the command value which will be saved on callback's return */ in nvme_feature_num_queues()
1807 command->cdw11 = NVME_FEATURE_NUM_QUEUES(sc); in nvme_feature_num_queues()
1808 compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc); in nvme_feature_num_queues()
1810 sc->num_q_is_set = true; in nvme_feature_num_queues()
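The Number of Queues feature takes zero-based NSQR/NCQR requests in CDW11 (0xffff is reserved and rejected), clamps them to the configured maximum, and reports the allocation back in the same packed zero-based format. A standalone sketch; alloc_queues() is an illustrative name:

        #include <stdint.h>

        /*
         * NSQR is CDW11[15:0], NCQR is CDW11[31:16], both zero-based.
         * On success, *cdw0 holds the packed zero-based NSQA/NCQA.
         */
        static int
        alloc_queues(uint32_t cdw11, uint16_t max_queues, uint32_t *cdw0)
        {
                uint32_t nsqr = cdw11 & 0xffff;
                uint32_t ncqr = (cdw11 >> 16) & 0xffff;
                uint32_t nsqa, ncqa;

                if (nsqr == 0xffff || ncqr == 0xffff)
                        return (-1);    /* Invalid Field */

                nsqa = (nsqr + 1 > max_queues) ? max_queues : nsqr + 1;
                ncqa = (ncqr + 1 > max_queues) ? max_queues : ncqr + 1;

                *cdw0 = (nsqa - 1) | ((ncqa - 1) << 16);
                return (0);
        }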
1818 uint32_t nsid = command->nsid; in nvme_opc_set_features()
1819 uint8_t fid = NVMEV(NVME_FEAT_SET_FID, command->cdw10); in nvme_opc_set_features()
1820 bool sv = NVMEV(NVME_FEAT_SET_SV, command->cdw10); in nvme_opc_set_features()
1826 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_set_features()
1831 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_set_features()
1836 feat = &sc->feat[fid]; in nvme_opc_set_features()
1838 if (feat->namespace_specific && (nsid == NVME_GLOBAL_NAMESPACE_TAG)) { in nvme_opc_set_features()
1839 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_set_features()
1843 if (!feat->namespace_specific && in nvme_opc_set_features()
1845 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_set_features()
1850 compl->cdw0 = 0; in nvme_opc_set_features()
1851 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_set_features()
1853 if (feat->set) in nvme_opc_set_features()
1854 feat->set(sc, feat, command, compl); in nvme_opc_set_features()
1856 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_set_features()
1861 DPRINTF("%s: status=%#x cdw11=%#x", __func__, compl->status, command->cdw11); in nvme_opc_set_features()
1862 if (compl->status == NVME_SC_SUCCESS) { in nvme_opc_set_features()
1863 feat->cdw11 = command->cdw11; in nvme_opc_set_features()
1865 (command->cdw11 != 0)) in nvme_opc_set_features()
1880 uint8_t fid = command->cdw10 & 0xFF; in nvme_opc_get_features()
1881 uint8_t sel = (command->cdw10 >> 8) & 0x7; in nvme_opc_get_features()
1887 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_get_features()
1891 compl->cdw0 = 0; in nvme_opc_get_features()
1892 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_get_features()
1894 feat = &sc->feat[fid]; in nvme_opc_get_features()
1895 if (feat->get) { in nvme_opc_get_features()
1896 feat->get(sc, feat, command, compl); in nvme_opc_get_features()
1899 if (compl->status == NVME_SC_SUCCESS) { in nvme_opc_get_features()
1900 if ((sel == NVME_FEATURES_SEL_SUPPORTED) && feat->namespace_specific) in nvme_opc_get_features()
1901 compl->cdw0 = NVME_FEATURES_NS_SPECIFIC; in nvme_opc_get_features()
1903 compl->cdw0 = feat->cdw11; in nvme_opc_get_features()
1915 /* Only supports Secure Erase Setting - User Data Erase */ in nvme_opc_format_nvm()
1916 ses = (command->cdw10 >> 9) & 0x7; in nvme_opc_format_nvm()
1918 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_format_nvm()
1923 lbaf = command->cdw10 & 0xf; in nvme_opc_format_nvm()
1925 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_format_nvm()
1931 pi = (command->cdw10 >> 5) & 0x7; in nvme_opc_format_nvm()
1933 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD); in nvme_opc_format_nvm()
1937 if (sc->nvstore.type == NVME_STOR_RAM) { in nvme_opc_format_nvm()
1938 if (sc->nvstore.ctx) in nvme_opc_format_nvm()
1939 free(sc->nvstore.ctx); in nvme_opc_format_nvm()
1940 sc->nvstore.ctx = calloc(1, sc->nvstore.size); in nvme_opc_format_nvm()
1941 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_format_nvm()
1948 pci_nvme_status_genc(&compl->status, in nvme_opc_format_nvm()
1953 req->nvme_sq = &sc->submit_queues[0]; in nvme_opc_format_nvm()
1954 req->sqid = 0; in nvme_opc_format_nvm()
1955 req->opc = command->opc; in nvme_opc_format_nvm()
1956 req->cid = command->cid; in nvme_opc_format_nvm()
1957 req->nsid = command->nsid; in nvme_opc_format_nvm()
1959 req->io_req.br_offset = 0; in nvme_opc_format_nvm()
1960 req->io_req.br_resid = sc->nvstore.size; in nvme_opc_format_nvm()
1961 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_format_nvm()
1963 err = blockif_delete(sc->nvstore.ctx, &req->io_req); in nvme_opc_format_nvm()
1965 pci_nvme_status_genc(&compl->status, in nvme_opc_format_nvm()
1969 compl->status = NVME_NO_STATUS; in nvme_opc_format_nvm()
1980 command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF); in nvme_opc_abort()
1984 compl->cdw0 = 1; in nvme_opc_abort()
1985 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS); in nvme_opc_abort()
1994 sc->aer_count, sc->ctrldata.aerl, command->cid); in nvme_opc_async_event_req()
1998 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC, in nvme_opc_async_event_req()
2003 if (pci_nvme_aer_add(sc, command->cid)) { in nvme_opc_async_event_req()
2004 pci_nvme_status_tc(&compl->status, NVME_SCT_GENERIC, in nvme_opc_async_event_req()
2010 * Raise events when they happen based on the Set Features cmd. in nvme_opc_async_event_req()
2014 compl->status = NVME_NO_STATUS; in nvme_opc_async_event_req()
2031 sq = &sc->submit_queues[0]; in pci_nvme_handle_admin_cmd()
2032 cq = &sc->compl_queues[0]; in pci_nvme_handle_admin_cmd()
2034 pthread_mutex_lock(&sq->mtx); in pci_nvme_handle_admin_cmd()
2036 sqhead = sq->head; in pci_nvme_handle_admin_cmd()
2037 DPRINTF("sqhead %u, tail %u", sqhead, sq->tail); in pci_nvme_handle_admin_cmd()
2039 while (sqhead != atomic_load_acq_short(&sq->tail)) { in pci_nvme_handle_admin_cmd()
2040 cmd = &(sq->qbase)[sqhead]; in pci_nvme_handle_admin_cmd()
2044 switch (cmd->opc) { in pci_nvme_handle_admin_cmd()
2094 sc->ctrldata.oacs) == 0) { in pci_nvme_handle_admin_cmd()
2105 cmd->opc); in pci_nvme_handle_admin_cmd()
2112 cmd->opc); in pci_nvme_handle_admin_cmd()
2115 sqhead = (sqhead + 1) % sq->size; in pci_nvme_handle_admin_cmd()
2118 pci_nvme_cq_update(sc, &sc->compl_queues[0], in pci_nvme_handle_admin_cmd()
2120 cmd->cid, in pci_nvme_handle_admin_cmd()
2127 sq->head = sqhead; in pci_nvme_handle_admin_cmd()
2129 if (cq->head != cq->tail) in pci_nvme_handle_admin_cmd()
2130 pci_generate_msix(sc->nsc_pi, 0); in pci_nvme_handle_admin_cmd()
2132 pthread_mutex_unlock(&sq->mtx); in pci_nvme_handle_admin_cmd()
2139 * E.g. 1 data unit is 1 - 1,000 512 byte blocks. 3 data units are 2,001 - 3,000
2147 pthread_mutex_lock(&sc->mtx); in pci_nvme_stats_write_read_update()
2150 sc->write_commands++; in pci_nvme_stats_write_read_update()
2153 sc->write_dunits_remainder += (bytes / 512); in pci_nvme_stats_write_read_update()
2154 while (sc->write_dunits_remainder >= 1000) { in pci_nvme_stats_write_read_update()
2155 sc->write_data_units++; in pci_nvme_stats_write_read_update()
2156 sc->write_dunits_remainder -= 1000; in pci_nvme_stats_write_read_update()
2160 sc->read_commands++; in pci_nvme_stats_write_read_update()
2163 sc->read_dunits_remainder += (bytes / 512); in pci_nvme_stats_write_read_update()
2164 while (sc->read_dunits_remainder >= 1000) { in pci_nvme_stats_write_read_update()
2165 sc->read_data_units++; in pci_nvme_stats_write_read_update()
2166 sc->read_dunits_remainder -= 1000; in pci_nvme_stats_write_read_update()
2173 pthread_mutex_unlock(&sc->mtx); in pci_nvme_stats_write_read_update()
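Per the comment above, a SMART "data unit" is 1,000 512-byte blocks, so the code accumulates a block remainder and carries it into the unit counter; the remainder starts at 999 (see the log page init earlier) so the very first block rolls over to one unit. A sketch:

        #include <stdint.h>

        /* SMART data units: thousands of 512-byte blocks. */
        struct du_counter {
                uint64_t units;
                uint64_t remainder;     /* initialized to 999 */
        };

        static void
        du_account(struct du_counter *c, uint64_t bytes)
        {
                c->remainder += bytes / 512;
                while (c->remainder >= 1000) {
                        c->units++;
                        c->remainder -= 1000;
                }
        }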
2191 if (slba >> (64 - nvstore->sectsz_bits)) in pci_nvme_out_of_range()
2194 offset = slba << nvstore->sectsz_bits; in pci_nvme_out_of_range()
2195 bytes = nblocks << nvstore->sectsz_bits; in pci_nvme_out_of_range()
2198 if ((nvstore->size <= offset) || ((nvstore->size - offset) < bytes)) in pci_nvme_out_of_range()
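The range check guards against two distinct failures: an LBA whose byte offset would overflow 64 bits when shifted, and a transfer that runs past the end of the backing store (compared in an order that avoids unsigned wraparound). A sketch:

        #include <stdbool.h>
        #include <stdint.h>

        /*
         * Reject an LBA range that would overflow a 64-bit byte offset
         * or extend beyond the backing store.
         */
        static bool
        lba_out_of_range(uint64_t store_size, unsigned sectsz_bits,
            uint64_t slba, uint32_t nblocks)
        {
                uint64_t offset, bytes;

                if (slba >> (64 - sectsz_bits)) /* offset would overflow */
                        return (true);

                offset = slba << sectsz_bits;
                bytes = (uint64_t)nblocks << sectsz_bits;

                return ((store_size <= offset) ||
                    ((store_size - offset) < bytes));
        }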
2212 return (-1); in pci_nvme_append_iov_req()
2214 if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) { in pci_nvme_append_iov_req()
2215 return (-1); in pci_nvme_append_iov_req()
2223 if (req->io_req.br_iovcnt == 0) in pci_nvme_append_iov_req()
2226 range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr; in pci_nvme_append_iov_req()
2229 iovidx = req->io_req.br_iovcnt - 1; in pci_nvme_append_iov_req()
2231 req->io_req.br_iov[iovidx].iov_base = in pci_nvme_append_iov_req()
2232 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, in pci_nvme_append_iov_req()
2233 req->prev_gpaddr, size); in pci_nvme_append_iov_req()
2234 if (req->io_req.br_iov[iovidx].iov_base == NULL) in pci_nvme_append_iov_req()
2235 return (-1); in pci_nvme_append_iov_req()
2237 req->prev_size += size; in pci_nvme_append_iov_req()
2238 req->io_req.br_resid += size; in pci_nvme_append_iov_req()
2240 req->io_req.br_iov[iovidx].iov_len = req->prev_size; in pci_nvme_append_iov_req()
2242 iovidx = req->io_req.br_iovcnt; in pci_nvme_append_iov_req()
2244 req->io_req.br_offset = offset; in pci_nvme_append_iov_req()
2245 req->io_req.br_resid = 0; in pci_nvme_append_iov_req()
2246 req->io_req.br_param = req; in pci_nvme_append_iov_req()
2249 req->io_req.br_iov[iovidx].iov_base = in pci_nvme_append_iov_req()
2250 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, in pci_nvme_append_iov_req()
2252 if (req->io_req.br_iov[iovidx].iov_base == NULL) in pci_nvme_append_iov_req()
2253 return (-1); in pci_nvme_append_iov_req()
2255 req->io_req.br_iov[iovidx].iov_len = size; in pci_nvme_append_iov_req()
2257 req->prev_gpaddr = gpaddr; in pci_nvme_append_iov_req()
2258 req->prev_size = size; in pci_nvme_append_iov_req()
2259 req->io_req.br_resid += size; in pci_nvme_append_iov_req()
2261 req->io_req.br_iovcnt++; in pci_nvme_append_iov_req()
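Guest buffers that happen to be physically contiguous are coalesced: rather than consuming a new iovec slot, the previous entry is remapped with a larger length. The contiguity test is just an end-to-start address comparison; a sketch:

        #include <stdbool.h>
        #include <stdint.h>

        /*
         * A new guest range extends the previous iovec only if it
         * begins exactly where the previous range ended.
         */
        static bool
        range_is_contiguous(uint64_t prev_gpaddr, uint64_t prev_size,
            uint64_t gpaddr)
        {
                return ((prev_gpaddr + prev_size) == gpaddr);
        }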
2271 struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid]; in pci_nvme_set_completion()
2274 __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status), in pci_nvme_set_completion()
2279 if (cq->head != cq->tail) { in pci_nvme_set_completion()
2280 if (cq->intr_en & NVME_CQ_INTEN) { in pci_nvme_set_completion()
2281 pci_generate_msix(sc->nsc_pi, cq->intr_vec); in pci_nvme_set_completion()
2284 __func__, sq->cqid); in pci_nvme_set_completion()
2292 req->sc = NULL; in pci_nvme_release_ioreq()
2293 req->nvme_sq = NULL; in pci_nvme_release_ioreq()
2294 req->sqid = 0; in pci_nvme_release_ioreq()
2296 pthread_mutex_lock(&sc->mtx); in pci_nvme_release_ioreq()
2298 STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link); in pci_nvme_release_ioreq()
2299 sc->pending_ios--; in pci_nvme_release_ioreq()
2302 if (sc->pending_ios == 0 && in pci_nvme_release_ioreq()
2303 NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts))) in pci_nvme_release_ioreq()
2304 sc->regs.csts |= NVME_CSTS_RDY; in pci_nvme_release_ioreq()
2306 pthread_mutex_unlock(&sc->mtx); in pci_nvme_release_ioreq()
2308 sem_post(&sc->iosemlock); in pci_nvme_release_ioreq()
2316 sem_wait(&sc->iosemlock); in pci_nvme_get_ioreq()
2317 pthread_mutex_lock(&sc->mtx); in pci_nvme_get_ioreq()
2319 req = STAILQ_FIRST(&sc->ioreqs_free); in pci_nvme_get_ioreq()
2321 STAILQ_REMOVE_HEAD(&sc->ioreqs_free, link); in pci_nvme_get_ioreq()
2323 req->sc = sc; in pci_nvme_get_ioreq()
2325 sc->pending_ios++; in pci_nvme_get_ioreq()
2327 pthread_mutex_unlock(&sc->mtx); in pci_nvme_get_ioreq()
2329 req->io_req.br_iovcnt = 0; in pci_nvme_get_ioreq()
2330 req->io_req.br_offset = 0; in pci_nvme_get_ioreq()
2331 req->io_req.br_resid = 0; in pci_nvme_get_ioreq()
2332 req->io_req.br_param = req; in pci_nvme_get_ioreq()
2333 req->prev_gpaddr = 0; in pci_nvme_get_ioreq()
2334 req->prev_size = 0; in pci_nvme_get_ioreq()
2342 struct pci_nvme_ioreq *req = br->br_param; in pci_nvme_io_done()
2343 struct nvme_submission_queue *sq = req->nvme_sq; in pci_nvme_io_done()
2353 pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status); in pci_nvme_io_done()
2354 pci_nvme_stats_write_read_update(req->sc, req->opc, in pci_nvme_io_done()
2355 req->bytes, status); in pci_nvme_io_done()
2356 pci_nvme_release_ioreq(req->sc, req); in pci_nvme_io_done()
2376 if (nvstore->type == NVME_STOR_RAM) { in nvme_opc_flush()
2381 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_flush()
2383 err = blockif_flush(nvstore->ctx, &req->io_req); in nvme_opc_flush()
2406 uint8_t *buf = nvstore->ctx; in nvme_write_read_ram()
2416 if (nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, prp1, prp2, in nvme_write_read_ram()
2438 size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes); in nvme_write_read_blockif()
2440 err = -1; in nvme_write_read_blockif()
2445 bytes -= size; in nvme_write_read_blockif()
2452 err = -1; in nvme_write_read_blockif()
2456 void *vmctx = sc->nsc_pi->pi_vmctx; in nvme_write_read_blockif()
2467 PAGE_SIZE - (prp % PAGE_SIZE)); in nvme_write_read_blockif()
2469 err = -1; in nvme_write_read_blockif()
2472 last = prp_list + (NVME_PRP2_ITEMS - 1); in nvme_write_read_blockif()
2479 err = -1; in nvme_write_read_blockif()
2484 bytes -= size; in nvme_write_read_blockif()
2489 req->io_req.br_callback = pci_nvme_io_done; in nvme_write_read_blockif()
2491 err = blockif_write(nvstore->ctx, &req->io_req); in nvme_write_read_blockif()
2493 err = blockif_read(nvstore->ctx, &req->io_req); in nvme_write_read_blockif()
2510 bool is_write = cmd->opc == NVME_OPC_WRITE; in nvme_opc_write_read()
2513 lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10; in nvme_opc_write_read()
2514 nblocks = (cmd->cdw12 & 0xFFFF) + 1; in nvme_opc_write_read()
2515 bytes = nblocks << nvstore->sectsz_bits; in nvme_opc_write_read()
2529 offset = lba << nvstore->sectsz_bits; in nvme_opc_write_read()
2531 req->bytes = bytes; in nvme_opc_write_read()
2532 req->io_req.br_offset = lba; in nvme_opc_write_read()
2535 cmd->prp1 &= ~0x3UL; in nvme_opc_write_read()
2536 cmd->prp2 &= ~0x3UL; in nvme_opc_write_read()
2538 if (nvstore->type == NVME_STOR_RAM) { in nvme_opc_write_read()
2539 *status = nvme_write_read_ram(sc, nvstore, cmd->prp1, in nvme_opc_write_read()
2540 cmd->prp2, offset, bytes, is_write); in nvme_opc_write_read()
2543 cmd->prp1, cmd->prp2, offset, bytes, is_write); in nvme_opc_write_read()
2550 pci_nvme_stats_write_read_update(sc, cmd->opc, bytes, *status); in nvme_opc_write_read()
2558 struct pci_nvme_ioreq *req = br->br_param; in pci_nvme_dealloc_sm()
2559 struct pci_nvme_softc *sc = req->sc; in pci_nvme_dealloc_sm()
2566 } else if ((req->prev_gpaddr + 1) == (req->prev_size)) { in pci_nvme_dealloc_sm()
2569 struct iovec *iov = req->io_req.br_iov; in pci_nvme_dealloc_sm()
2571 req->prev_gpaddr++; in pci_nvme_dealloc_sm()
2572 iov += req->prev_gpaddr; in pci_nvme_dealloc_sm()
2575 req->io_req.br_offset = (off_t)iov->iov_base; in pci_nvme_dealloc_sm()
2576 req->io_req.br_resid = iov->iov_len; in pci_nvme_dealloc_sm()
2577 if (blockif_delete(sc->nvstore.ctx, &req->io_req)) { in pci_nvme_dealloc_sm()
2585 pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid, in pci_nvme_dealloc_sm()
2603 if ((sc->ctrldata.oncs & NVME_ONCS_DSM) == 0) { in nvme_opc_dataset_mgmt()
2608 nr = cmd->cdw10 & 0xff; in nvme_opc_dataset_mgmt()
2616 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, cmd->prp1, cmd->prp2, in nvme_opc_dataset_mgmt()
2619 /* Check for invalid ranges and the number of non-zero lengths */ in nvme_opc_dataset_mgmt()
2631 if (cmd->cdw11 & NVME_DSM_ATTR_DEALLOCATE) { in nvme_opc_dataset_mgmt()
2633 int sectsz_bits = sc->nvstore.sectsz_bits; in nvme_opc_dataset_mgmt()
2639 if (!nvstore->deallocate) { in nvme_opc_dataset_mgmt()
2665 req->io_req.br_iovcnt = 0; in nvme_opc_dataset_mgmt()
2666 req->io_req.br_offset = offset; in nvme_opc_dataset_mgmt()
2667 req->io_req.br_resid = bytes; in nvme_opc_dataset_mgmt()
2670 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_dataset_mgmt()
2672 struct iovec *iov = req->io_req.br_iov; in nvme_opc_dataset_mgmt()
2680 if ((nvstore->size - offset) < bytes) { in nvme_opc_dataset_mgmt()
2689 req->io_req.br_callback = pci_nvme_dealloc_sm; in nvme_opc_dataset_mgmt()
2695 req->prev_gpaddr = 0; in nvme_opc_dataset_mgmt()
2696 req->prev_size = dr; in nvme_opc_dataset_mgmt()
2699 err = blockif_delete(nvstore->ctx, &req->io_req); in nvme_opc_dataset_mgmt()
2717 /* handle all submissions up to sq->tail index */ in pci_nvme_handle_io_cmd()
2718 sq = &sc->submit_queues[idx]; in pci_nvme_handle_io_cmd()
2720 pthread_mutex_lock(&sq->mtx); in pci_nvme_handle_io_cmd()
2722 sqhead = sq->head; in pci_nvme_handle_io_cmd()
2724 idx, sqhead, sq->tail, sq->qbase); in pci_nvme_handle_io_cmd()
2726 while (sqhead != atomic_load_acq_short(&sq->tail)) { in pci_nvme_handle_io_cmd()
2736 cmd = &sq->qbase[sqhead]; in pci_nvme_handle_io_cmd()
2737 sqhead = (sqhead + 1) % sq->size; in pci_nvme_handle_io_cmd()
2739 nsid = le32toh(cmd->nsid); in pci_nvme_handle_io_cmd()
2740 if ((nsid == 0) || (nsid > sc->ctrldata.nn)) { in pci_nvme_handle_io_cmd()
2754 req->nvme_sq = sq; in pci_nvme_handle_io_cmd()
2755 req->sqid = idx; in pci_nvme_handle_io_cmd()
2756 req->opc = cmd->opc; in pci_nvme_handle_io_cmd()
2757 req->cid = cmd->cid; in pci_nvme_handle_io_cmd()
2758 req->nsid = cmd->nsid; in pci_nvme_handle_io_cmd()
2760 switch (cmd->opc) { in pci_nvme_handle_io_cmd()
2762 pending = nvme_opc_flush(sc, cmd, &sc->nvstore, in pci_nvme_handle_io_cmd()
2767 pending = nvme_opc_write_read(sc, cmd, &sc->nvstore, in pci_nvme_handle_io_cmd()
2773 __func__, lba, cmd->cdw12 & 0xFFFF); */ in pci_nvme_handle_io_cmd()
2777 pending = nvme_opc_dataset_mgmt(sc, cmd, &sc->nvstore, in pci_nvme_handle_io_cmd()
2782 __func__, cmd->opc); in pci_nvme_handle_io_cmd()
2787 pci_nvme_set_completion(sc, sq, idx, cmd->cid, status); in pci_nvme_handle_io_cmd()
2793 sq->head = sqhead; in pci_nvme_handle_io_cmd()
2795 pthread_mutex_unlock(&sq->mtx); in pci_nvme_handle_io_cmd()
2801 * "Asynchronous Event Information - Error Status" for details
2811 * Therefore, can never have more than (size - 1) entries in pci_nvme_sq_doorbell_valid()
2813 if (sq->head == sq->tail) in pci_nvme_sq_doorbell_valid()
2814 capacity = sq->size - 1; in pci_nvme_sq_doorbell_valid()
2815 else if (sq->head > sq->tail) in pci_nvme_sq_doorbell_valid()
2816 capacity = sq->size - (sq->head - sq->tail) - 1; in pci_nvme_sq_doorbell_valid()
2818 capacity = sq->tail - sq->head - 1; in pci_nvme_sq_doorbell_valid()
2820 if ((value == sq->tail) || /* same as previous */ in pci_nvme_sq_doorbell_valid()
2823 __func__, sq->size, sq->head, sq->tail, capacity, value); in pci_nvme_sq_doorbell_valid()
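Since head == tail denotes an empty ring, a queue of size N holds at most N - 1 entries, and a doorbell write is valid only if the new tail stays within the current free capacity. A sketch of the capacity computation:

        #include <stdint.h>

        /*
         * head == tail means empty, so a ring of `size` slots can hold
         * at most size - 1 entries.
         */
        static uint32_t
        sq_capacity(uint32_t size, uint16_t head, uint16_t tail)
        {
                if (head == tail)
                        return (size - 1);
                if (head > tail)
                        return (size - (head - tail) - 1);
                return ((uint32_t)(tail - head) - 1);
        }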
2838 if (idx > sc->num_squeues) { in pci_nvme_handle_doorbell()
2841 __func__, idx, sc->num_squeues); in pci_nvme_handle_doorbell()
2847 if (sc->submit_queues[idx].qbase == NULL) { in pci_nvme_handle_doorbell()
2855 if (!pci_nvme_sq_doorbell_valid(&sc->submit_queues[idx], value)) { in pci_nvme_handle_doorbell()
2863 atomic_store_short(&sc->submit_queues[idx].tail, in pci_nvme_handle_doorbell()
2873 if (idx > sc->num_cqueues) { in pci_nvme_handle_doorbell()
2876 __func__, idx, sc->num_cqueues); in pci_nvme_handle_doorbell()
2882 if (sc->compl_queues[idx].qbase == NULL) { in pci_nvme_handle_doorbell()
2890 atomic_store_short(&sc->compl_queues[idx].head, in pci_nvme_handle_doorbell()
2941 DPRINTF("unknown nvme bar-0 offset 0x%lx", offset); in pci_nvme_bar0_reg_dumps()
2953 uint64_t belloffset = offset - NVME_DOORBELL_OFFSET; in pci_nvme_write_bar_0()
2957 if ((sc->regs.csts & NVME_CSTS_RDY) == 0) { in pci_nvme_write_bar_0()
2963 if (belloffset > ((sc->max_queues+1) * 8 - 4)) { in pci_nvme_write_bar_0()
2971 if (sc->submit_queues[idx].qbase == NULL) in pci_nvme_write_bar_0()
2973 } else if (sc->compl_queues[idx].qbase == NULL) in pci_nvme_write_bar_0()
2980 DPRINTF("nvme-write offset 0x%lx, size %d, value 0x%lx", in pci_nvme_write_bar_0()
2993 pthread_mutex_lock(&sc->mtx); in pci_nvme_write_bar_0()
3004 /* MSI-X, so ignore */ in pci_nvme_write_bar_0()
3007 /* MSI-X, so ignore */ in pci_nvme_write_bar_0()
3020 /* perform shutdown - flush out data to backend */ in pci_nvme_write_bar_0()
3021 sc->regs.csts &= ~NVMEM(NVME_CSTS_REG_SHST); in pci_nvme_write_bar_0()
3022 sc->regs.csts |= NVMEF(NVME_CSTS_REG_SHST, in pci_nvme_write_bar_0()
3025 if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) { in pci_nvme_write_bar_0()
3027 /* transition 1->0 causes controller reset */ in pci_nvme_write_bar_0()
3034 sc->regs.cc &= ~NVME_CC_WRITE_MASK; in pci_nvme_write_bar_0()
3035 sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK; in pci_nvme_write_bar_0()
3038 sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK; in pci_nvme_write_bar_0()
3039 sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK; in pci_nvme_write_bar_0()
3040 sc->regs.csts &= ~NVME_CSTS_RDY; in pci_nvme_write_bar_0()
3041 } else if ((sc->pending_ios == 0) && in pci_nvme_write_bar_0()
3042 !(sc->regs.csts & NVME_CSTS_CFS)) { in pci_nvme_write_bar_0()
3043 sc->regs.csts |= NVME_CSTS_RDY; in pci_nvme_write_bar_0()
3049 /* ignore writes; don't support subsystem reset */ in pci_nvme_write_bar_0()
3052 sc->regs.aqa = (uint32_t)value; in pci_nvme_write_bar_0()
3055 sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) | in pci_nvme_write_bar_0()
3059 sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) | in pci_nvme_write_bar_0()
3063 sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) | in pci_nvme_write_bar_0()
3067 sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) | in pci_nvme_write_bar_0()
3074 pthread_mutex_unlock(&sc->mtx); in pci_nvme_write_bar_0()
3081 struct pci_nvme_softc* sc = pi->pi_arg; in pci_nvme_write()
3085 DPRINTF("nvme-write baridx %d, msix: off 0x%lx, size %d, " in pci_nvme_write()
3111 void *p = &(sc->regs); in pci_nvme_read_bar_0()
3112 pthread_mutex_lock(&sc->mtx); in pci_nvme_read_bar_0()
3114 pthread_mutex_unlock(&sc->mtx); in pci_nvme_read_bar_0()
3132 DPRINTF(" nvme-read offset 0x%lx, size %d -> value 0x%x", in pci_nvme_read_bar_0()
3143 struct pci_nvme_softc* sc = pi->pi_arg; in pci_nvme_read()
3147 DPRINTF("nvme-read bar: %d, msix: regoff 0x%lx, size %d", in pci_nvme_read()
3171 sc->max_queues = NVME_QUEUES; in pci_nvme_parse_config()
3172 sc->max_qentries = NVME_MAX_QENTRIES; in pci_nvme_parse_config()
3173 sc->ioslots = NVME_IOSLOTS; in pci_nvme_parse_config()
3174 sc->num_squeues = sc->max_queues; in pci_nvme_parse_config()
3175 sc->num_cqueues = sc->max_queues; in pci_nvme_parse_config()
3176 sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO; in pci_nvme_parse_config()
3178 snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn), in pci_nvme_parse_config()
3179 "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func); in pci_nvme_parse_config()
3183 sc->max_queues = atoi(value); in pci_nvme_parse_config()
3186 sc->max_qentries = atoi(value); in pci_nvme_parse_config()
3187 if (sc->max_qentries <= 0) { in pci_nvme_parse_config()
3189 sc->max_qentries); in pci_nvme_parse_config()
3190 return (-1); in pci_nvme_parse_config()
3195 sc->ioslots = atoi(value); in pci_nvme_parse_config()
3196 if (sc->ioslots <= 0) { in pci_nvme_parse_config()
3197 EPRINTLN("Invalid ioslots option %d", sc->ioslots); in pci_nvme_parse_config()
3198 return (-1); in pci_nvme_parse_config()
3208 * 7-bit ASCII, unused bytes should be space characters. in pci_nvme_parse_config()
3211 cpywithpad((char *)sc->ctrldata.sn, in pci_nvme_parse_config()
3212 sizeof(sc->ctrldata.sn), value, ' '); in pci_nvme_parse_config()
3216 sc->nvstore.eui64 = htobe64(strtoull(value, NULL, 0)); in pci_nvme_parse_config()
3220 sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO; in pci_nvme_parse_config()
3222 sc->dataset_management = NVME_DATASET_MANAGEMENT_ENABLE; in pci_nvme_parse_config()
3224 sc->dataset_management = NVME_DATASET_MANAGEMENT_DISABLE; in pci_nvme_parse_config()
3229 if (pci_emul_add_boot_device(sc->nsc_pi, atoi(value))) { in pci_nvme_parse_config()
3231 return (-1); in pci_nvme_parse_config()
3239 sc->nvstore.type = NVME_STOR_RAM; in pci_nvme_parse_config()
3240 sc->nvstore.size = sz * 1024 * 1024; in pci_nvme_parse_config()
3241 sc->nvstore.ctx = calloc(1, sc->nvstore.size); in pci_nvme_parse_config()
3242 sc->nvstore.sectsz = 4096; in pci_nvme_parse_config()
3243 sc->nvstore.sectsz_bits = 12; in pci_nvme_parse_config()
3244 if (sc->nvstore.ctx == NULL) { in pci_nvme_parse_config()
3246 return (-1); in pci_nvme_parse_config()
3250 sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func); in pci_nvme_parse_config()
3251 sc->nvstore.ctx = blockif_open(nvl, bident); in pci_nvme_parse_config()
3252 if (sc->nvstore.ctx == NULL) { in pci_nvme_parse_config()
3255 return (-1); in pci_nvme_parse_config()
3257 sc->nvstore.type = NVME_STOR_BLOCKIF; in pci_nvme_parse_config()
3258 sc->nvstore.size = blockif_size(sc->nvstore.ctx); in pci_nvme_parse_config()
3262 sc->nvstore.sectsz = sectsz; in pci_nvme_parse_config()
3263 else if (sc->nvstore.type != NVME_STOR_RAM) in pci_nvme_parse_config()
3264 sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx); in pci_nvme_parse_config()
3265 for (sc->nvstore.sectsz_bits = 9; in pci_nvme_parse_config()
3266 (1U << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz; in pci_nvme_parse_config()
3267 sc->nvstore.sectsz_bits++); in pci_nvme_parse_config()
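The loop computes sectsz_bits as the smallest exponent b, starting at 9 (512 bytes), such that (1 << b) covers the sector size. An equivalent standalone sketch:

        #include <stdint.h>

        /* Smallest b >= 9 with (1 << b) >= sectsz. */
        static unsigned
        sectsz_to_bits(uint32_t sectsz)
        {
                unsigned bits = 9;

                while ((1u << bits) < sectsz)
                        bits++;
                return (bits);
        }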
3269 if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES) in pci_nvme_parse_config()
3270 sc->max_queues = NVME_QUEUES; in pci_nvme_parse_config()
3284 nvstore = &sc->nvstore; in pci_nvme_resized()
3285 nd = &sc->nsdata; in pci_nvme_resized()
3287 nvstore->size = new_size; in pci_nvme_resized()
3291 sc->ns_log.ns[0] = 1; in pci_nvme_resized()
3292 sc->ns_log.ns[1] = 0; in pci_nvme_resized()
3308 pi->pi_arg = sc; in pci_nvme_init()
3309 sc->nsc_pi = pi; in pci_nvme_init()
3317 STAILQ_INIT(&sc->ioreqs_free); in pci_nvme_init()
3318 sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq)); in pci_nvme_init()
3319 for (uint32_t i = 0; i < sc->ioslots; i++) { in pci_nvme_init()
3320 STAILQ_INSERT_TAIL(&sc->ioreqs_free, &sc->ioreqs[i], link); in pci_nvme_init()
3338 2 * sizeof(uint32_t) * (sc->max_queues + 1); in pci_nvme_init()
3349 error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR); in pci_nvme_init()
3361 pthread_mutex_init(&sc->mtx, NULL); in pci_nvme_init()
3362 sem_init(&sc->iosemlock, 0, sc->ioslots); in pci_nvme_init()
3363 blockif_register_resize_callback(sc->nvstore.ctx, pci_nvme_resized, sc); in pci_nvme_init()
3365 pci_nvme_init_queues(sc, sc->max_queues, sc->max_queues); in pci_nvme_init()
3367 * Controller data depends on Namespace data so initialize Namespace in pci_nvme_init()
3370 pci_nvme_init_nsdata(sc, &sc->nsdata, 1, &sc->nvstore); in pci_nvme_init()
3397 ram = strndup(opts + 4, cp - opts - 4); in pci_nvme_legacy_config()