Lines Matching refs:nvme

Cross-reference hits for the symbol "nvme" in the illumos NVMe driver (apparently nvme.c). The leading number on each hit is its source line, the trailing "in func()" note names the enclosing function, and "argument" or "local" marks hits where the symbol is declared as a function parameter or a local variable.

229 #error nvme driver needs porting for big-endian platforms
578 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) in nvme_put64() argument
580 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); in nvme_put64()
583 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); in nvme_put64()
587 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) in nvme_put32() argument
589 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); in nvme_put32()
592 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); in nvme_put32()
596 nvme_get64(nvme_t *nvme, uintptr_t reg) in nvme_get64() argument
600 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); in nvme_get64()
603 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); in nvme_get64()
609 nvme_get32(nvme_t *nvme, uintptr_t reg) in nvme_get32() argument
613 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); in nvme_get32()
616 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); in nvme_get32()
622 nvme_check_regs_hdl(nvme_t *nvme) in nvme_check_regs_hdl() argument
626 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); in nvme_check_regs_hdl()
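
The accessors at 578-616 are the driver's entire register-access discipline: assert natural alignment of the offset, then go through ddi_put*()/ddi_get*() so the access handle's attributes control byte order and FMA protection (line 229 refuses to build on big-endian until that porting is done, and nvme_check_regs_hdl() at 622-626 is the matching FMA error probe). Reassembled from those fragments, the 32-bit pair looks like this (a sketch; nvme_t, n_regs and n_regh are the fields used above):

    static void
    nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
    {
            /* Register offsets must be naturally (4-byte) aligned. */
            ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

            /* The access handle's attributes govern the actual store. */
            ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
    }

    static uint32_t
    nvme_get32(nvme_t *nvme, uintptr_t reg)
    {
            uint32_t val;

            ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

            val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

            return (val);
    }

The 64-bit pair at 578-603 is identical except that it asserts 8-byte alignment.
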
678 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, in nvme_alloc_dma_common() argument
681 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, in nvme_alloc_dma_common()
688 dev_err(nvme->n_dip, CE_PANIC, in nvme_alloc_dma_common()
697 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, in nvme_alloc_dma_common()
704 dev_err(nvme->n_dip, CE_WARN, in nvme_alloc_dma_common()
706 atomic_inc_32(&nvme->n_dma_bind_err); in nvme_alloc_dma_common()
715 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, in nvme_zalloc_dma() argument
720 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) != in nvme_zalloc_dma()
738 nvme_t *nvme = (nvme_t *)private; in nvme_prp_dma_constructor() local
743 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize, in nvme_prp_dma_constructor()
744 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) { in nvme_prp_dma_constructor()
756 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, in nvme_zalloc_queue_dma() argument
760 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; in nvme_zalloc_queue_dma()
762 len = roundup(len, nvme->n_pagesize); in nvme_zalloc_queue_dma()
766 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) in nvme_zalloc_queue_dma()
768 dev_err(nvme->n_dip, CE_WARN, in nvme_zalloc_queue_dma()
774 dev_err(nvme->n_dip, CE_WARN, in nvme_zalloc_queue_dma()
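
nvme_alloc_dma_common() (678-706) is the standard three-step DDI DMA recipe: allocate a handle against the caller's attributes, allocate memory through it, then bind to obtain the device-visible cookie, bumping n_dma_bind_err if the bind fails. A sketch of that sequence with error unwinding abbreviated; nd_dmah and nd_cookie appear in the hits, while nd_memp, nd_len, nd_acch and nd_ncookie are assumed field names:

    static int
    nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, size_t len,
        uint_t flags, ddi_dma_attr_t *dma_attr)
    {
            /* 1. A DMA handle scoped to this allocation's attributes. */
            if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP,
                NULL, &dma->nd_dmah) != DDI_SUCCESS)
                    return (DDI_FAILURE);

            /* 2. DMA-able memory; n_reg_acc_attr supplies access rules. */
            if (ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
                DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
                &dma->nd_len, &dma->nd_acch) != DDI_SUCCESS)
                    return (DDI_FAILURE);

            /* 3. Bind, yielding the cookie handed to the controller. */
            if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
                dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                NULL, &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
                    atomic_inc_32(&nvme->n_dma_bind_err);
                    return (DDI_FAILURE);
            }

            return (DDI_SUCCESS);
    }

nvme_zalloc_dma() layers zeroing on top, and nvme_zalloc_queue_dma() additionally rounds the length up to n_pagesize and forces page alignment through its copy of n_queue_dma_attr (756-762).
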
815 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, in nvme_alloc_qpair() argument
821 DDI_INTR_PRI(nvme->n_intr_pri)); in nvme_alloc_qpair()
824 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), in nvme_alloc_qpair()
828 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), in nvme_alloc_qpair()
836 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); in nvme_alloc_qpair()
837 qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx); in nvme_alloc_qpair()
853 nvme_alloc_cmd(nvme_t *nvme, int kmflag) in nvme_alloc_cmd() argument
862 cmd->nc_nvme = nvme; in nvme_alloc_cmd()
865 DDI_INTR_PRI(nvme->n_intr_pri)); in nvme_alloc_cmd()
944 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid) in nvme_unqueue_cmd() argument
958 ASSERT3P(cmd->nc_nvme, ==, nvme); in nvme_unqueue_cmd()
965 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) in nvme_retrieve_cmd() argument
984 ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp); in nvme_retrieve_cmd()
986 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); in nvme_retrieve_cmd()
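
Completions are matched back to commands purely by queue id and command id: nvme_retrieve_cmd() asserts that the completion's cqe_sqid names the queue pair it is servicing (line 984) and then pulls the command by cqe_cid (line 986). A plausible shape for that lookup; the nq_cmd slot array is an assumed name, while the nc_nvme assertion is verbatim from line 958:

    static nvme_cmd_t *
    nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
    {
            nvme_cmd_t *cmd;

            /* The per-qpair slot array is indexed by command id. */
            cmd = qp->nq_cmd[cid];
            qp->nq_cmd[cid] = NULL;         /* slot is free for reuse */

            /* Every command records its controller (line 862). */
            ASSERT3P(cmd->nc_nvme, ==, nvme);

            return (cmd);
    }
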
1311 nvme_t *nvme = abort_cmd->nc_nvme; in nvme_abort_cmd() local
1312 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_abort_cmd()
1316 sema_p(&nvme->n_abort_sema); in nvme_abort_cmd()
1333 sema_v(&nvme->n_abort_sema); in nvme_abort_cmd()
1336 dev_err(nvme->n_dip, CE_WARN, in nvme_abort_cmd()
1339 atomic_inc_32(&nvme->n_abort_failed); in nvme_abort_cmd()
1341 dev_err(nvme->n_dip, CE_WARN, in nvme_abort_cmd()
1346 atomic_inc_32(&nvme->n_cmd_aborted); in nvme_abort_cmd()
1363 nvme_t *nvme = cmd->nc_nvme; in nvme_wait_cmd() local
1384 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_wait_cmd()
1385 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " in nvme_wait_cmd()
1388 atomic_inc_32(&nvme->n_cmd_timeout); in nvme_wait_cmd()
1391 nvme_check_regs_hdl(nvme) || in nvme_wait_cmd()
1394 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); in nvme_wait_cmd()
1395 nvme->n_dead = B_TRUE; in nvme_wait_cmd()
1407 qp = nvme->n_ioq[cmd->nc_sqid]; in nvme_wait_cmd()
1410 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); in nvme_wait_cmd()
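
The timeout path in nvme_wait_cmd() (1363-1410) decides whether the controller is still trustworthy before trying the polite route: a faulted register handle or a set CSTS.CFS bit marks the device dead and reports DDI_SERVICE_LOST; otherwise an ABORT is attempted (throttled by n_abort_sema, acquired at 1316), and in either case the timed-out command is unqueued. In outline, with the mutex name, the call shape of nvme_abort_cmd() and the exact condition order assumed:

    csts.r = nvme_get32(nvme, NVME_REG_CSTS);
    atomic_inc_32(&nvme->n_cmd_timeout);

    if (csts.b.csts_cfs != 0 || nvme_check_regs_hdl(nvme)) {
            /* Fatal status or bad handle: stop trusting the device. */
            ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
            nvme->n_dead = B_TRUE;
    } else {
            nvme_abort_cmd(cmd);            /* call shape assumed */
    }

    qp = nvme->n_ioq[cmd->nc_sqid];
    mutex_enter(&qp->nq_mutex);             /* lock name assumed */
    (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
    mutex_exit(&qp->nq_mutex);
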
1440 nvme_t *nvme = cmd->nc_nvme; in nvme_async_event_task() local
1478 nvme_submit_admin_cmd(nvme->n_adminq, cmd); in nvme_async_event_task()
1483 (void) nvme_get_logpage(nvme, (void **)&error_log, in nvme_async_event_task()
1486 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " in nvme_async_event_task()
1488 atomic_inc_32(&nvme->n_wrong_logpage); in nvme_async_event_task()
1493 dev_err(nvme->n_dip, CE_PANIC, "programming error: " in nvme_async_event_task()
1498 dev_err(nvme->n_dip, CE_PANIC, "programming error: " in nvme_async_event_task()
1503 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); in nvme_async_event_task()
1504 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); in nvme_async_event_task()
1505 nvme->n_dead = B_TRUE; in nvme_async_event_task()
1506 atomic_inc_32(&nvme->n_diagfail_event); in nvme_async_event_task()
1510 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " in nvme_async_event_task()
1512 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); in nvme_async_event_task()
1513 nvme->n_dead = B_TRUE; in nvme_async_event_task()
1514 atomic_inc_32(&nvme->n_persistent_event); in nvme_async_event_task()
1518 dev_err(nvme->n_dip, CE_WARN, "!transient internal " in nvme_async_event_task()
1521 atomic_inc_32(&nvme->n_transient_event); in nvme_async_event_task()
1525 dev_err(nvme->n_dip, CE_WARN, in nvme_async_event_task()
1527 atomic_inc_32(&nvme->n_fw_load_event); in nvme_async_event_task()
1534 (void) nvme_get_logpage(nvme, (void **)&health_log, in nvme_async_event_task()
1537 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " in nvme_async_event_task()
1539 atomic_inc_32(&nvme->n_wrong_logpage); in nvme_async_event_task()
1544 dev_err(nvme->n_dip, CE_WARN, in nvme_async_event_task()
1547 atomic_inc_32(&nvme->n_reliability_event); in nvme_async_event_task()
1551 dev_err(nvme->n_dip, CE_WARN, in nvme_async_event_task()
1554 atomic_inc_32(&nvme->n_temperature_event); in nvme_async_event_task()
1558 dev_err(nvme->n_dip, CE_WARN, in nvme_async_event_task()
1561 atomic_inc_32(&nvme->n_spare_event); in nvme_async_event_task()
1567 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " in nvme_async_event_task()
1570 atomic_inc_32(&nvme->n_vendor_event); in nvme_async_event_task()
1574 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " in nvme_async_event_task()
1577 atomic_inc_32(&nvme->n_unknown_event); in nvme_async_event_task()
1598 nvme_async_event(nvme_t *nvme) in nvme_async_event() argument
1600 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_async_event()
1606 nvme_submit_admin_cmd(nvme->n_adminq, cmd); in nvme_async_event()
1610 nvme_format_nvm(nvme_t *nvme, uint32_t nsid, uint8_t lbaf, boolean_t ms, in nvme_format_nvm() argument
1613 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_format_nvm()
1639 dev_err(nvme->n_dip, CE_WARN, in nvme_format_nvm()
1649 nvme_get_logpage(nvme_t *nvme, void **buf, size_t *bufsize, uint8_t logpage, in nvme_get_logpage() argument
1652 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_get_logpage()
1672 *bufsize = MIN(2 * nvme->n_pagesize, in nvme_get_logpage()
1673 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); in nvme_get_logpage()
1687 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d", in nvme_get_logpage()
1689 atomic_inc_32(&nvme->n_unknown_logpage); in nvme_get_logpage()
1700 if (nvme_zalloc_dma(nvme, getlogpage.b.lp_numd * sizeof (uint32_t), in nvme_get_logpage()
1701 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { in nvme_get_logpage()
1702 dev_err(nvme->n_dip, CE_WARN, in nvme_get_logpage()
1709 dev_err(nvme->n_dip, CE_WARN, in nvme_get_logpage()
1711 atomic_inc_32(&nvme->n_too_many_cookies); in nvme_get_logpage()
1727 dev_err(nvme->n_dip, CE_WARN, in nvme_get_logpage()
1743 nvme_identify(nvme_t *nvme, uint32_t nsid, void **buf) in nvme_identify() argument
1745 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_identify()
1757 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, in nvme_identify()
1758 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { in nvme_identify()
1759 dev_err(nvme->n_dip, CE_WARN, in nvme_identify()
1766 dev_err(nvme->n_dip, CE_WARN, in nvme_identify()
1768 atomic_inc_32(&nvme->n_too_many_cookies); in nvme_identify()
1784 dev_err(nvme->n_dip, CE_WARN, in nvme_identify()
1800 nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val, in nvme_set_features() argument
1804 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_set_features()
1817 if (!nvme->n_write_cache_present) in nvme_set_features()
1831 dev_err(nvme->n_dip, CE_WARN, in nvme_set_features()
1846 nvme_get_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t *res, in nvme_get_features() argument
1849 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_get_features()
1877 if (!nvme->n_write_cache_present) in nvme_get_features()
1882 if (!nvme->n_lba_range_supported) in nvme_get_features()
1900 if (!nvme->n_auto_pst_supported) in nvme_get_features()
1912 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, in nvme_get_features()
1913 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { in nvme_get_features()
1914 dev_err(nvme->n_dip, CE_WARN, in nvme_get_features()
1921 dev_err(nvme->n_dip, CE_WARN, in nvme_get_features()
1923 atomic_inc_32(&nvme->n_too_many_cookies); in nvme_get_features()
1944 nvme->n_lba_range_supported = B_FALSE; in nvme_get_features()
1946 dev_err(nvme->n_dip, CE_WARN, in nvme_get_features()
1967 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) in nvme_write_cache_set() argument
1974 return (nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r, in nvme_write_cache_set()
1979 nvme_set_nqueues(nvme_t *nvme, uint16_t *nqueues) in nvme_set_nqueues() argument
1986 ret = nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r); in nvme_set_nqueues()
2001 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) in nvme_create_io_qpair() argument
2003 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_create_io_qpair()
2014 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt; in nvme_create_io_qpair()
2026 dev_err(nvme->n_dip, CE_WARN, in nvme_create_io_qpair()
2037 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); in nvme_create_io_qpair()
2048 dev_err(nvme->n_dip, CE_WARN, in nvme_create_io_qpair()
2061 nvme_reset(nvme_t *nvme, boolean_t quiesce) in nvme_reset() argument
2066 nvme_put32(nvme, NVME_REG_CC, 0); in nvme_reset()
2068 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_reset()
2070 nvme_put32(nvme, NVME_REG_CC, 0); in nvme_reset()
2071 for (i = 0; i != nvme->n_timeout * 10; i++) { in nvme_reset()
2072 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_reset()
2083 nvme_put32(nvme, NVME_REG_AQA, 0); in nvme_reset()
2084 nvme_put32(nvme, NVME_REG_ASQ, 0); in nvme_reset()
2085 nvme_put32(nvme, NVME_REG_ACQ, 0); in nvme_reset()
2087 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_reset()
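
nvme_reset() (2061-2087) is the spec's disable handshake: clear CC (dropping CC.EN), and if CSTS.RDY is still set, poll it back to 0 on the CAP.TO budget. CAP.TO (captured as n_timeout at line 2270) is in 500 ms units, so n_timeout * 10 iterations at an assumed 50 ms cadence cover exactly the advertised worst case; in quiesce(9E) context the wait must busy-spin since blocking is not allowed. Note also that the hits at 2084-2085 clear the 64-bit ASQ/ACQ registers with nvme_put32(), which zeroes only the low word. Filled out from the hits (wait cadence and return convention are assumed details):

    static boolean_t
    nvme_reset(nvme_t *nvme, boolean_t quiesce)
    {
            nvme_reg_csts_t csts;
            int i;

            nvme_put32(nvme, NVME_REG_CC, 0);       /* clear CC.EN */

            csts.r = nvme_get32(nvme, NVME_REG_CSTS);
            if (csts.b.csts_rdy == 1) {
                    nvme_put32(nvme, NVME_REG_CC, 0);
                    for (i = 0; i != nvme->n_timeout * 10; i++) {
                            csts.r = nvme_get32(nvme, NVME_REG_CSTS);
                            if (csts.b.csts_rdy == 0)
                                    break;

                            if (quiesce)
                                    drv_usecwait(50000);
                            else
                                    delay(drv_usectohz(50000));
                    }
            }

            nvme_put32(nvme, NVME_REG_AQA, 0);
            nvme_put32(nvme, NVME_REG_ASQ, 0);
            nvme_put32(nvme, NVME_REG_ACQ, 0);

            csts.r = nvme_get32(nvme, NVME_REG_CSTS);
            return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
    }
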
2092 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) in nvme_shutdown() argument
2100 cc.r = nvme_get32(nvme, NVME_REG_CC); in nvme_shutdown()
2102 nvme_put32(nvme, NVME_REG_CC, cc.r); in nvme_shutdown()
2105 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_shutdown()
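
nvme_shutdown() (2092-2105) performs the shutdown notification instead: read-modify-write CC to set CC.SHN to normal or abrupt (the two modes passed at 3063 and 3117), then poll CSTS.SHST for shutdown-complete. A sketch; the poll bound and the completion constant's name are assumed:

    nvme_reg_cc_t cc;
    nvme_reg_csts_t csts;
    int i;

    ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);

    cc.r = nvme_get32(nvme, NVME_REG_CC);
    cc.b.cc_shn = mode & 0x3;               /* shutdown notification */
    nvme_put32(nvme, NVME_REG_CC, cc.r);

    for (i = 0; i != 10; i++) {
            csts.r = nvme_get32(nvme, NVME_REG_CSTS);
            if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
                    break;

            if (quiesce)
                    drv_usecwait(100000);
            else
                    delay(drv_usectohz(100000));
    }
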
2118 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) in nvme_prepare_devid() argument
2128 char model[sizeof (nvme->n_idctl->id_model) + 1]; in nvme_prepare_devid()
2129 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; in nvme_prepare_devid()
2131 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); in nvme_prepare_devid()
2132 bcopy(nvme->n_idctl->id_serial, serial, in nvme_prepare_devid()
2133 sizeof (nvme->n_idctl->id_serial)); in nvme_prepare_devid()
2135 model[sizeof (nvme->n_idctl->id_model)] = '\0'; in nvme_prepare_devid()
2136 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; in nvme_prepare_devid()
2138 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X", in nvme_prepare_devid()
2139 nvme->n_idctl->id_vid, model, serial, nsid); in nvme_prepare_devid()
2143 nvme_init_ns(nvme_t *nvme, int nsid) in nvme_init_ns() argument
2145 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1]; in nvme_init_ns()
2149 ns->ns_nvme = nvme; in nvme_init_ns()
2151 if (nvme_identify(nvme, nsid, (void **)&idns) != 0) { in nvme_init_ns()
2152 dev_err(nvme->n_dip, CE_WARN, in nvme_init_ns()
2167 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) in nvme_init_ns()
2182 nvme_prepare_devid(nvme, ns->ns_id); in nvme_init_ns()
2202 if (ns->ns_best_block_size < nvme->n_min_block_size) in nvme_init_ns()
2203 ns->ns_best_block_size = nvme->n_min_block_size; in nvme_init_ns()
2213 dev_err(nvme->n_dip, CE_WARN, in nvme_init_ns()
2219 dev_err(nvme->n_dip, CE_WARN, in nvme_init_ns()
2231 nvme_init(nvme_t *nvme) in nvme_init() argument
2242 char model[sizeof (nvme->n_idctl->id_model) + 1]; in nvme_init()
2246 vs.r = nvme_get32(nvme, NVME_REG_VS); in nvme_init()
2247 nvme->n_version.v_major = vs.b.vs_mjr; in nvme_init()
2248 nvme->n_version.v_minor = vs.b.vs_mnr; in nvme_init()
2249 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", in nvme_init()
2250 nvme->n_version.v_major, nvme->n_version.v_minor); in nvme_init()
2252 if (nvme->n_version.v_major > nvme_version_major) { in nvme_init()
2253 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", in nvme_init()
2255 if (nvme->n_strict_version) in nvme_init()
2260 cap.r = nvme_get64(nvme, NVME_REG_CAP); in nvme_init()
2263 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2268 nvme->n_nssr_supported = cap.b.cap_nssrs; in nvme_init()
2269 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; in nvme_init()
2270 nvme->n_timeout = cap.b.cap_to; in nvme_init()
2271 nvme->n_arbitration_mechanisms = cap.b.cap_ams; in nvme_init()
2272 nvme->n_cont_queues_reqd = cap.b.cap_cqr; in nvme_init()
2273 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; in nvme_init()
2280 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), in nvme_init()
2282 nvme->n_pagesize = 1UL << (nvme->n_pageshift); in nvme_init()
2287 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; in nvme_init()
2288 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; in nvme_init()
2294 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; in nvme_init()
2295 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; in nvme_init()
2296 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; in nvme_init()
2297 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; in nvme_init()
2302 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { in nvme_init()
2303 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); in nvme_init()
2304 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); in nvme_init()
2305 nvme->n_dead = B_TRUE; in nvme_init()
2312 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) in nvme_init()
2314 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2318 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); in nvme_init()
2319 nvme->n_ioq[0] = nvme->n_adminq; in nvme_init()
2321 nvme->n_progress |= NVME_ADMIN_QUEUE; in nvme_init()
2323 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, in nvme_init()
2324 "admin-queue-len", nvme->n_admin_queue_len); in nvme_init()
2326 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; in nvme_init()
2327 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; in nvme_init()
2328 acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress; in nvme_init()
2330 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); in nvme_init()
2331 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); in nvme_init()
2333 nvme_put32(nvme, NVME_REG_AQA, aqa.r); in nvme_init()
2334 nvme_put64(nvme, NVME_REG_ASQ, asq); in nvme_init()
2335 nvme_put64(nvme, NVME_REG_ACQ, acq); in nvme_init()
2339 cc.b.cc_mps = nvme->n_pageshift - 12; in nvme_init()
2345 nvme_put32(nvme, NVME_REG_CC, cc.r); in nvme_init()
2350 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_init()
2352 for (i = 0; i != nvme->n_timeout * 10; i++) { in nvme_init()
2354 csts.r = nvme_get32(nvme, NVME_REG_CSTS); in nvme_init()
2357 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2359 ddi_fm_service_impact(nvme->n_dip, in nvme_init()
2361 nvme->n_dead = B_TRUE; in nvme_init()
2371 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); in nvme_init()
2372 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); in nvme_init()
2373 nvme->n_dead = B_TRUE; in nvme_init()
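
Bring-up in nvme_init() mirrors that teardown. The admin queue pair's page-aligned base addresses (asserted at 2330-2331) go into ASQ/ACQ, its 0-based depth into AQA, CC.MPS is derived from n_pageshift (CC.MPS encodes the host page size as 2^(12 + MPS), hence the "- 12" at 2339), CC.EN is set, and CSTS.RDY is polled back up on the same CAP.TO budget. A condensed sketch of the enable half; cc_en, cc_css, cc_ams and the poll cadence are assumed:

    aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
    asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
    acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress;

    ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
    ASSERT((acq & (nvme->n_pagesize - 1)) == 0);

    nvme_put32(nvme, NVME_REG_AQA, aqa.r);  /* 0-based queue sizes */
    nvme_put64(nvme, NVME_REG_ASQ, asq);    /* admin SQ base PA */
    nvme_put64(nvme, NVME_REG_ACQ, acq);    /* admin CQ base PA */

    cc.b.cc_ams = 0;                        /* round-robin arbitration */
    cc.b.cc_css = 0;                        /* NVM command set */
    cc.b.cc_mps = nvme->n_pageshift - 12;   /* page size = 2^(12+MPS) */
    cc.b.cc_en = 1;                         /* enable the controller */
    nvme_put32(nvme, NVME_REG_CC, cc.r);

    for (i = 0; i != nvme->n_timeout * 10; i++) {
            delay(drv_usectohz(50000));
            csts.r = nvme_get32(nvme, NVME_REG_CSTS);
            if (csts.b.csts_rdy == 1)
                    break;
    }
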
2381 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); in nvme_init()
2386 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) in nvme_init()
2388 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) in nvme_init()
2390 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) in nvme_init()
2392 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2400 nvme_async_event(nvme); in nvme_init()
2405 if (nvme_identify(nvme, 0, (void **)&nvme->n_idctl) != 0) { in nvme_init()
2406 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2414 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); in nvme_init()
2415 model[sizeof (nvme->n_idctl->id_model)] = '\0'; in nvme_init()
2419 nvme->n_vendor = strdup("NVMe"); in nvme_init()
2421 nvme->n_vendor = strdup(vendor); in nvme_init()
2423 nvme->n_product = strdup(product); in nvme_init()
2428 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, in nvme_init()
2429 MIN(nvme->n_admin_queue_len / 10, in nvme_init()
2430 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); in nvme_init()
2432 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, in nvme_init()
2433 "async-event-limit", nvme->n_async_event_limit); in nvme_init()
2435 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; in nvme_init()
2443 sema_destroy(&nvme->n_abort_sema); in nvme_init()
2444 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, in nvme_init()
2447 nvme->n_progress |= NVME_CTRL_LIMITS; in nvme_init()
2449 if (nvme->n_idctl->id_mdts == 0) in nvme_init()
2450 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; in nvme_init()
2452 nvme->n_max_data_transfer_size = in nvme_init()
2453 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); in nvme_init()
2455 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; in nvme_init()
2464 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, in nvme_init()
2465 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); in nvme_init()
2467 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; in nvme_init()
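
The clamps at 2449-2467 combine two independent transfer-size limits: Identify's MDTS (0 means no limit, approximated here as 65536 pages; otherwise 2^(pageshift + MDTS) bytes) and the reach of a single PRP list, n_pagesize / sizeof (uint64_t) entries of n_pagesize each. A quick userland check of what those evaluate to, with 4 KiB pages and an illustrative MDTS of 5:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
            uint64_t pageshift = 12;                /* 4 KiB pages */
            uint64_t pagesize = 1ULL << pageshift;
            uint64_t mdts = 5;                      /* example value */

            uint64_t by_mdts = 1ULL << (pageshift + mdts);
            uint64_t by_prp = pagesize / sizeof (uint64_t) * pagesize;

            /* 2^17 = 128 KiB vs. 512 * 4 KiB = 2 MiB: MDTS wins here. */
            printf("MDTS: %llu  PRP list: %llu  effective: %llu\n",
                (unsigned long long)by_mdts, (unsigned long long)by_prp,
                (unsigned long long)(by_mdts < by_prp ? by_mdts : by_prp));
            return (0);
    }
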
2474 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || in nvme_init()
2475 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || in nvme_init()
2476 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || in nvme_init()
2477 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) in nvme_init()
2485 nvme->n_write_cache_present = in nvme_init()
2486 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; in nvme_init()
2488 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, in nvme_init()
2490 nvme->n_write_cache_present ? 1 : 0); in nvme_init()
2492 if (!nvme->n_write_cache_present) { in nvme_init()
2493 nvme->n_write_cache_enabled = B_FALSE; in nvme_init()
2494 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) in nvme_init()
2496 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2498 nvme->n_write_cache_enabled ? "en" : "dis"); in nvme_init()
2502 nvme->n_write_cache_enabled = B_TRUE; in nvme_init()
2505 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, in nvme_init()
2507 nvme->n_write_cache_enabled ? 1 : 0); in nvme_init()
2513 nvme->n_lba_range_supported = B_TRUE; in nvme_init()
2518 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) in nvme_init()
2519 nvme->n_auto_pst_supported = in nvme_init()
2520 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; in nvme_init()
2525 nvme->n_namespace_count = nvme->n_idctl->id_nn; in nvme_init()
2526 if (nvme->n_namespace_count > NVME_MINOR_MAX) { in nvme_init()
2527 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2529 nvme->n_namespace_count, NVME_MINOR_MAX); in nvme_init()
2530 nvme->n_namespace_count = NVME_MINOR_MAX; in nvme_init()
2533 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * in nvme_init()
2534 nvme->n_namespace_count, KM_SLEEP); in nvme_init()
2536 for (i = 0; i != nvme->n_namespace_count; i++) { in nvme_init()
2537 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER, in nvme_init()
2539 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS) in nvme_init()
2546 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) in nvme_init()
2548 nvme_release_interrupts(nvme); in nvme_init()
2552 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, in nvme_init()
2554 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, in nvme_init()
2556 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2562 nqueues = nvme->n_intr_cnt; in nvme_init()
2568 if (nvme_set_nqueues(nvme, &nqueues) != 0) { in nvme_init()
2569 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2571 nvme->n_intr_cnt); in nvme_init()
2578 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); in nvme_init()
2579 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * in nvme_init()
2581 nvme->n_ioq[0] = nvme->n_adminq; in nvme_init()
2583 nvme->n_ioq_count = nqueues; in nvme_init()
2589 if (nvme->n_ioq_count < nvme->n_intr_cnt) { in nvme_init()
2590 nvme_release_interrupts(nvme); in nvme_init()
2592 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, in nvme_init()
2593 nvme->n_ioq_count) != DDI_SUCCESS) { in nvme_init()
2594 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2603 nvme->n_io_queue_len = in nvme_init()
2604 MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries); in nvme_init()
2605 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len", in nvme_init()
2606 nvme->n_io_queue_len); in nvme_init()
2608 for (i = 1; i != nvme->n_ioq_count + 1; i++) { in nvme_init()
2609 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len, in nvme_init()
2610 &nvme->n_ioq[i], i) != DDI_SUCCESS) { in nvme_init()
2611 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2616 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { in nvme_init()
2617 dev_err(nvme->n_dip, CE_WARN, in nvme_init()
2627 for (i = 1; i != nvme->n_async_event_limit; i++) in nvme_init()
2628 nvme_async_event(nvme); in nvme_init()
2633 (void) nvme_reset(nvme, B_FALSE); in nvme_init()
2641 nvme_t *nvme = (nvme_t *)arg1; in nvme_intr() local
2647 if (inum >= nvme->n_intr_cnt) in nvme_intr()
2650 if (nvme->n_dead) in nvme_intr()
2651 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? in nvme_intr()
2660 qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL; in nvme_intr()
2661 qnum += nvme->n_intr_cnt) { in nvme_intr()
2662 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) { in nvme_intr()
2673 nvme_release_interrupts(nvme_t *nvme) in nvme_release_interrupts() argument
2677 for (i = 0; i < nvme->n_intr_cnt; i++) { in nvme_release_interrupts()
2678 if (nvme->n_inth[i] == NULL) in nvme_release_interrupts()
2681 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) in nvme_release_interrupts()
2682 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); in nvme_release_interrupts()
2684 (void) ddi_intr_disable(nvme->n_inth[i]); in nvme_release_interrupts()
2686 (void) ddi_intr_remove_handler(nvme->n_inth[i]); in nvme_release_interrupts()
2687 (void) ddi_intr_free(nvme->n_inth[i]); in nvme_release_interrupts()
2690 kmem_free(nvme->n_inth, nvme->n_inth_sz); in nvme_release_interrupts()
2691 nvme->n_inth = NULL; in nvme_release_interrupts()
2692 nvme->n_inth_sz = 0; in nvme_release_interrupts()
2694 nvme->n_progress &= ~NVME_INTERRUPTS; in nvme_release_interrupts()
2698 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) in nvme_setup_interrupts() argument
2704 if (nvme->n_intr_types == 0) { in nvme_setup_interrupts()
2705 ret = ddi_intr_get_supported_types(nvme->n_dip, in nvme_setup_interrupts()
2706 &nvme->n_intr_types); in nvme_setup_interrupts()
2708 dev_err(nvme->n_dip, CE_WARN, in nvme_setup_interrupts()
2715 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; in nvme_setup_interrupts()
2719 if ((nvme->n_intr_types & intr_type) == 0) in nvme_setup_interrupts()
2722 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); in nvme_setup_interrupts()
2724 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", in nvme_setup_interrupts()
2729 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); in nvme_setup_interrupts()
2731 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", in nvme_setup_interrupts()
2740 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; in nvme_setup_interrupts()
2741 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); in nvme_setup_interrupts()
2743 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, in nvme_setup_interrupts()
2746 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", in nvme_setup_interrupts()
2751 nvme->n_intr_cnt = count; in nvme_setup_interrupts()
2753 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); in nvme_setup_interrupts()
2755 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", in nvme_setup_interrupts()
2761 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, in nvme_setup_interrupts()
2762 (void *)nvme, (void *)(uintptr_t)i); in nvme_setup_interrupts()
2764 dev_err(nvme->n_dip, CE_WARN, in nvme_setup_interrupts()
2770 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); in nvme_setup_interrupts()
2773 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) in nvme_setup_interrupts()
2774 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); in nvme_setup_interrupts()
2776 ret = ddi_intr_enable(nvme->n_inth[i]); in nvme_setup_interrupts()
2779 dev_err(nvme->n_dip, CE_WARN, in nvme_setup_interrupts()
2785 nvme->n_intr_type = intr_type; in nvme_setup_interrupts()
2787 nvme->n_progress |= NVME_INTERRUPTS; in nvme_setup_interrupts()
2792 nvme_release_interrupts(nvme); in nvme_setup_interrupts()
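
nvme_setup_interrupts() (2698-2792) is the canonical ddi_intr sequence: discover supported types (dropping MSI-X where noted at 2715), size the request with ddi_intr_get_nintrs()/ddi_intr_get_navail(), allocate a block of handles, fetch the priority that seeds every queue mutex (see DDI_INTR_PRI(nvme->n_intr_pri) at 821 and 865), install one handler per vector with the vector number as its second argument, then enable en bloc or individually depending on DDI_INTR_FLAG_BLOCK. That per-vector argument is what lets nvme_intr() walk only its own queues in strides of n_intr_cnt (2660-2661), matching cq_iv = idx % n_intr_cnt at 2014. The middle of the sequence, condensed from the hits (failure unwinding via nvme_release_interrupts() at 2792 is omitted):

    ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
        &count, 0);
    if (ret != DDI_SUCCESS)
            return (DDI_FAILURE);

    nvme->n_intr_cnt = count;

    ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
    if (ret != DDI_SUCCESS)
            return (DDI_FAILURE);

    for (i = 0; i < count; i++) {
            /* The vector number rides along as the handler's arg2. */
            ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
                (void *)nvme, (void *)(uintptr_t)i);
            if (ret != DDI_SUCCESS)
                    return (DDI_FAILURE);
    }

    (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);

    for (i = 0; i < count; i++) {
            if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
                    ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
            else
                    ret = ddi_intr_enable(nvme->n_inth[i]);
    }
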
2809 nvme_t *nvme; in nvme_attach() local
2824 nvme = ddi_get_soft_state(nvme_state, instance); in nvme_attach()
2825 ddi_set_driver_private(dip, nvme); in nvme_attach()
2826 nvme->n_dip = dip; in nvme_attach()
2828 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL); in nvme_attach()
2830 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2832 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, in nvme_attach()
2835 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2837 nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2839 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2842 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2845 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, in nvme_attach()
2849 if (!ISP2(nvme->n_min_block_size) || in nvme_attach()
2850 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { in nvme_attach()
2852 "using default %d", ISP2(nvme->n_min_block_size) ? in nvme_attach()
2855 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; in nvme_attach()
2858 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) in nvme_attach()
2859 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; in nvme_attach()
2860 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) in nvme_attach()
2861 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; in nvme_attach()
2863 if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN) in nvme_attach()
2864 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN; in nvme_attach()
2866 if (nvme->n_async_event_limit < 1) in nvme_attach()
2867 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; in nvme_attach()
2869 nvme->n_reg_acc_attr = nvme_reg_acc_attr; in nvme_attach()
2870 nvme->n_queue_dma_attr = nvme_queue_dma_attr; in nvme_attach()
2871 nvme->n_prp_dma_attr = nvme_prp_dma_attr; in nvme_attach()
2872 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; in nvme_attach()
2877 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, in nvme_attach()
2882 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); in nvme_attach()
2884 if (nvme->n_fm_cap) { in nvme_attach()
2885 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) in nvme_attach()
2886 nvme->n_reg_acc_attr.devacc_attr_access = in nvme_attach()
2889 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { in nvme_attach()
2890 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; in nvme_attach()
2891 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; in nvme_attach()
2894 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || in nvme_attach()
2895 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) in nvme_attach()
2898 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) in nvme_attach()
2900 (void *)nvme); in nvme_attach()
2903 nvme->n_progress |= NVME_FMA_INIT; in nvme_attach()
2914 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, in nvme_attach()
2915 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { in nvme_attach()
2920 nvme->n_progress |= NVME_REGS_MAPPED; in nvme_attach()
2927 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus), in nvme_attach()
2929 if (nvme->n_cmd_taskq == NULL) { in nvme_attach()
2939 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), in nvme_attach()
2941 NULL, (void *)nvme, NULL, 0); in nvme_attach()
2943 if (nvme_init(nvme) != DDI_SUCCESS) in nvme_attach()
2949 for (i = 0; i != nvme->n_namespace_count; i++) { in nvme_attach()
2950 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name, in nvme_attach()
2951 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1), in nvme_attach()
2958 if (nvme->n_ns[i].ns_ignore) in nvme_attach()
2961 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], in nvme_attach()
2962 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); in nvme_attach()
2964 if (nvme->n_ns[i].ns_bd_hdl == NULL) { in nvme_attach()
2970 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) in nvme_attach()
2991 if (nvme->n_dead) in nvme_attach()
3003 nvme_t *nvme; in nvme_detach() local
3010 nvme = ddi_get_soft_state(nvme_state, instance); in nvme_detach()
3012 if (nvme == NULL) in nvme_detach()
3016 mutex_destroy(&nvme->n_minor.nm_mutex); in nvme_detach()
3018 if (nvme->n_ns) { in nvme_detach()
3019 for (i = 0; i != nvme->n_namespace_count; i++) { in nvme_detach()
3020 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name); in nvme_detach()
3021 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex); in nvme_detach()
3023 if (nvme->n_ns[i].ns_bd_hdl) { in nvme_detach()
3025 nvme->n_ns[i].ns_bd_hdl); in nvme_detach()
3026 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); in nvme_detach()
3029 if (nvme->n_ns[i].ns_idns) in nvme_detach()
3030 kmem_free(nvme->n_ns[i].ns_idns, in nvme_detach()
3032 if (nvme->n_ns[i].ns_devid) in nvme_detach()
3033 strfree(nvme->n_ns[i].ns_devid); in nvme_detach()
3036 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * in nvme_detach()
3037 nvme->n_namespace_count); in nvme_detach()
3040 if (nvme->n_progress & NVME_INTERRUPTS) in nvme_detach()
3041 nvme_release_interrupts(nvme); in nvme_detach()
3043 if (nvme->n_cmd_taskq) in nvme_detach()
3044 ddi_taskq_wait(nvme->n_cmd_taskq); in nvme_detach()
3046 if (nvme->n_ioq_count > 0) { in nvme_detach()
3047 for (i = 1; i != nvme->n_ioq_count + 1; i++) { in nvme_detach()
3048 if (nvme->n_ioq[i] != NULL) { in nvme_detach()
3050 nvme_free_qpair(nvme->n_ioq[i]); in nvme_detach()
3054 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * in nvme_detach()
3055 (nvme->n_ioq_count + 1)); in nvme_detach()
3058 if (nvme->n_prp_cache != NULL) { in nvme_detach()
3059 kmem_cache_destroy(nvme->n_prp_cache); in nvme_detach()
3062 if (nvme->n_progress & NVME_REGS_MAPPED) { in nvme_detach()
3063 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); in nvme_detach()
3064 (void) nvme_reset(nvme, B_FALSE); in nvme_detach()
3067 if (nvme->n_cmd_taskq) in nvme_detach()
3068 ddi_taskq_destroy(nvme->n_cmd_taskq); in nvme_detach()
3070 if (nvme->n_progress & NVME_CTRL_LIMITS) in nvme_detach()
3071 sema_destroy(&nvme->n_abort_sema); in nvme_detach()
3073 if (nvme->n_progress & NVME_ADMIN_QUEUE) in nvme_detach()
3074 nvme_free_qpair(nvme->n_adminq); in nvme_detach()
3076 if (nvme->n_idctl) in nvme_detach()
3077 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); in nvme_detach()
3079 if (nvme->n_progress & NVME_REGS_MAPPED) in nvme_detach()
3080 ddi_regs_map_free(&nvme->n_regh); in nvme_detach()
3082 if (nvme->n_progress & NVME_FMA_INIT) { in nvme_detach()
3083 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) in nvme_detach()
3084 ddi_fm_handler_unregister(nvme->n_dip); in nvme_detach()
3086 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || in nvme_detach()
3087 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) in nvme_detach()
3088 pci_ereport_teardown(nvme->n_dip); in nvme_detach()
3090 ddi_fm_fini(nvme->n_dip); in nvme_detach()
3093 if (nvme->n_vendor != NULL) in nvme_detach()
3094 strfree(nvme->n_vendor); in nvme_detach()
3096 if (nvme->n_product != NULL) in nvme_detach()
3097 strfree(nvme->n_product); in nvme_detach()
3108 nvme_t *nvme; in nvme_quiesce() local
3112 nvme = ddi_get_soft_state(nvme_state, instance); in nvme_quiesce()
3114 if (nvme == NULL) in nvme_quiesce()
3117 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); in nvme_quiesce()
3119 (void) nvme_reset(nvme, B_TRUE); in nvme_quiesce()
3127 nvme_t *nvme = cmd->nc_nvme; in nvme_fill_prp() local
3147 nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1; in nvme_fill_prp()
3158 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); in nvme_fill_prp()
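
Per-command PRP list pages come from the n_prp_cache kmem cache created at 2939-2941 with nvme_prp_dma_constructor(), so nvme_fill_prp() can kmem_cache_alloc() a pre-bound page at line 3158 instead of doing DMA setup in the I/O path; the "- 1" at 3147 reserves the last quadword of each list page for the chain pointer to the next one. The constructor's shape, from the hits at 738-744 (the cast of buf and the return values are assumed):

    static int
    nvme_prp_dma_constructor(void *buf, void *private, int flags)
    {
            nvme_t *nvme = (nvme_t *)private;
            nvme_dma_t *dma = (nvme_dma_t *)buf;

            /* One controller page per cache object, bound once. */
            if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
                DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS)
                    return (-1);

            return (0);
    }
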
3179 nvme_t *nvme = ns->ns_nvme; in nvme_create_nvm_cmd() local
3185 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ? in nvme_create_nvm_cmd()
3242 nvme_t *nvme = ns->ns_nvme; in nvme_bd_driveinfo() local
3250 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len in nvme_bd_driveinfo()
3251 / nvme->n_namespace_count; in nvme_bd_driveinfo()
3265 drive->d_model = nvme->n_idctl->id_model; in nvme_bd_driveinfo()
3266 drive->d_model_len = sizeof (nvme->n_idctl->id_model); in nvme_bd_driveinfo()
3267 drive->d_vendor = nvme->n_vendor; in nvme_bd_driveinfo()
3268 drive->d_vendor_len = strlen(nvme->n_vendor); in nvme_bd_driveinfo()
3269 drive->d_product = nvme->n_product; in nvme_bd_driveinfo()
3270 drive->d_product_len = strlen(nvme->n_product); in nvme_bd_driveinfo()
3271 drive->d_serial = nvme->n_idctl->id_serial; in nvme_bd_driveinfo()
3272 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); in nvme_bd_driveinfo()
3273 drive->d_revision = nvme->n_idctl->id_fwrev; in nvme_bd_driveinfo()
3274 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); in nvme_bd_driveinfo()
3295 nvme_t *nvme = ns->ns_nvme; in nvme_bd_cmd() local
3301 if (nvme->n_dead) in nvme_bd_cmd()
3308 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; in nvme_bd_cmd()
3309 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); in nvme_bd_cmd()
3310 ioq = nvme->n_ioq[cmd->nc_sqid]; in nvme_bd_cmd()
3328 cmd = nvme_retrieve_cmd(nvme, ioq); in nvme_bd_cmd()
3401 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); in nvme_open() local
3409 if (nvme == NULL) in nvme_open()
3412 if (nsid > nvme->n_namespace_count) in nvme_open()
3415 if (nvme->n_dead) in nvme_open()
3418 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; in nvme_open()
3450 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); in nvme_close() local
3457 if (nvme == NULL) in nvme_close()
3460 if (nsid > nvme->n_namespace_count) in nvme_close()
3463 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; in nvme_close()
3477 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_identify() argument
3490 if ((rv = nvme_identify(nvme, nsid, (void **)&idctl)) != 0) in nvme_ioctl_identify()
3503 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, in nvme_ioctl_capabilities() argument
3517 cap.r = nvme_get64(nvme, NVME_REG_CAP); in nvme_ioctl_capabilities()
3534 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, in nvme_ioctl_get_logpage() argument
3551 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0) in nvme_ioctl_get_logpage()
3566 if (nvme_get_logpage(nvme, &log, &bufsize, nioc->n_arg, nsid) in nvme_ioctl_get_logpage()
3585 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, in nvme_ioctl_get_features() argument
3622 if (res >= nvme->n_intr_cnt) in nvme_ioctl_get_features()
3627 if (nvme->n_lba_range_supported == B_FALSE) in nvme_ioctl_get_features()
3631 nsid > nvme->n_namespace_count) in nvme_ioctl_get_features()
3640 if (!nvme->n_write_cache_present) in nvme_ioctl_get_features()
3649 if (!nvme->n_auto_pst_supported) in nvme_ioctl_get_features()
3658 rv = nvme_get_features(nvme, nsid, feature, &res, &buf, &bufsize); in nvme_ioctl_get_features()
3678 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_intr_cnt() argument
3686 nioc->n_arg = nvme->n_intr_cnt; in nvme_ioctl_intr_cnt()
3691 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_version() argument
3700 if (nioc->n_len < sizeof (nvme->n_version)) in nvme_ioctl_version()
3703 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf, in nvme_ioctl_version()
3704 sizeof (nvme->n_version), mode) != 0) in nvme_ioctl_version()
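
The ioctl handlers share one calling convention, visible in the signatures at 3477-3711, and are dispatched through a function table at line 3896. nvme_ioctl_version() is the minimal case; from the hits at 3700-3704 it reassembles roughly as below (the final parameter's type and the error values are assumed):

    static int
    nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
        cred_t *cred_p)
    {
            if (nioc->n_len < sizeof (nvme->n_version))
                    return (EINVAL);

            /* ddi_copyout() honors the caller's data model via mode. */
            if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
                sizeof (nvme->n_version), mode) != 0)
                    return (EFAULT);

            return (0);
    }
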
3711 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_format() argument
3726 if (nvme->n_idctl->id_oacs.oa_format == 0) in nvme_ioctl_format()
3733 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0) in nvme_ioctl_format()
3737 nvme->n_idctl->id_fna.fn_sec_erase != 0) in nvme_ioctl_format()
3750 if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf || in nvme_ioctl_format()
3751 nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0) in nvme_ioctl_format()
3759 nvme->n_idctl->id_fna.fn_crypt_erase == 0)) in nvme_ioctl_format()
3765 return (nvme_format_nvm(nvme, nsid, frmt.b.fm_lbaf, B_FALSE, 0, B_FALSE, in nvme_ioctl_format()
3770 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_detach() argument
3782 rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl); in nvme_ioctl_detach()
3790 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, in nvme_ioctl_attach() argument
3806 idns = nvme->n_ns[nsid - 1].ns_idns; in nvme_ioctl_attach()
3807 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) in nvme_ioctl_attach()
3812 rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl); in nvme_ioctl_attach()
3827 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); in nvme_ioctl() local
3846 if (nvme == NULL) in nvme_ioctl()
3849 if (nsid > nvme->n_namespace_count) in nvme_ioctl()
3853 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0)); in nvme_ioctl()
3877 if (nvme->n_dead && cmd != NVME_IOC_DETACH) in nvme_ioctl()
3896 rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode, in nvme_ioctl()