Lines matching "cmd", "timeout", and "ms". Only the matching source lines of the NVMe controller driver are shown below, grouped by function.

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 * Copyright (C) 2012-2016 Intel Corporation

/* nvme_ctrlr_barrier() */
bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);

/* nvme_ctrlr_devctl_va() */
sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));

/* nvme_ctrlr_devctl_log() */
sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));

/* nvme_ctrlr_construct_admin_qpair() */
qpair = &ctrlr->adminq;
qpair->id = 0;
qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
qpair->domain = ctrlr->domain;
* max I/O xfer size. 16KB is sufficient here - maybe even less?

#define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
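/*
 * Illustration (hypothetical numbers, not from the file): QP() maps a CPU
 * index onto an I/O queue by scaling.  With num_io_queues = 4 and
 * mp_ncpus = 8, CPUs 0-1 use queue 0, CPUs 2-3 queue 1, CPUs 4-5 queue 2,
 * and CPUs 6-7 queue 3.
 */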
/* nvme_ctrlr_construct_io_qpairs() */
* fail-safe.
(rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
(1 << (ctrlr->dstrd + 1));
* also that for a queue size of N, we can only have (N-1) commands
* outstanding, hence the "-1" here.
num_trackers = min(num_trackers, (num_entries-1));
* of the storage system grows multi-queue support.
ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
qpair = &ctrlr->ioq[i];
* Admin queue has ID=0. IO queues start at ID=1 -
qpair->id = i + 1;
if (ctrlr->num_io_queues > 1) {
qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
qpair->domain = ctrlr->domain;
* For I/O queues, use the controller-wide max_xfer_size
if (ctrlr->num_io_queues > 1)
bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
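/*
 * Worked example (hypothetical numbers): with num_entries = 256 per queue,
 * num_trackers is capped at 255, since a queue of size N can only hold N-1
 * outstanding commands.  With 4 I/O queues, max_hw_pend_io becomes
 * 255 * 4 * 3 / 4 = 765 requests in flight before the driver queues
 * internally.
 */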
/* nvme_ctrlr_fail() */
ctrlr->is_failed = true;
ctrlr->is_failed_admin = true;
nvme_qpair_fail(&ctrlr->adminq);
if (ctrlr->ioq != NULL) {
for (i = 0; i < ctrlr->num_io_queues; i++) {
nvme_qpair_fail(&ctrlr->ioq[i]);

/* nvme_ctrlr_wait_for_ready() */
* capped at 1ms.
int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
if (timeout - ticks < 0) {
"within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
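/*
 * Note (added): the "timeout - ticks < 0" form compares the tick counts as a
 * signed difference, so the deadline test keeps working if the ticks counter
 * wraps around, unlike a plain "ticks > timeout" comparison.
 */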
/* nvme_ctrlr_disable() */
/* Wait for RDY == 0 or timeout & fail */
/* EN == 1, wait for RDY == 1 or timeout & fail */
if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)

/* nvme_ctrlr_enable() */
/* EN == 0 already wait for RDY == 0 or timeout & fail */
nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
/* acqs and asqs are 0-based. */
qsize = ctrlr->adminq.num_entries - 1;
cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
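/*
 * Example (hypothetical value): with adminq.num_entries = 256, qsize = 255 is
 * the value programmed for the admin queue sizes, because the ASQS/ACQS
 * fields of the AQA register are 0-based counts.
 */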
/* nvme_ctrlr_disable_qpairs() */
nvme_admin_qpair_disable(&ctrlr->adminq);
if (ctrlr->is_initialized) {
for (i = 0; i < ctrlr->num_io_queues; i++)
nvme_io_qpair_disable(&ctrlr->ioq[i]);

/* nvme_ctrlr_hw_reset() */
ctrlr->is_failed_admin = true;
ctrlr->is_failed_admin = false;

/* nvme_ctrlr_reset() */
cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
if (!ctrlr->is_dying)
taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);

/* nvme_ctrlr_identify() */
nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
nvme_controller_data_swapbytes(&ctrlr->cdata);
if (ctrlr->cdata.mdts > 0)
ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
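/*
 * Worked example (hypothetical values, assuming NVME_MPS_SHIFT is the usual
 * 4 KiB page shift of 12): with cdata.mdts = 5 and MPSMIN = 0 the cap is
 * 1 << (5 + 12 + 0) = 128 KiB, so max_xfer_size is reduced to 128 KiB if it
 * was previously larger.
 */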
/* nvme_ctrlr_set_num_qpairs() */
nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
* Data in cdw0 is 0-based.
* Lower 16-bits indicate number of submission queues allocated.
* Upper 16-bits indicate number of completion queues allocated.
ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
if (ctrlr->num_io_queues > vm_ndomains)
ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
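/*
 * Sketch of the elided decode (assumed shape, based on the comment above):
 * the Set Features (Number of Queues) completion packs the 0-based counts
 * into cdw0, so the driver presumably recovers them roughly as
 *
 *	sq_allocated = (cpl.cdw0 & 0xffff) + 1;
 *	cq_allocated = (cpl.cdw0 >> 16) + 1;
 */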
/* nvme_ctrlr_create_qpairs() */
for (i = 0; i < ctrlr->num_io_queues; i++) {
qpair = &ctrlr->ioq[i];

/* nvme_ctrlr_delete_qpairs() */
for (int i = 0; i < ctrlr->num_io_queues; i++) {
qpair = &ctrlr->ioq[i];

/* nvme_ctrlr_construct_namespaces() */
for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
ns = &ctrlr->ns[i];

/* nvme_ctrlr_get_log_page_size() */
(ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);

/* nvme_ctrlr_async_event_log_page_cb() */
nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
aer->log_page_id, NULL, 0);
switch (aer->log_page_id) {
err = (struct nvme_error_information_entry *)aer->log_page_buffer;
for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
(struct nvme_health_information_page *)aer->log_page_buffer);
(struct nvme_ns_list *)aer->log_page_buffer);
(struct nvme_command_effects_page *)aer->log_page_buffer);
(struct nvme_res_notification_page *)aer->log_page_buffer);
(struct nvme_sanitize_status_page *)aer->log_page_buffer);
if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
aer->log_page_buffer;
nvme_ctrlr_log_critical_warnings(aer->ctrlr,
health_info->critical_warning);
aer->ctrlr->async_event_config &=
~health_info->critical_warning;
nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
aer->ctrlr->async_event_config, NULL, NULL);
} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
nsl = (struct nvme_ns_list *)aer->log_page_buffer;
for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
if (nsl->ns[i] > NVME_MAX_NAMESPACES)
nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);

/* nvme_ctrlr_async_event_cb() */
aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
" page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
aer->log_page_id);
if (is_log_page_id_valid(aer->log_page_id)) {
aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
aer->log_page_id);
memcpy(&aer->cpl, cpl, sizeof(*cpl));
nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);

/* nvme_ctrlr_construct_and_submit_aer() */
aer->ctrlr = ctrlr;
* XXX-MJ this should be M_WAITOK but we might be in a non-sleepable
aer->req = req;
* Disable timeout here, since asynchronous event requests should by
req->timeout = false;
req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;

/* nvme_ctrlr_configure_aer() */
ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
if (ctrlr->cdata.ver >= NVME_REV(1, 2))
ctrlr->async_event_config |=
ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
ctrlr->async_event_config, NULL, NULL);
/* aerl is a zero-based value, so we need to add 1 here. */
ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
for (i = 0; i < ctrlr->num_aers; i++) {
aer = &ctrlr->aer[i];
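/*
 * Example (hypothetical value): cdata.aerl = 3 means the controller supports
 * 4 outstanding Asynchronous Event Requests, so num_aers becomes
 * min(NVME_MAX_ASYNC_EVENTS, 4).
 */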
/* nvme_ctrlr_configure_int_coalescing() */
ctrlr->int_coal_time = 0;
&ctrlr->int_coal_time);
ctrlr->int_coal_threshold = 0;
&ctrlr->int_coal_threshold);
nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
ctrlr->int_coal_threshold, NULL, NULL);

/* nvme_ctrlr_hmb_free() */
if (ctrlr->hmb_desc_paddr) {
bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
ctrlr->hmb_desc_map);
ctrlr->hmb_desc_paddr = 0;
if (ctrlr->hmb_desc_tag) {
bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
ctrlr->hmb_desc_tag = NULL;
for (i = 0; i < ctrlr->hmb_nchunks; i++) {
hmbc = &ctrlr->hmb_chunks[i];
bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
hmbc->hmbc_map);
ctrlr->hmb_nchunks = 0;
if (ctrlr->hmb_tag) {
bus_dma_tag_destroy(ctrlr->hmb_tag);
ctrlr->hmb_tag = NULL;
if (ctrlr->hmb_chunks) {
free(ctrlr->hmb_chunks, M_NVME);
ctrlr->hmb_chunks = NULL;

/* nvme_ctrlr_hmb_alloc() */
min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
if (min > 0 && ctrlr->cdata.hmmaxd > 0)
minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
ctrlr->hmb_chunk = pref;
* are all based on the current MPS (ctrlr->page_size).
ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
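/*
 * Worked example (hypothetical values, assuming NVME_HMB_UNITS is the spec's
 * 4 KiB granule): hmpre = 8192 units gives pref = 32 MiB, so the first
 * attempt is a single 32 MiB chunk.  Each time a contiguous allocation fails
 * (see the retry path below), hmb_chunk is halved and the chunk count grows,
 * bounded below by minc and capped at cdata.hmmaxd descriptors.
 */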
err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
for (i = 0; i < ctrlr->hmb_nchunks; i++) {
hmbc = &ctrlr->hmb_chunks[i];
if (bus_dmamem_alloc(ctrlr->hmb_tag,
(void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
&hmbc->hmbc_map)) {
if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
&hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
hmbc->hmbc_map);
bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
ctrlr->hmb_chunk / 2 >= minc) {
ctrlr->hmb_nchunks = i;
ctrlr->hmb_chunk /= 2;
ctrlr->hmb_nchunks = i;
if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
(void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
&ctrlr->hmb_desc_map)) {
if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
ctrlr->hmb_desc_vaddr, size, nvme_single_map,
&ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
ctrlr->hmb_desc_map);
for (i = 0; i < ctrlr->hmb_nchunks; i++) {
memset(&ctrlr->hmb_desc_vaddr[i], 0,
ctrlr->hmb_desc_vaddr[i].addr =
htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
(long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk

/* nvme_ctrlr_hmb_enable() */
ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
ctrlr->hmb_nchunks, NULL, 0,
/* nvme_ctrlr_start() */
nvme_qpair_reset(&ctrlr->adminq);
nvme_admin_qpair_enable(&ctrlr->adminq);
if (ctrlr->ioq != NULL) {
for (i = 0; i < ctrlr->num_io_queues; i++)
nvme_qpair_reset(&ctrlr->ioq[i]);
* If it was a reset on initialization command timeout, just
if (resetting && !ctrlr->is_initialized)
old_num_io_queues = ctrlr->num_io_queues;
if (old_num_io_queues != ctrlr->num_io_queues) {
old_num_io_queues, ctrlr->num_io_queues);
if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
if (ctrlr->hmb_nchunks > 0)
} else if (ctrlr->hmb_nchunks > 0)
for (i = 0; i < ctrlr->num_io_queues; i++)
nvme_io_qpair_enable(&ctrlr->ioq[i]);

/* nvme_ctrlr_start_config_hook() */
if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
config_intrhook_disestablish(&ctrlr->config_hook);
nvme_qpair_reset(&ctrlr->adminq);
nvme_admin_qpair_enable(&ctrlr->adminq);
config_intrhook_disestablish(&ctrlr->config_hook);
if (!ctrlr->is_failed) {
ctrlr->is_initialized = true;

/* nvme_ctrlr_reset_task() */
atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);

/* nvme_ctrlr_poll() */
nvme_qpair_process_completions(&ctrlr->adminq);
for (i = 0; i < ctrlr->num_io_queues; i++)
if (ctrlr->ioq && ctrlr->ioq[i].cpl)
nvme_qpair_process_completions(&ctrlr->ioq[i]);
* Poll the single-vector interrupt case: num_io_queues will be 1 and
/* nvme_pt_done() */
struct mtx *mtx = pt->driver_lock;
bzero(&pt->cpl, sizeof(pt->cpl));
pt->cpl.cdw0 = cpl->cdw0;
status = cpl->status;
pt->cpl.status = status;
pt->driver_lock = NULL;

/* nvme_ctrlr_passthrough_cmd() */
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
nvme_printf(ctrlr, "pt->len (%d) "
"exceeds max_xfer_size (%d)\n", pt->len,
ctrlr->max_xfer_size);
buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
req = nvme_allocate_request_vaddr(pt->buf, pt->len,
/* Assume user space already converted to little-endian */
req->cmd.opc = pt->cmd.opc;
req->cmd.fuse = pt->cmd.fuse;
req->cmd.rsvd2 = pt->cmd.rsvd2;
req->cmd.rsvd3 = pt->cmd.rsvd3;
req->cmd.cdw10 = pt->cmd.cdw10;
req->cmd.cdw11 = pt->cmd.cdw11;
req->cmd.cdw12 = pt->cmd.cdw12;
req->cmd.cdw13 = pt->cmd.cdw13;
req->cmd.cdw14 = pt->cmd.cdw14;
req->cmd.cdw15 = pt->cmd.cdw15;
req->cmd.nsid = htole32(nsid);
pt->driver_lock = mtx;
while (pt->driver_lock != NULL)

/* nvme_npc_done() */
struct mtx *mtx = (void *)(uintptr_t)npc->metadata;
npc->result = cpl->cdw0; /* cpl in host order by now */
npc->metadata = 0;
/* nvme_ctrlr_linux_passthru_cmd() */
if (npc->metadata != 0 || npc->metadata_len != 0)
if (npc->data_len > 0 && npc->addr != 0) {
if (npc->data_len > ctrlr->max_xfer_size) {
"npc->data_len (%d) exceeds max_xfer_size (%d)\n",
npc->data_len, ctrlr->max_xfer_size);
if ((npc->opcode & 0x3) == 0 || (npc->opcode & 0x3) == 3)
buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
npc->data_len, 1) < 0) {
req = nvme_allocate_request_vaddr(buf->b_data,
npc->data_len, M_WAITOK, nvme_npc_done, npc);
(void *)(uintptr_t)npc->addr, npc->data_len,
req->cmd.opc = npc->opcode;
req->cmd.fuse = npc->flags;
req->cmd.rsvd2 = htole16(npc->cdw2);
req->cmd.rsvd3 = htole16(npc->cdw3);
req->cmd.cdw10 = htole32(npc->cdw10);
req->cmd.cdw11 = htole32(npc->cdw11);
req->cmd.cdw12 = htole32(npc->cdw12);
req->cmd.cdw13 = htole32(npc->cdw13);
req->cmd.cdw14 = htole32(npc->cdw14);
req->cmd.cdw15 = htole32(npc->cdw15);
req->cmd.nsid = htole32(nsid);
npc->metadata = (uintptr_t) mtx;
/* XXX no timeout passed down */
while (npc->metadata != 0)

/* nvme_ctrlr_ioctl() */
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
ctrlr = cdev->si_drv1;
switch (cmd) {
return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
sizeof(gnsid->cdev));
gnsid->nsid = 0;
*(uint64_t *)arg = ctrlr->max_xfer_size;
memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
td->td_retval[0] = 0xfffffffful;
return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
cmd == NVME_IOCTL_ADMIN_CMD));
/* nvme_ctrlr_construct() */
ctrlr->dev = dev;
mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
if (bus_get_domain(dev, &ctrlr->domain) != 0)
ctrlr->domain = 0;
ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
/* Get ready timeout value from controller, in units of 500ms. */
ctrlr->ready_timeout_in_ms = to * 500;
ctrlr->admin_timeout_period = timeout_period;
ctrlr->timeout_period = timeout_period;
ctrlr->enable_aborts = 0;
TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
/* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
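/*
 * Worked arithmetic for the cap above: with a 4096-byte page, one page of
 * 8-byte PRP entries holds 4096 / 8 = 512 entries, each addressing a
 * 4096-byte page, so 512 * 4096 = 2 MiB, further limited by maxphys.
 */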
ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
taskqueue_thread_enqueue, &ctrlr->taskqueue);
taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
ctrlr->is_resetting = 0;
ctrlr->is_initialized = false;
ctrlr->notification_sent = 0;
TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
STAILQ_INIT(&ctrlr->fail_req);
ctrlr->is_failed = false;
status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
/* nvme_ctrlr_destruct() */
ctrlr->is_dying = true;
if (ctrlr->resource == NULL)
if (!mtx_initialized(&ctrlr->adminq.lock))
nvme_ns_destruct(&ctrlr->ns[i]);
if (ctrlr->cdev)
destroy_dev(ctrlr->cdev);
if (ctrlr->is_initialized) {
if (ctrlr->hmb_nchunks > 0)
if (ctrlr->ioq != NULL) {
for (i = 0; i < ctrlr->num_io_queues; i++)
nvme_io_qpair_destroy(&ctrlr->ioq[i]);
free(ctrlr->ioq, M_NVME);
nvme_admin_qpair_destroy(&ctrlr->adminq);
if (ctrlr->taskqueue)
taskqueue_free(ctrlr->taskqueue);
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
if (ctrlr->res)
bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
rman_get_rid(ctrlr->res), ctrlr->res);
if (ctrlr->bar4_resource != NULL) {
ctrlr->bar4_resource_id, ctrlr->bar4_resource);
ctrlr->resource_id, ctrlr->resource);
if (ctrlr->alignment_splits)
counter_u64_free(ctrlr->alignment_splits);
mtx_destroy(&ctrlr->lock);
/* nvme_ctrlr_shutdown() */
int timeout;
timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
if (timeout - ticks < 0) {
nvme_printf(ctrlr, "shutdown timeout\n");
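/*
 * Example (hypothetical value): RTD3E is reported in microseconds, so
 * cdata.rtd3e = 10000000 gives a 10 second shutdown budget; the "+ 999999"
 * rounds the tick conversion up, and a controller reporting 0 falls back to
 * the 5 * hz (5 second) default.
 */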
/* nvme_ctrlr_submit_admin_request() */
nvme_qpair_submit_request(&ctrlr->adminq, req);

/* nvme_ctrlr_submit_io_request() */
qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];

/* nvme_ctrlr_get_device() */
return (ctrlr->dev);

/* nvme_ctrlr_get_data() */
return (&ctrlr->cdata);

/* nvme_ctrlr_suspend() */
if (ctrlr->is_failed)
while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
if (ctrlr->hmb_nchunks > 0)

/* nvme_ctrlr_resume() */
if (ctrlr->is_failed)
(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);