Lines Matching +full:hw +full:- +full:timeout +full:- +full:ms
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2016 Intel Corporation
56 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags); in nvme_ctrlr_barrier()
68 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev)); in nvme_ctrlr_devctl_va()
95 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev)); in nvme_ctrlr_devctl_log()
115 qpair = &ctrlr->adminq; in nvme_ctrlr_construct_admin_qpair()
116 qpair->id = 0; in nvme_ctrlr_construct_admin_qpair()
117 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; in nvme_ctrlr_construct_admin_qpair()
118 qpair->domain = ctrlr->domain; in nvme_ctrlr_construct_admin_qpair()
121 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries); in nvme_ctrlr_construct_admin_qpair()
128 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d " in nvme_ctrlr_construct_admin_qpair()
135 * max I/O xfer size. 16KB is sufficient here - maybe even less? in nvme_ctrlr_construct_admin_qpair()
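The hw.nvme.admin_entries fragments above follow the usual loader-tunable pattern: start from a built-in default, let the tunable override it, and fall back when the value is out of range. A minimal sketch of that fetch-then-clamp idiom; the default and bounds here are illustrative placeholders, not the driver's actual limits.

    #include <stdio.h>

    /* Illustrative clamp for a queue-depth tunable; bounds are placeholders. */
    static int
    clamp_admin_entries(int tunable_val, int def, int lo, int hi)
    {
        if (tunable_val < lo || tunable_val > hi) {
            printf("invalid hw.nvme.admin_entries=%d, using default %d\n",
                tunable_val, def);
            return (def);
        }
        return (tunable_val);
    }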
142 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
159 * fail-safe. in nvme_ctrlr_construct_io_qpairs()
162 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) / in nvme_ctrlr_construct_io_qpairs()
163 (1 << (ctrlr->dstrd + 1)); in nvme_ctrlr_construct_io_qpairs()
165 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries); in nvme_ctrlr_construct_io_qpairs()
172 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers); in nvme_ctrlr_construct_io_qpairs()
178 * also that for a queue size of N, we can only have (N-1) commands in nvme_ctrlr_construct_io_qpairs()
179 * outstanding, hence the "-1" here. in nvme_ctrlr_construct_io_qpairs()
181 num_trackers = min(num_trackers, (num_entries-1)); in nvme_ctrlr_construct_io_qpairs()
187 * of the storage system grows multi-queue support. in nvme_ctrlr_construct_io_qpairs()
189 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4; in nvme_ctrlr_construct_io_qpairs()
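Working the sizing above with made-up numbers: a queue of N entries can hold at most N-1 outstanding commands, trackers are capped accordingly, and max_hw_pend_io then keeps roughly three quarters of all trackers in flight. A small standalone sketch of that arithmetic (all inputs hypothetical):

    #include <stdio.h>

    int
    main(void)
    {
        int num_entries = 256;          /* hypothetical hw.nvme.io_entries */
        int num_trackers = 128;         /* hypothetical hw.nvme.io_trackers */
        int num_io_queues = 4;          /* hypothetical queue count */
        int max_hw_pend_io;

        /* A queue of N entries can only have N-1 commands outstanding. */
        if (num_trackers > num_entries - 1)
            num_trackers = num_entries - 1;

        /* Keep ~3/4 of the total trackers busy, as in the line above. */
        max_hw_pend_io = num_trackers * num_io_queues * 3 / 4;

        printf("trackers=%d, max_hw_pend_io=%d\n",
            num_trackers, max_hw_pend_io);      /* 128 and 384 here */
        return (0);
    }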
191 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair), in nvme_ctrlr_construct_io_qpairs()
194 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) { in nvme_ctrlr_construct_io_qpairs()
195 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_construct_io_qpairs()
198 * Admin queue has ID=0. IO queues start at ID=1 - in nvme_ctrlr_construct_io_qpairs()
201 qpair->id = i + 1; in nvme_ctrlr_construct_io_qpairs()
202 if (ctrlr->num_io_queues > 1) { in nvme_ctrlr_construct_io_qpairs()
207 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n; in nvme_ctrlr_construct_io_qpairs()
208 qpair->domain = pcpu_find(qpair->cpu)->pc_domain; in nvme_ctrlr_construct_io_qpairs()
210 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; in nvme_ctrlr_construct_io_qpairs()
211 qpair->domain = ctrlr->domain; in nvme_ctrlr_construct_io_qpairs()
215 * For I/O queues, use the controller-wide max_xfer_size in nvme_ctrlr_construct_io_qpairs()
227 if (ctrlr->num_io_queues > 1) in nvme_ctrlr_construct_io_qpairs()
228 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu); in nvme_ctrlr_construct_io_qpairs()
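The QP() macro above and the per-queue CPU assignment spread CPUs evenly over the I/O queues by integer division. A worked sketch of the cpu-to-queue direction of that mapping with hypothetical counts (8 CPUs, 4 queues): CPUs 0-1 land on queue 0, 2-3 on queue 1, and so on.

    #include <stdio.h>

    /* Same shape as the QP() macro above, with the counts passed in. */
    #define QP(ncpus, nioq, c)      ((c) * (nioq) / (ncpus))

    int
    main(void)
    {
        int mp_ncpus = 8, num_io_queues = 4;    /* hypothetical counts */

        for (int c = 0; c < mp_ncpus; c++)
            printf("cpu %d -> ioq %d\n", c, QP(mp_ncpus, num_io_queues, c));
        return (0);
    }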
244 ctrlr->is_failed = true; in nvme_ctrlr_fail()
246 ctrlr->is_failed_admin = true; in nvme_ctrlr_fail()
247 nvme_qpair_fail(&ctrlr->adminq); in nvme_ctrlr_fail()
249 if (ctrlr->ioq != NULL) { in nvme_ctrlr_fail()
250 for (i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_fail()
251 nvme_qpair_fail(&ctrlr->ioq[i]); in nvme_ctrlr_fail()
261 * capped at 1ms.
266 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms); in nvme_ctrlr_wait_for_ready() local
276 if (timeout - ticks < 0) { in nvme_ctrlr_wait_for_ready()
278 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); in nvme_ctrlr_wait_for_ready()
310 /* Wait for RDY == 0 or timeout & fail */ in nvme_ctrlr_disable()
316 /* EN == 1, wait for RDY == 1 or timeout & fail */ in nvme_ctrlr_disable()
329 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY) in nvme_ctrlr_disable()
359 /* EN == 0 already wait for RDY == 0 or timeout & fail */ in nvme_ctrlr_enable()
364 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); in nvme_ctrlr_enable()
365 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); in nvme_ctrlr_enable()
367 /* acqs and asqs are 0-based. */ in nvme_ctrlr_enable()
368 qsize = ctrlr->adminq.num_entries - 1; in nvme_ctrlr_enable()
389 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps); in nvme_ctrlr_enable()
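Per the NVMe spec the 0-based admin queue size computed above goes into the AQA register, ASQS in bits 11:0 and ACQS in bits 27:16 (the AQA write itself is not among the matched lines). A hedged sketch of that packing, independent of the driver's register helpers:

    #include <stdint.h>

    /* Pack a 0-based admin queue size into the AQA register layout. */
    static uint32_t
    pack_aqa(uint32_t num_entries)
    {
        uint32_t qsize = num_entries - 1;       /* ASQS/ACQS are 0-based */

        return ((qsize & 0xfff) | ((qsize & 0xfff) << 16));
    }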
402 nvme_admin_qpair_disable(&ctrlr->adminq); in nvme_ctrlr_disable_qpairs()
404 * I/O queues are not allocated before the initial HW in nvme_ctrlr_disable_qpairs()
406 * to determine if this is the initial HW reset. in nvme_ctrlr_disable_qpairs()
408 if (ctrlr->is_initialized) { in nvme_ctrlr_disable_qpairs()
409 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_disable_qpairs()
410 nvme_io_qpair_disable(&ctrlr->ioq[i]); in nvme_ctrlr_disable_qpairs()
421 ctrlr->is_failed_admin = true; in nvme_ctrlr_hw_reset()
431 ctrlr->is_failed_admin = false; in nvme_ctrlr_hw_reset()
442 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1); in nvme_ctrlr_reset()
451 if (!ctrlr->is_dying) in nvme_ctrlr_reset()
452 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task); in nvme_ctrlr_reset()
461 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, in nvme_ctrlr_identify()
470 nvme_controller_data_swapbytes(&ctrlr->cdata); in nvme_ctrlr_identify()
476 if (ctrlr->cdata.mdts > 0) in nvme_ctrlr_identify()
477 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size, in nvme_ctrlr_identify()
478 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT + in nvme_ctrlr_identify()
479 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi))); in nvme_ctrlr_identify()
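MDTS is a power-of-two multiplier on the minimum memory page size, which is why the shift above adds NVME_MPS_SHIFT (the 4 KiB base, 12) and CAP.MPSMIN. Worked with hypothetical values, MDTS = 5 and MPSMIN = 0 give 1 << (5 + 12 + 0) = 128 KiB:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t mps_shift = 12;    /* log2(4096), the minimum page shift */
        uint32_t mpsmin = 0;        /* hypothetical CAP.MPSMIN */
        uint32_t mdts = 5;          /* hypothetical Identify MDTS */
        uint64_t limit = 1ULL << (mdts + mps_shift + mpsmin);

        printf("device transfer limit: %ju bytes\n", (uintmax_t)limit);
        /* prints 131072, i.e. 128 KiB */
        return (0);
    }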
491 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, in nvme_ctrlr_set_num_qpairs()
500 * Data in cdw0 is 0-based. in nvme_ctrlr_set_num_qpairs()
501 * Lower 16-bits indicate number of submission queues allocated. in nvme_ctrlr_set_num_qpairs()
502 * Upper 16-bits indicate number of completion queues allocated. in nvme_ctrlr_set_num_qpairs()
512 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated); in nvme_ctrlr_set_num_qpairs()
513 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated); in nvme_ctrlr_set_num_qpairs()
514 if (ctrlr->num_io_queues > vm_ndomains) in nvme_ctrlr_set_num_qpairs()
515 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains; in nvme_ctrlr_set_num_qpairs()
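A sketch of unpacking the Number of Queues completion dword described above (both counts 0-based, submission queues in the low 16 bits, completion queues in the high 16 bits); the helper is illustrative, not the driver's code:

    #include <stdint.h>

    /* Unpack NSQA/NCQA from a Set Features: Number of Queues cdw0. */
    static void
    parse_num_queues(uint32_t cdw0, uint32_t *sq_allocated, uint32_t *cq_allocated)
    {
        *sq_allocated = (cdw0 & 0xffff) + 1;    /* 0-based -> count */
        *cq_allocated = (cdw0 >> 16) + 1;
    }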
527 for (i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_create_qpairs()
528 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_create_qpairs()
558 for (int i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_delete_qpairs()
559 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_delete_qpairs()
589 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) { in nvme_ctrlr_construct_namespaces()
590 ns = &ctrlr->ns[i]; in nvme_ctrlr_construct_namespaces()
624 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE); in nvme_ctrlr_get_log_page_size()
702 memcpy(&aer->cpl, cpl, sizeof(*cpl)); in nvme_ctrlr_async_event_cb()
703 aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0); in nvme_ctrlr_async_event_cb()
704 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x," in nvme_ctrlr_async_event_cb()
705 " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0), in nvme_ctrlr_async_event_cb()
706 NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0), in nvme_ctrlr_async_event_cb()
707 aer->log_page_id); in nvme_ctrlr_async_event_cb()
708 taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task); in nvme_ctrlr_async_event_cb()
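The NVMEV() extractions above pull the three fields the spec defines in an Asynchronous Event Request completion dword 0: event type in bits 2:0, event information in bits 15:8, and log page identifier in bits 23:16. A plain-C sketch of the same unpacking:

    #include <stdint.h>

    /* Async Event Request completion dword 0 layout (per the NVMe spec). */
    static void
    parse_aer_cdw0(uint32_t cdw0, uint8_t *type, uint8_t *info, uint8_t *log_page)
    {
        *type     = cdw0 & 0x7;             /* bits 2:0  */
        *info     = (cdw0 >> 8) & 0xff;     /* bits 15:8 */
        *log_page = (cdw0 >> 16) & 0xff;    /* bits 23:16 */
    }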
724 if (ctrlr->is_resetting) in nvme_ctrlr_construct_and_submit_aer()
727 aer->ctrlr = ctrlr; in nvme_ctrlr_construct_and_submit_aer()
730 aer->req = req; in nvme_ctrlr_construct_and_submit_aer()
731 aer->log_page_id = 0; /* Not a valid page */ in nvme_ctrlr_construct_and_submit_aer()
734 * Disable timeout here, since asynchronous event requests should by in nvme_ctrlr_construct_and_submit_aer()
737 req->timeout = false; in nvme_ctrlr_construct_and_submit_aer()
738 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST; in nvme_ctrlr_construct_and_submit_aer()
749 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE | in nvme_ctrlr_configure_aer()
753 if (ctrlr->cdata.ver >= NVME_REV(1, 2)) in nvme_ctrlr_configure_aer()
754 ctrlr->async_event_config |= in nvme_ctrlr_configure_aer()
755 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE | in nvme_ctrlr_configure_aer()
767 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE; in nvme_ctrlr_configure_aer()
770 ctrlr->async_event_config, NULL, NULL); in nvme_ctrlr_configure_aer()
772 /* aerl is a zero-based value, so we need to add 1 here. */ in nvme_ctrlr_configure_aer()
773 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1)); in nvme_ctrlr_configure_aer()
775 for (i = 0; i < ctrlr->num_aers; i++) { in nvme_ctrlr_configure_aer()
776 aer = &ctrlr->aer[i]; in nvme_ctrlr_configure_aer()
785 ctrlr->int_coal_time = 0; in nvme_ctrlr_configure_int_coalescing()
786 TUNABLE_INT_FETCH("hw.nvme.int_coal_time", in nvme_ctrlr_configure_int_coalescing()
787 &ctrlr->int_coal_time); in nvme_ctrlr_configure_int_coalescing()
789 ctrlr->int_coal_threshold = 0; in nvme_ctrlr_configure_int_coalescing()
790 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold", in nvme_ctrlr_configure_int_coalescing()
791 &ctrlr->int_coal_threshold); in nvme_ctrlr_configure_int_coalescing()
793 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time, in nvme_ctrlr_configure_int_coalescing()
794 ctrlr->int_coal_threshold, NULL, NULL); in nvme_ctrlr_configure_int_coalescing()
803 if (ctrlr->hmb_desc_paddr) { in nvme_ctrlr_hmb_free()
804 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_free()
805 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, in nvme_ctrlr_hmb_free()
806 ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_free()
807 ctrlr->hmb_desc_paddr = 0; in nvme_ctrlr_hmb_free()
809 if (ctrlr->hmb_desc_tag) { in nvme_ctrlr_hmb_free()
810 bus_dma_tag_destroy(ctrlr->hmb_desc_tag); in nvme_ctrlr_hmb_free()
811 ctrlr->hmb_desc_tag = NULL; in nvme_ctrlr_hmb_free()
813 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_free()
814 hmbc = &ctrlr->hmb_chunks[i]; in nvme_ctrlr_hmb_free()
815 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map); in nvme_ctrlr_hmb_free()
816 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, in nvme_ctrlr_hmb_free()
817 hmbc->hmbc_map); in nvme_ctrlr_hmb_free()
819 ctrlr->hmb_nchunks = 0; in nvme_ctrlr_hmb_free()
820 if (ctrlr->hmb_tag) { in nvme_ctrlr_hmb_free()
821 bus_dma_tag_destroy(ctrlr->hmb_tag); in nvme_ctrlr_hmb_free()
822 ctrlr->hmb_tag = NULL; in nvme_ctrlr_hmb_free()
824 if (ctrlr->hmb_chunks) { in nvme_ctrlr_hmb_free()
825 free(ctrlr->hmb_chunks, M_NVME); in nvme_ctrlr_hmb_free()
826 ctrlr->hmb_chunks = NULL; in nvme_ctrlr_hmb_free()
840 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max); in nvme_ctrlr_hmb_alloc()
846 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS; in nvme_ctrlr_hmb_alloc()
849 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max); in nvme_ctrlr_hmb_alloc()
850 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
851 if (min > 0 && ctrlr->cdata.hmmaxd > 0) in nvme_ctrlr_hmb_alloc()
852 minc = MAX(minc, min / ctrlr->cdata.hmmaxd); in nvme_ctrlr_hmb_alloc()
853 ctrlr->hmb_chunk = pref; in nvme_ctrlr_hmb_alloc()
858 * are all based on the current MPS (ctrlr->page_size). in nvme_ctrlr_hmb_alloc()
860 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
861 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk); in nvme_ctrlr_hmb_alloc()
862 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd) in nvme_ctrlr_hmb_alloc()
863 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd; in nvme_ctrlr_hmb_alloc()
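HMPRE and HMMIN from Identify Controller are in 4 KiB units (NVME_HMB_UNITS), so the lines above scale them to bytes and then split the preferred size into page-aligned chunks capped by HMMAXD. A worked sketch of that arithmetic with hypothetical inputs (HMPRE = 8192 units = 32 MiB, 4 MiB chunks), using local stand-ins for the roundup2/howmany macros:

    #include <stdio.h>
    #include <stdint.h>

    #define HMB_UNITS       4096ULL     /* bytes per HMPRE/HMMIN unit */
    #define ROUNDUP2(x, y)  (((x) + ((y) - 1)) & ~((uint64_t)(y) - 1))
    #define HOWMANY(x, y)   (((x) + ((y) - 1)) / (y))

    int
    main(void)
    {
        uint64_t hmpre = 8192;              /* hypothetical Identify HMPRE */
        uint64_t page_size = 4096;
        uint64_t pref = hmpre * HMB_UNITS;  /* 32 MiB preferred */
        uint64_t chunk = 4 * 1024 * 1024;   /* hypothetical chunk size */
        uint64_t nchunks;

        chunk = ROUNDUP2(chunk, page_size);
        nchunks = HOWMANY(pref, chunk);
        printf("pref=%ju bytes in %ju chunks of %ju bytes\n",
            (uintmax_t)pref, (uintmax_t)nchunks, (uintmax_t)chunk);
        /* prints 33554432 bytes in 8 chunks of 4194304 bytes */
        return (0);
    }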
864 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) * in nvme_ctrlr_hmb_alloc()
865 ctrlr->hmb_nchunks, M_NVME, M_WAITOK); in nvme_ctrlr_hmb_alloc()
866 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), in nvme_ctrlr_hmb_alloc()
867 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, in nvme_ctrlr_hmb_alloc()
868 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag); in nvme_ctrlr_hmb_alloc()
875 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_alloc()
876 hmbc = &ctrlr->hmb_chunks[i]; in nvme_ctrlr_hmb_alloc()
877 if (bus_dmamem_alloc(ctrlr->hmb_tag, in nvme_ctrlr_hmb_alloc()
878 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT, in nvme_ctrlr_hmb_alloc()
879 &hmbc->hmbc_map)) { in nvme_ctrlr_hmb_alloc()
883 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map, in nvme_ctrlr_hmb_alloc()
884 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map, in nvme_ctrlr_hmb_alloc()
885 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) { in nvme_ctrlr_hmb_alloc()
886 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, in nvme_ctrlr_hmb_alloc()
887 hmbc->hmbc_map); in nvme_ctrlr_hmb_alloc()
891 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map, in nvme_ctrlr_hmb_alloc()
895 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min && in nvme_ctrlr_hmb_alloc()
896 ctrlr->hmb_chunk / 2 >= minc) { in nvme_ctrlr_hmb_alloc()
897 ctrlr->hmb_nchunks = i; in nvme_ctrlr_hmb_alloc()
899 ctrlr->hmb_chunk /= 2; in nvme_ctrlr_hmb_alloc()
902 ctrlr->hmb_nchunks = i; in nvme_ctrlr_hmb_alloc()
903 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) { in nvme_ctrlr_hmb_alloc()
908 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks; in nvme_ctrlr_hmb_alloc()
909 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), in nvme_ctrlr_hmb_alloc()
911 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag); in nvme_ctrlr_hmb_alloc()
917 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag, in nvme_ctrlr_hmb_alloc()
918 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK, in nvme_ctrlr_hmb_alloc()
919 &ctrlr->hmb_desc_map)) { in nvme_ctrlr_hmb_alloc()
924 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, in nvme_ctrlr_hmb_alloc()
925 ctrlr->hmb_desc_vaddr, size, nvme_single_map, in nvme_ctrlr_hmb_alloc()
926 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) { in nvme_ctrlr_hmb_alloc()
927 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, in nvme_ctrlr_hmb_alloc()
928 ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_alloc()
934 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_alloc()
935 memset(&ctrlr->hmb_desc_vaddr[i], 0, in nvme_ctrlr_hmb_alloc()
937 ctrlr->hmb_desc_vaddr[i].addr = in nvme_ctrlr_hmb_alloc()
938 htole64(ctrlr->hmb_chunks[i].hmbc_paddr); in nvme_ctrlr_hmb_alloc()
939 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
941 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, in nvme_ctrlr_hmb_alloc()
945 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk in nvme_ctrlr_hmb_alloc()
962 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size, in nvme_ctrlr_hmb_enable()
963 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32, in nvme_ctrlr_hmb_enable()
964 ctrlr->hmb_nchunks, NULL, 0, in nvme_ctrlr_hmb_enable()
988 nvme_qpair_reset(&ctrlr->adminq); in nvme_ctrlr_start()
989 nvme_admin_qpair_enable(&ctrlr->adminq); in nvme_ctrlr_start()
992 if (ctrlr->ioq != NULL) { in nvme_ctrlr_start()
993 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_start()
994 nvme_qpair_reset(&ctrlr->ioq[i]); in nvme_ctrlr_start()
998 * If it was a reset on initialization command timeout, just in nvme_ctrlr_start()
1001 if (resetting && !ctrlr->is_initialized) in nvme_ctrlr_start()
1012 * HW limit. We call SET_FEATURES again here so that it gets called in nvme_ctrlr_start()
1018 old_num_io_queues = ctrlr->num_io_queues; in nvme_ctrlr_start()
1024 if (old_num_io_queues != ctrlr->num_io_queues) { in nvme_ctrlr_start()
1026 old_num_io_queues, ctrlr->num_io_queues); in nvme_ctrlr_start()
1030 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) { in nvme_ctrlr_start()
1032 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_start()
1034 } else if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_start()
1050 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_start()
1051 nvme_io_qpair_enable(&ctrlr->ioq[i]); in nvme_ctrlr_start()
1062 if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) { in nvme_ctrlr_start_config_hook()
1064 config_intrhook_disestablish(&ctrlr->config_hook); in nvme_ctrlr_start_config_hook()
1068 nvme_qpair_reset(&ctrlr->adminq); in nvme_ctrlr_start_config_hook()
1069 nvme_admin_qpair_enable(&ctrlr->adminq); in nvme_ctrlr_start_config_hook()
1079 config_intrhook_disestablish(&ctrlr->config_hook); in nvme_ctrlr_start_config_hook()
1081 if (!ctrlr->is_failed) { in nvme_ctrlr_start_config_hook()
1082 ctrlr->is_initialized = true; in nvme_ctrlr_start_config_hook()
1104 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_reset_task()
1112 mtx_lock(&aer->mtx); in nvme_ctrlr_aer_done()
1114 aer->log_page_size = (uint32_t)-1; in nvme_ctrlr_aer_done()
1116 aer->log_page_size = nvme_ctrlr_get_log_page_size( in nvme_ctrlr_aer_done()
1117 aer->ctrlr, aer->log_page_id); in nvme_ctrlr_aer_done()
1119 mtx_unlock(&aer->mtx); in nvme_ctrlr_aer_done()
1126 struct nvme_controller *ctrlr = aer->ctrlr; in nvme_ctrlr_aer_task()
1132 if (ctrlr->is_resetting) in nvme_ctrlr_aer_task()
1135 if (!is_log_page_id_valid(aer->log_page_id)) { in nvme_ctrlr_aer_task()
1140 nvme_notify_async_consumers(ctrlr, &aer->cpl, aer->log_page_id, in nvme_ctrlr_aer_task()
1146 aer->log_page_size = 0; in nvme_ctrlr_aer_task()
1147 len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id); in nvme_ctrlr_aer_task()
1148 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id, in nvme_ctrlr_aer_task()
1149 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len, in nvme_ctrlr_aer_task()
1151 mtx_lock(&aer->mtx); in nvme_ctrlr_aer_task()
1152 while (aer->log_page_size == 0) in nvme_ctrlr_aer_task()
1153 mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0); in nvme_ctrlr_aer_task()
1154 mtx_unlock(&aer->mtx); in nvme_ctrlr_aer_task()
1156 if (aer->log_page_size != (uint32_t)-1) { in nvme_ctrlr_aer_task()
1162 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, in nvme_ctrlr_aer_task()
1163 aer->log_page_id, NULL, 0); in nvme_ctrlr_aer_task()
1168 switch (aer->log_page_id) { in nvme_ctrlr_aer_task()
1171 (struct nvme_error_information_entry *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1172 for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++) in nvme_ctrlr_aer_task()
1178 (struct nvme_health_information_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1182 (struct nvme_ns_list *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1186 (struct nvme_command_effects_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1190 (struct nvme_res_notification_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1194 (struct nvme_sanitize_status_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1200 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { in nvme_ctrlr_aer_task()
1202 (struct nvme_health_information_page *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1210 nvme_ctrlr_log_critical_warnings(aer->ctrlr, in nvme_ctrlr_aer_task()
1211 health_info->critical_warning); in nvme_ctrlr_aer_task()
1212 aer->ctrlr->async_event_config &= in nvme_ctrlr_aer_task()
1213 ~health_info->critical_warning; in nvme_ctrlr_aer_task()
1214 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, in nvme_ctrlr_aer_task()
1215 aer->ctrlr->async_event_config, NULL, NULL); in nvme_ctrlr_aer_task()
1216 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) { in nvme_ctrlr_aer_task()
1218 (struct nvme_ns_list *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1219 for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { in nvme_ctrlr_aer_task()
1220 if (nsl->ns[i] > NVME_MAX_NAMESPACES) in nvme_ctrlr_aer_task()
1222 nvme_notify_ns(aer->ctrlr, nsl->ns[i]); in nvme_ctrlr_aer_task()
1230 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, in nvme_ctrlr_aer_task()
1231 aer->log_page_id, aer->log_page_buffer, aer->log_page_size); in nvme_ctrlr_aer_task()
1249 nvme_qpair_process_completions(&ctrlr->adminq); in nvme_ctrlr_poll()
1251 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_poll()
1252 if (ctrlr->ioq && ctrlr->ioq[i].cpl) in nvme_ctrlr_poll()
1253 nvme_qpair_process_completions(&ctrlr->ioq[i]); in nvme_ctrlr_poll()
1257 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1275 struct mtx *mtx = pt->driver_lock; in nvme_pt_done()
1278 bzero(&pt->cpl, sizeof(pt->cpl)); in nvme_pt_done()
1279 pt->cpl.cdw0 = cpl->cdw0; in nvme_pt_done()
1281 status = cpl->status; in nvme_pt_done()
1283 pt->cpl.status = status; in nvme_pt_done()
1286 pt->driver_lock = NULL; in nvme_pt_done()
1301 if (pt->len > 0) { in nvme_ctrlr_passthrough_cmd()
1302 if (pt->len > ctrlr->max_xfer_size) { in nvme_ctrlr_passthrough_cmd()
1303 nvme_printf(ctrlr, "pt->len (%d) " in nvme_ctrlr_passthrough_cmd()
1304 "exceeds max_xfer_size (%d)\n", pt->len, in nvme_ctrlr_passthrough_cmd()
1305 ctrlr->max_xfer_size); in nvme_ctrlr_passthrough_cmd()
1310 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE; in nvme_ctrlr_passthrough_cmd()
1311 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) { in nvme_ctrlr_passthrough_cmd()
1315 req = nvme_allocate_request_vaddr(buf->b_data, pt->len, in nvme_ctrlr_passthrough_cmd()
1318 req = nvme_allocate_request_vaddr(pt->buf, pt->len, in nvme_ctrlr_passthrough_cmd()
1323 /* Assume user space already converted to little-endian */ in nvme_ctrlr_passthrough_cmd()
1324 req->cmd.opc = pt->cmd.opc; in nvme_ctrlr_passthrough_cmd()
1325 req->cmd.fuse = pt->cmd.fuse; in nvme_ctrlr_passthrough_cmd()
1326 req->cmd.rsvd2 = pt->cmd.rsvd2; in nvme_ctrlr_passthrough_cmd()
1327 req->cmd.rsvd3 = pt->cmd.rsvd3; in nvme_ctrlr_passthrough_cmd()
1328 req->cmd.cdw10 = pt->cmd.cdw10; in nvme_ctrlr_passthrough_cmd()
1329 req->cmd.cdw11 = pt->cmd.cdw11; in nvme_ctrlr_passthrough_cmd()
1330 req->cmd.cdw12 = pt->cmd.cdw12; in nvme_ctrlr_passthrough_cmd()
1331 req->cmd.cdw13 = pt->cmd.cdw13; in nvme_ctrlr_passthrough_cmd()
1332 req->cmd.cdw14 = pt->cmd.cdw14; in nvme_ctrlr_passthrough_cmd()
1333 req->cmd.cdw15 = pt->cmd.cdw15; in nvme_ctrlr_passthrough_cmd()
1335 req->cmd.nsid = htole32(nsid); in nvme_ctrlr_passthrough_cmd()
1338 pt->driver_lock = mtx; in nvme_ctrlr_passthrough_cmd()
1346 while (pt->driver_lock != NULL) in nvme_ctrlr_passthrough_cmd()
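The handler above services NVME_PASSTHROUGH_CMD, and its comment notes that userland is expected to hand in command dwords already in little-endian. A minimal userspace sketch that exercises it with an Identify Controller command, assuming the standard <dev/nvme/nvme.h> interface and a hypothetical /dev/nvme0 node (error handling trimmed):

    #include <sys/ioctl.h>
    #include <sys/endian.h>
    #include <dev/nvme/nvme.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct nvme_pt_command pt;
        struct nvme_controller_data cdata;
        int fd = open("/dev/nvme0", O_RDWR);    /* hypothetical controller node */

        memset(&pt, 0, sizeof(pt));
        pt.cmd.opc = NVME_OPC_IDENTIFY;         /* admin opcode 0x06 */
        pt.cmd.cdw10 = htole32(1);              /* CNS=1: controller data */
        pt.buf = &cdata;
        pt.len = sizeof(cdata);
        pt.is_read = 1;                         /* device-to-host transfer */

        if (fd < 0 || ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0 ||
            nvme_completion_is_error(&pt.cpl))
            fprintf(stderr, "identify failed\n");
        else
            printf("model: %.40s\n", (const char *)cdata.mn);
        if (fd >= 0)
            close(fd);
        return (0);
    }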
1363 struct mtx *mtx = (void *)(uintptr_t)npc->metadata; in nvme_npc_done()
1365 npc->result = cpl->cdw0; /* cpl in host order by now */ in nvme_npc_done()
1367 npc->metadata = 0; in nvme_npc_done()
1386 if (npc->metadata != 0 || npc->metadata_len != 0) in nvme_ctrlr_linux_passthru_cmd()
1389 if (npc->data_len > 0 && npc->addr != 0) { in nvme_ctrlr_linux_passthru_cmd()
1390 if (npc->data_len > ctrlr->max_xfer_size) { in nvme_ctrlr_linux_passthru_cmd()
1392 "npc->data_len (%d) exceeds max_xfer_size (%d)\n", in nvme_ctrlr_linux_passthru_cmd()
1393 npc->data_len, ctrlr->max_xfer_size); in nvme_ctrlr_linux_passthru_cmd()
1402 if ((npc->opcode & 0x3) == 3) in nvme_ctrlr_linux_passthru_cmd()
1406 buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ; in nvme_ctrlr_linux_passthru_cmd()
1407 if (vmapbuf(buf, (void *)(uintptr_t)npc->addr, in nvme_ctrlr_linux_passthru_cmd()
1408 npc->data_len, 1) < 0) { in nvme_ctrlr_linux_passthru_cmd()
1412 req = nvme_allocate_request_vaddr(buf->b_data, in nvme_ctrlr_linux_passthru_cmd()
1413 npc->data_len, M_WAITOK, nvme_npc_done, npc); in nvme_ctrlr_linux_passthru_cmd()
1416 (void *)(uintptr_t)npc->addr, npc->data_len, in nvme_ctrlr_linux_passthru_cmd()
1421 req->cmd.opc = npc->opcode; in nvme_ctrlr_linux_passthru_cmd()
1422 req->cmd.fuse = npc->flags; in nvme_ctrlr_linux_passthru_cmd()
1423 req->cmd.rsvd2 = htole16(npc->cdw2); in nvme_ctrlr_linux_passthru_cmd()
1424 req->cmd.rsvd3 = htole16(npc->cdw3); in nvme_ctrlr_linux_passthru_cmd()
1425 req->cmd.cdw10 = htole32(npc->cdw10); in nvme_ctrlr_linux_passthru_cmd()
1426 req->cmd.cdw11 = htole32(npc->cdw11); in nvme_ctrlr_linux_passthru_cmd()
1427 req->cmd.cdw12 = htole32(npc->cdw12); in nvme_ctrlr_linux_passthru_cmd()
1428 req->cmd.cdw13 = htole32(npc->cdw13); in nvme_ctrlr_linux_passthru_cmd()
1429 req->cmd.cdw14 = htole32(npc->cdw14); in nvme_ctrlr_linux_passthru_cmd()
1430 req->cmd.cdw15 = htole32(npc->cdw15); in nvme_ctrlr_linux_passthru_cmd()
1432 req->cmd.nsid = htole32(nsid); in nvme_ctrlr_linux_passthru_cmd()
1435 npc->metadata = (uintptr_t) mtx; in nvme_ctrlr_linux_passthru_cmd()
1437 /* XXX no timeout passed down */ in nvme_ctrlr_linux_passthru_cmd()
1444 while (npc->metadata != 0) in nvme_ctrlr_linux_passthru_cmd()
1464 ctrlr = cdev->si_drv1; in nvme_ctrlr_ioctl()
1473 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid), in nvme_ctrlr_ioctl()
1478 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev), in nvme_ctrlr_ioctl()
1479 sizeof(gnsid->cdev)); in nvme_ctrlr_ioctl()
1480 gnsid->nsid = 0; in nvme_ctrlr_ioctl()
1484 *(uint64_t *)arg = ctrlr->max_xfer_size; in nvme_ctrlr_ioctl()
1487 memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata)); in nvme_ctrlr_ioctl()
1491 td->td_retval[0] = 0xfffffffful; in nvme_ctrlr_ioctl()
1498 return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true, in nvme_ctrlr_ioctl()
1524 ctrlr->dev = dev; in nvme_ctrlr_construct()
1526 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF); in nvme_ctrlr_construct()
1527 if (bus_get_domain(dev, &ctrlr->domain) != 0) in nvme_ctrlr_construct()
1528 ctrlr->domain = 0; in nvme_ctrlr_construct()
1530 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo); in nvme_ctrlr_construct()
1540 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi); in nvme_ctrlr_construct()
1575 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2; in nvme_ctrlr_construct()
1577 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi); in nvme_ctrlr_construct()
1578 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps); in nvme_ctrlr_construct()
1580 /* Get ready timeout value from controller, in units of 500ms. */ in nvme_ctrlr_construct()
1582 ctrlr->ready_timeout_in_ms = to * 500; in nvme_ctrlr_construct()
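CAP.TO is an 8-bit field counted in 500 ms units, so the conversion above yields at most 127500 ms; a TO of 30, for example, produces the 15000 ms deadline used by the RDY wait loop earlier. The conversion, as a one-line helper:

    #include <stdint.h>

    /* CAP.TO counts 500 ms units; e.g. to = 30 -> 15000 ms. */
    static uint32_t
    cap_to_ms(uint8_t to)
    {
        return ((uint32_t)to * 500);
    }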
1585 TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period); in nvme_ctrlr_construct()
1588 ctrlr->admin_timeout_period = timeout_period; in nvme_ctrlr_construct()
1591 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period); in nvme_ctrlr_construct()
1594 ctrlr->timeout_period = timeout_period; in nvme_ctrlr_construct()
1597 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count); in nvme_ctrlr_construct()
1599 ctrlr->enable_aborts = 0; in nvme_ctrlr_construct()
1600 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts); in nvme_ctrlr_construct()
1602 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK); in nvme_ctrlr_construct()
1604 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */ in nvme_ctrlr_construct()
1605 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size)); in nvme_ctrlr_construct()
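The cap above reflects a single page-sized PRP list: each PRP entry is 8 bytes, so one page of entries maps page_size / 8 data pages, i.e. 4096 / 8 * 4096 = 2 MiB with 4 KiB pages, and the result is further bounded by maxphys. A worked sketch (maxphys value hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t page_size = 4096;          /* CC.MPS-selected page size */
        uint64_t maxphys = 1024 * 1024;     /* hypothetical kernel maxphys */
        uint64_t prp_limit = page_size / 8 * page_size;     /* 2 MiB */
        uint64_t max_xfer = prp_limit < maxphys ? prp_limit : maxphys;

        printf("prp_limit=%ju max_xfer=%ju\n",
            (uintmax_t)prp_limit, (uintmax_t)max_xfer);
        return (0);
    }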
1615 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, in nvme_ctrlr_construct()
1616 taskqueue_thread_enqueue, &ctrlr->taskqueue); in nvme_ctrlr_construct()
1617 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq"); in nvme_ctrlr_construct()
1619 ctrlr->is_resetting = 0; in nvme_ctrlr_construct()
1620 ctrlr->is_initialized = false; in nvme_ctrlr_construct()
1621 ctrlr->notification_sent = 0; in nvme_ctrlr_construct()
1622 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); in nvme_ctrlr_construct()
1624 struct nvme_async_event_request *aer = &ctrlr->aer[i]; in nvme_ctrlr_construct()
1626 TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer); in nvme_ctrlr_construct()
1627 mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF); in nvme_ctrlr_construct()
1629 ctrlr->is_failed = false; in nvme_ctrlr_construct()
1638 status = make_dev_s(&md_args, &ctrlr->cdev, "%s", in nvme_ctrlr_construct()
1658 ctrlr->is_dying = true; in nvme_ctrlr_destruct()
1660 if (ctrlr->resource == NULL) in nvme_ctrlr_destruct()
1662 if (!mtx_initialized(&ctrlr->adminq.lock)) in nvme_ctrlr_destruct()
1682 nvme_ns_destruct(&ctrlr->ns[i]); in nvme_ctrlr_destruct()
1684 if (ctrlr->cdev) in nvme_ctrlr_destruct()
1685 destroy_dev(ctrlr->cdev); in nvme_ctrlr_destruct()
1687 if (ctrlr->is_initialized) { in nvme_ctrlr_destruct()
1689 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_destruct()
1695 if (ctrlr->ioq != NULL) { in nvme_ctrlr_destruct()
1696 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_destruct()
1697 nvme_io_qpair_destroy(&ctrlr->ioq[i]); in nvme_ctrlr_destruct()
1698 free(ctrlr->ioq, M_NVME); in nvme_ctrlr_destruct()
1700 nvme_admin_qpair_destroy(&ctrlr->adminq); in nvme_ctrlr_destruct()
1716 if (ctrlr->taskqueue) { in nvme_ctrlr_destruct()
1717 taskqueue_free(ctrlr->taskqueue); in nvme_ctrlr_destruct()
1719 struct nvme_async_event_request *aer = &ctrlr->aer[i]; in nvme_ctrlr_destruct()
1721 mtx_destroy(&aer->mtx); in nvme_ctrlr_destruct()
1725 if (ctrlr->tag) in nvme_ctrlr_destruct()
1726 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); in nvme_ctrlr_destruct()
1728 if (ctrlr->res) in nvme_ctrlr_destruct()
1729 bus_release_resource(ctrlr->dev, SYS_RES_IRQ, in nvme_ctrlr_destruct()
1730 rman_get_rid(ctrlr->res), ctrlr->res); in nvme_ctrlr_destruct()
1732 if (ctrlr->bar4_resource != NULL) { in nvme_ctrlr_destruct()
1734 ctrlr->bar4_resource_id, ctrlr->bar4_resource); in nvme_ctrlr_destruct()
1738 ctrlr->resource_id, ctrlr->resource); in nvme_ctrlr_destruct()
1741 if (ctrlr->alignment_splits) in nvme_ctrlr_destruct()
1742 counter_u64_free(ctrlr->alignment_splits); in nvme_ctrlr_destruct()
1744 mtx_destroy(&ctrlr->lock); in nvme_ctrlr_destruct()
1752 int timeout; in nvme_ctrlr_shutdown() local
1759 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz : in nvme_ctrlr_shutdown()
1760 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000); in nvme_ctrlr_shutdown()
1767 if (timeout - ticks < 0) { in nvme_ctrlr_shutdown()
1768 nvme_printf(ctrlr, "shutdown timeout\n"); in nvme_ctrlr_shutdown()
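RTD3E from Identify Controller is expressed in microseconds; the expression above converts it to ticks with a ceiling division and falls back to five seconds when the field is zero. Worked with hypothetical values (RTD3E = 10 s, hz = 1000), the deadline is now + 10000 ticks:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t rtd3e = 10000000;      /* hypothetical RTD3E: 10 s in usec */
        int hz = 1000;                  /* hypothetical tick rate */
        int64_t delta;

        /* Ceiling division from microseconds to ticks, 5*hz if unreported. */
        delta = rtd3e == 0 ? 5 * hz :
            (int64_t)(((uint64_t)rtd3e * hz + 999999) / 1000000);
        printf("shutdown deadline: now + %jd ticks\n", (intmax_t)delta);
        return (0);
    }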
1780 nvme_qpair_submit_request(&ctrlr->adminq, req); in nvme_ctrlr_submit_admin_request()
1789 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)]; in nvme_ctrlr_submit_io_request()
1797 return (ctrlr->dev); in nvme_ctrlr_get_device()
1804 return (&ctrlr->cdata); in nvme_ctrlr_get_data()
1817 if (ctrlr->is_failed) in nvme_ctrlr_suspend()
1827 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0) in nvme_ctrlr_suspend()
1835 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_suspend()
1860 if (ctrlr->is_failed) in nvme_ctrlr_resume()
1873 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_resume()
1884 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_resume()