Lines Matching +full:freeze +full:- +full:bridge +full:- +full:controller

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2016 Intel Corporation
52 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
60 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags); in nvme_ctrlr_barrier()
72 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev)); in nvme_ctrlr_devctl_va()
76 devctl_notify("nvme", "controller", type, sbuf_data(&sb)); in nvme_ctrlr_devctl_va()
99 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev)); in nvme_ctrlr_devctl_log()
119 qpair = &ctrlr->adminq; in nvme_ctrlr_construct_admin_qpair()
120 qpair->id = 0; in nvme_ctrlr_construct_admin_qpair()
121 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; in nvme_ctrlr_construct_admin_qpair()
122 qpair->domain = ctrlr->domain; in nvme_ctrlr_construct_admin_qpair()
139 * max I/O xfer size. 16KB is sufficient here - maybe even less? in nvme_ctrlr_construct_admin_qpair()
146 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
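
A minimal standalone sketch of the QP() mapping above (the CPU and queue counts are assumptions, not values from this system): each CPU index is scaled linearly onto the queue range, so when there are more CPUs than I/O queues, neighbouring CPUs share a queue.

    /* Illustration of the QP() arithmetic; not driver code. */
    #include <stdio.h>

    static unsigned
    qp(unsigned cpu, unsigned num_io_queues, unsigned mp_ncpus)
    {
            return (cpu * num_io_queues / mp_ncpus);
    }

    int
    main(void)
    {
            for (unsigned c = 0; c < 8; c++)        /* assume 8 CPUs, 4 I/O queues */
                    printf("cpu %u -> ioq %u\n", c, qp(c, 4, 8));
            /* cpus 0-1 land on queue 0, 2-3 on 1, 4-5 on 2, 6-7 on 3 */
            return (0);
    }
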
163 * fail-safe. in nvme_ctrlr_construct_io_qpairs()
166 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) / in nvme_ctrlr_construct_io_qpairs()
167 (1 << (ctrlr->dstrd + 1)); in nvme_ctrlr_construct_io_qpairs()
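
A rough standalone illustration of that fail-safe (the BAR size and CAP.DSTRD below are assumptions): doorbells start at offset 0x1000 in the register BAR, each queue pair owns an SQ tail plus a CQ head doorbell, and the driver's dstrd already includes the spec's +2 shift, so one pair occupies 1 << (dstrd + 1) bytes.

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long bar_size = 16384;         /* assumed size of the register BAR */
            unsigned long db_offset = 0x1000;       /* doorbell[0] offset in the register map */
            unsigned dstrd = 0 + 2;                 /* assumed CAP.DSTRD = 0; driver stores DSTRD + 2 */

            unsigned long max_qpairs = (bar_size - db_offset) / (1UL << (dstrd + 1));
            printf("BAR fits %lu queue pairs\n", max_qpairs);       /* (16384 - 4096) / 8 = 1536 */
            return (0);
    }
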
182 * also that for a queue size of N, we can only have (N-1) commands in nvme_ctrlr_construct_io_qpairs()
183 * outstanding, hence the "-1" here. in nvme_ctrlr_construct_io_qpairs()
185 num_trackers = min(num_trackers, (num_entries-1)); in nvme_ctrlr_construct_io_qpairs()
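
A ring of N entries can hold at most N-1 commands because a completely full queue would be indistinguishable from an empty one (head == tail in both cases); a tiny sketch of the clamp, with hypothetical sizes:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned num_entries = 256;     /* assumed I/O queue depth */
            unsigned num_trackers = 300;    /* assumed requested tracker count */

            if (num_trackers > num_entries - 1)
                    num_trackers = num_entries - 1;         /* same clamp as above */
            printf("usable trackers: %u\n", num_trackers);  /* 255 */
            return (0);
    }
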
191 * of the storage system grows multi-queue support. in nvme_ctrlr_construct_io_qpairs()
193 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4; in nvme_ctrlr_construct_io_qpairs()
195 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair), in nvme_ctrlr_construct_io_qpairs()
198 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) { in nvme_ctrlr_construct_io_qpairs()
199 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_construct_io_qpairs()
202 * Admin queue has ID=0. IO queues start at ID=1 - in nvme_ctrlr_construct_io_qpairs()
205 qpair->id = i + 1; in nvme_ctrlr_construct_io_qpairs()
206 if (ctrlr->num_io_queues > 1) { in nvme_ctrlr_construct_io_qpairs()
211 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n; in nvme_ctrlr_construct_io_qpairs()
212 qpair->domain = pcpu_find(qpair->cpu)->pc_domain; in nvme_ctrlr_construct_io_qpairs()
214 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; in nvme_ctrlr_construct_io_qpairs()
215 qpair->domain = ctrlr->domain; in nvme_ctrlr_construct_io_qpairs()
219 * For I/O queues, use the controller-wide max_xfer_size in nvme_ctrlr_construct_io_qpairs()
229 * interrupt thread for this controller. in nvme_ctrlr_construct_io_qpairs()
231 if (ctrlr->num_io_queues > 1) in nvme_ctrlr_construct_io_qpairs()
232 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu); in nvme_ctrlr_construct_io_qpairs()
248 ctrlr->is_failed = true; in nvme_ctrlr_fail()
250 ctrlr->is_failed_admin = true; in nvme_ctrlr_fail()
251 nvme_qpair_fail(&ctrlr->adminq); in nvme_ctrlr_fail()
253 if (ctrlr->ioq != NULL) { in nvme_ctrlr_fail()
254 for (i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_fail()
255 nvme_qpair_fail(&ctrlr->ioq[i]); in nvme_ctrlr_fail()
270 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms); in nvme_ctrlr_wait_for_ready()
280 if (timeout - ticks < 0) { in nvme_ctrlr_wait_for_ready()
281 nvme_printf(ctrlr, "controller ready did not become %d " in nvme_ctrlr_wait_for_ready()
282 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); in nvme_ctrlr_wait_for_ready()
330 * A few drives have firmware bugs that freeze the drive if we access in nvme_ctrlr_disable()
333 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY) in nvme_ctrlr_disable()
368 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); in nvme_ctrlr_enable()
369 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); in nvme_ctrlr_enable()
371 /* acqs and asqs are 0-based. */ in nvme_ctrlr_enable()
372 qsize = ctrlr->adminq.num_entries - 1; in nvme_ctrlr_enable()
393 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps); in nvme_ctrlr_enable()
406 nvme_admin_qpair_disable(&ctrlr->adminq); in nvme_ctrlr_disable_qpairs()
412 if (ctrlr->is_initialized) { in nvme_ctrlr_disable_qpairs()
413 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_disable_qpairs()
414 nvme_io_qpair_disable(&ctrlr->ioq[i]); in nvme_ctrlr_disable_qpairs()
425 ctrlr->is_failed_admin = true; in nvme_ctrlr_hw_reset()
435 ctrlr->is_failed_admin = false; in nvme_ctrlr_hw_reset()
446 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1); in nvme_ctrlr_reset()
450 * Controller is already resetting. Return immediately since in nvme_ctrlr_reset()
455 if (!ctrlr->is_dying) in nvme_ctrlr_reset()
456 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task); in nvme_ctrlr_reset()
465 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, in nvme_ctrlr_identify()
474 nvme_controller_data_swapbytes(&ctrlr->cdata); in nvme_ctrlr_identify()
478 * controller supports. in nvme_ctrlr_identify()
480 if (ctrlr->cdata.mdts > 0) in nvme_ctrlr_identify()
481 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size, in nvme_ctrlr_identify()
482 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT + in nvme_ctrlr_identify()
483 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi))); in nvme_ctrlr_identify()
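
MDTS is a power-of-two multiplier on the controller's minimum page size (2^(12 + CAP.MPSMIN) bytes), so the clamp above allows 2^MDTS minimum-size pages per transfer; a worked example with assumed register values:

    #include <stdio.h>

    #define MPS_SHIFT 12    /* minimum NVMe page size is 2^12 = 4 KB when CAP.MPSMIN == 0 */

    int
    main(void)
    {
            unsigned mdts = 5;      /* assumed: controller permits 2^5 minimum-size pages */
            unsigned mpsmin = 0;    /* assumed CAP.MPSMIN */

            printf("MDTS cap: %lu bytes\n", 1UL << (mdts + MPS_SHIFT + mpsmin));  /* 131072 */
            return (0);
    }
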
495 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, in nvme_ctrlr_set_num_qpairs()
504 * Data in cdw0 is 0-based. in nvme_ctrlr_set_num_qpairs()
505 * Lower 16-bits indicate number of submission queues allocated. in nvme_ctrlr_set_num_qpairs()
506 * Upper 16-bits indicate number of completion queues allocated. in nvme_ctrlr_set_num_qpairs()
512 * Controller may allocate more queues than we requested, in nvme_ctrlr_set_num_qpairs()
516 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated); in nvme_ctrlr_set_num_qpairs()
517 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated); in nvme_ctrlr_set_num_qpairs()
518 if (ctrlr->num_io_queues > vm_ndomains) in nvme_ctrlr_set_num_qpairs()
519 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains; in nvme_ctrlr_set_num_qpairs()
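
A small standalone decode of that completion dword, using a made-up value, to show how the 0-based halves become queue counts:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t cdw0 = 0x00070007;                     /* hypothetical Set Features result */
            unsigned sq_allocated = (cdw0 & 0xffffu) + 1;   /* low 16 bits, 0-based */
            unsigned cq_allocated = (cdw0 >> 16) + 1;       /* high 16 bits, 0-based */

            printf("%u SQs and %u CQs granted\n", sq_allocated, cq_allocated);  /* 8 and 8 */
            return (0);
    }
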
531 for (i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_create_qpairs()
532 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_create_qpairs()
562 for (int i = 0; i < ctrlr->num_io_queues; i++) { in nvme_ctrlr_delete_qpairs()
563 qpair = &ctrlr->ioq[i]; in nvme_ctrlr_delete_qpairs()
593 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) { in nvme_ctrlr_construct_namespaces()
594 ns = &ctrlr->ns[i]; in nvme_ctrlr_construct_namespaces()
627 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE); in nvme_ctrlr_get_log_page_size()
704 memcpy(&aer->cpl, cpl, sizeof(*cpl)); in nvme_ctrlr_async_event_cb()
705 aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0); in nvme_ctrlr_async_event_cb()
706 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x," in nvme_ctrlr_async_event_cb()
707 " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0), in nvme_ctrlr_async_event_cb()
708 NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0), in nvme_ctrlr_async_event_cb()
709 aer->log_page_id); in nvme_ctrlr_async_event_cb()
710 taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task); in nvme_ctrlr_async_event_cb()
726 if (ctrlr->is_resetting) in nvme_ctrlr_construct_and_submit_aer()
729 aer->ctrlr = ctrlr; in nvme_ctrlr_construct_and_submit_aer()
732 aer->req = req; in nvme_ctrlr_construct_and_submit_aer()
733 aer->log_page_id = 0; /* Not a valid page */ in nvme_ctrlr_construct_and_submit_aer()
739 req->timeout = false; in nvme_ctrlr_construct_and_submit_aer()
740 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST; in nvme_ctrlr_construct_and_submit_aer()
751 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE | in nvme_ctrlr_configure_aer()
755 if (ctrlr->cdata.ver >= NVME_REV(1, 2)) in nvme_ctrlr_configure_aer()
756 ctrlr->async_event_config |= in nvme_ctrlr_configure_aer()
757 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE | in nvme_ctrlr_configure_aer()
769 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE; in nvme_ctrlr_configure_aer()
772 ctrlr->async_event_config, NULL, NULL); in nvme_ctrlr_configure_aer()
774 /* aerl is a zero-based value, so we need to add 1 here. */ in nvme_ctrlr_configure_aer()
775 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1)); in nvme_ctrlr_configure_aer()
777 for (i = 0; i < ctrlr->num_aers; i++) { in nvme_ctrlr_configure_aer()
778 aer = &ctrlr->aer[i]; in nvme_ctrlr_configure_aer()
786 ctrlr->int_coal_time = 0; in nvme_ctrlr_configure_int_coalescing()
788 &ctrlr->int_coal_time); in nvme_ctrlr_configure_int_coalescing()
790 ctrlr->int_coal_threshold = 0; in nvme_ctrlr_configure_int_coalescing()
792 &ctrlr->int_coal_threshold); in nvme_ctrlr_configure_int_coalescing()
794 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time, in nvme_ctrlr_configure_int_coalescing()
795 ctrlr->int_coal_threshold, NULL, NULL); in nvme_ctrlr_configure_int_coalescing()
804 if (ctrlr->hmb_desc_paddr) { in nvme_ctrlr_hmb_free()
805 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_free()
806 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, in nvme_ctrlr_hmb_free()
807 ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_free()
808 ctrlr->hmb_desc_paddr = 0; in nvme_ctrlr_hmb_free()
810 if (ctrlr->hmb_desc_tag) { in nvme_ctrlr_hmb_free()
811 bus_dma_tag_destroy(ctrlr->hmb_desc_tag); in nvme_ctrlr_hmb_free()
812 ctrlr->hmb_desc_tag = NULL; in nvme_ctrlr_hmb_free()
814 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_free()
815 hmbc = &ctrlr->hmb_chunks[i]; in nvme_ctrlr_hmb_free()
816 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map); in nvme_ctrlr_hmb_free()
817 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, in nvme_ctrlr_hmb_free()
818 hmbc->hmbc_map); in nvme_ctrlr_hmb_free()
820 ctrlr->hmb_nchunks = 0; in nvme_ctrlr_hmb_free()
821 if (ctrlr->hmb_tag) { in nvme_ctrlr_hmb_free()
822 bus_dma_tag_destroy(ctrlr->hmb_tag); in nvme_ctrlr_hmb_free()
823 ctrlr->hmb_tag = NULL; in nvme_ctrlr_hmb_free()
825 if (ctrlr->hmb_chunks) { in nvme_ctrlr_hmb_free()
826 free(ctrlr->hmb_chunks, M_NVME); in nvme_ctrlr_hmb_free()
827 ctrlr->hmb_chunks = NULL; in nvme_ctrlr_hmb_free()
847 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS; in nvme_ctrlr_hmb_alloc()
850 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max); in nvme_ctrlr_hmb_alloc()
851 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
852 if (min > 0 && ctrlr->cdata.hmmaxd > 0) in nvme_ctrlr_hmb_alloc()
853 minc = MAX(minc, min / ctrlr->cdata.hmmaxd); in nvme_ctrlr_hmb_alloc()
854 ctrlr->hmb_chunk = pref; in nvme_ctrlr_hmb_alloc()
859 * are all based on the current MPS (ctrlr->page_size). in nvme_ctrlr_hmb_alloc()
861 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
862 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk); in nvme_ctrlr_hmb_alloc()
863 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd) in nvme_ctrlr_hmb_alloc()
864 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd; in nvme_ctrlr_hmb_alloc()
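
Roughly, the sizing above splits the controller's preferred host memory buffer into page-aligned chunks and then caps the chunk count at HMMAXD; a standalone sketch with assumed identify-data values (HMPRE is reported in 4 KB units):

    #include <stdio.h>

    #define HMB_UNIT 4096UL         /* unit of the HMPRE/HMMIN fields */

    int
    main(void)
    {
            unsigned long hmpre = 8192;             /* assumed: controller prefers 8192 * 4 KB = 32 MB */
            unsigned hmmaxd = 8;                    /* assumed descriptor limit */
            unsigned long page_size = 4096;
            unsigned long chunk = 5 * 1024 * 1024;  /* assumed starting chunk size */

            unsigned long pref = hmpre * HMB_UNIT;
            chunk = (chunk + page_size - 1) / page_size * page_size;        /* roundup2() */
            unsigned long nchunks = (pref + chunk - 1) / chunk;             /* howmany() */
            if (hmmaxd > 0 && nchunks > hmmaxd)
                    nchunks = hmmaxd;
            printf("%lu chunks of %lu bytes\n", nchunks, chunk);            /* 7 chunks of 5242880 */
            return (0);
    }
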
865 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) * in nvme_ctrlr_hmb_alloc()
866 ctrlr->hmb_nchunks, M_NVME, M_WAITOK); in nvme_ctrlr_hmb_alloc()
867 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), in nvme_ctrlr_hmb_alloc()
868 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, in nvme_ctrlr_hmb_alloc()
869 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag); in nvme_ctrlr_hmb_alloc()
876 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_alloc()
877 hmbc = &ctrlr->hmb_chunks[i]; in nvme_ctrlr_hmb_alloc()
878 if (bus_dmamem_alloc(ctrlr->hmb_tag, in nvme_ctrlr_hmb_alloc()
879 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT, in nvme_ctrlr_hmb_alloc()
880 &hmbc->hmbc_map)) { in nvme_ctrlr_hmb_alloc()
884 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map, in nvme_ctrlr_hmb_alloc()
885 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map, in nvme_ctrlr_hmb_alloc()
886 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) { in nvme_ctrlr_hmb_alloc()
887 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, in nvme_ctrlr_hmb_alloc()
888 hmbc->hmbc_map); in nvme_ctrlr_hmb_alloc()
892 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map, in nvme_ctrlr_hmb_alloc()
896 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min && in nvme_ctrlr_hmb_alloc()
897 ctrlr->hmb_chunk / 2 >= minc) { in nvme_ctrlr_hmb_alloc()
898 ctrlr->hmb_nchunks = i; in nvme_ctrlr_hmb_alloc()
900 ctrlr->hmb_chunk /= 2; in nvme_ctrlr_hmb_alloc()
903 ctrlr->hmb_nchunks = i; in nvme_ctrlr_hmb_alloc()
904 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) { in nvme_ctrlr_hmb_alloc()
909 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks; in nvme_ctrlr_hmb_alloc()
910 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), in nvme_ctrlr_hmb_alloc()
912 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag); in nvme_ctrlr_hmb_alloc()
918 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag, in nvme_ctrlr_hmb_alloc()
919 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK, in nvme_ctrlr_hmb_alloc()
920 &ctrlr->hmb_desc_map)) { in nvme_ctrlr_hmb_alloc()
925 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, in nvme_ctrlr_hmb_alloc()
926 ctrlr->hmb_desc_vaddr, size, nvme_single_map, in nvme_ctrlr_hmb_alloc()
927 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) { in nvme_ctrlr_hmb_alloc()
928 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, in nvme_ctrlr_hmb_alloc()
929 ctrlr->hmb_desc_map); in nvme_ctrlr_hmb_alloc()
935 for (i = 0; i < ctrlr->hmb_nchunks; i++) { in nvme_ctrlr_hmb_alloc()
936 memset(&ctrlr->hmb_desc_vaddr[i], 0, in nvme_ctrlr_hmb_alloc()
938 ctrlr->hmb_desc_vaddr[i].addr = in nvme_ctrlr_hmb_alloc()
939 htole64(ctrlr->hmb_chunks[i].hmbc_paddr); in nvme_ctrlr_hmb_alloc()
940 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size); in nvme_ctrlr_hmb_alloc()
942 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, in nvme_ctrlr_hmb_alloc()
946 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk in nvme_ctrlr_hmb_alloc()
963 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size, in nvme_ctrlr_hmb_enable()
964 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32, in nvme_ctrlr_hmb_enable()
965 ctrlr->hmb_nchunks, NULL, 0, in nvme_ctrlr_hmb_enable()
983 * controller after a reset. During initialization, in nvme_ctrlr_start()
989 nvme_qpair_reset(&ctrlr->adminq); in nvme_ctrlr_start()
990 nvme_admin_qpair_enable(&ctrlr->adminq); in nvme_ctrlr_start()
993 if (ctrlr->ioq != NULL) { in nvme_ctrlr_start()
994 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_start()
995 nvme_qpair_reset(&ctrlr->ioq[i]); in nvme_ctrlr_start()
1002 if (resetting && !ctrlr->is_initialized) in nvme_ctrlr_start()
1011 * The number of qpairs is determined during controller initialization, in nvme_ctrlr_start()
1019 old_num_io_queues = ctrlr->num_io_queues; in nvme_ctrlr_start()
1025 if (old_num_io_queues != ctrlr->num_io_queues) { in nvme_ctrlr_start()
1027 old_num_io_queues, ctrlr->num_io_queues); in nvme_ctrlr_start()
1031 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) { in nvme_ctrlr_start()
1033 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_start()
1035 } else if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_start()
1051 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_start()
1052 nvme_io_qpair_enable(&ctrlr->ioq[i]); in nvme_ctrlr_start()
1063 if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) { in nvme_ctrlr_start_config_hook()
1065 config_intrhook_disestablish(&ctrlr->config_hook); in nvme_ctrlr_start_config_hook()
1069 nvme_qpair_reset(&ctrlr->adminq); in nvme_ctrlr_start_config_hook()
1070 nvme_admin_qpair_enable(&ctrlr->adminq); in nvme_ctrlr_start_config_hook()
1080 config_intrhook_disestablish(&ctrlr->config_hook); in nvme_ctrlr_start_config_hook()
1082 if (!ctrlr->is_failed) { in nvme_ctrlr_start_config_hook()
1083 ctrlr->is_initialized = true; in nvme_ctrlr_start_config_hook()
1105 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_reset_task()
1113 mtx_lock(&aer->mtx); in nvme_ctrlr_aer_done()
1115 aer->log_page_size = (uint32_t)-1; in nvme_ctrlr_aer_done()
1117 aer->log_page_size = nvme_ctrlr_get_log_page_size( in nvme_ctrlr_aer_done()
1118 aer->ctrlr, aer->log_page_id); in nvme_ctrlr_aer_done()
1120 mtx_unlock(&aer->mtx); in nvme_ctrlr_aer_done()
1127 struct nvme_controller *ctrlr = aer->ctrlr; in nvme_ctrlr_aer_task()
1133 if (ctrlr->is_resetting) in nvme_ctrlr_aer_task()
1136 if (!is_log_page_id_valid(aer->log_page_id)) { in nvme_ctrlr_aer_task()
1141 nvme_notify_async_consumers(ctrlr, &aer->cpl, aer->log_page_id, in nvme_ctrlr_aer_task()
1147 aer->log_page_size = 0; in nvme_ctrlr_aer_task()
1148 len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id); in nvme_ctrlr_aer_task()
1149 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id, in nvme_ctrlr_aer_task()
1150 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len, in nvme_ctrlr_aer_task()
1152 mtx_lock(&aer->mtx); in nvme_ctrlr_aer_task()
1153 while (aer->log_page_size == 0) in nvme_ctrlr_aer_task()
1154 mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0); in nvme_ctrlr_aer_task()
1155 mtx_unlock(&aer->mtx); in nvme_ctrlr_aer_task()
1157 if (aer->log_page_size != (uint32_t)-1) { in nvme_ctrlr_aer_task()
1163 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, in nvme_ctrlr_aer_task()
1164 aer->log_page_id, NULL, 0); in nvme_ctrlr_aer_task()
1169 switch (aer->log_page_id) { in nvme_ctrlr_aer_task()
1172 (struct nvme_error_information_entry *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1173 for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++) in nvme_ctrlr_aer_task()
1179 (struct nvme_health_information_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1183 (struct nvme_ns_list *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1187 (struct nvme_command_effects_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1191 (struct nvme_res_notification_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1195 (struct nvme_sanitize_status_page *)aer->log_page_buffer); in nvme_ctrlr_aer_task()
1201 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { in nvme_ctrlr_aer_task()
1203 (struct nvme_health_information_page *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1211 nvme_ctrlr_log_critical_warnings(aer->ctrlr, in nvme_ctrlr_aer_task()
1212 health_info->critical_warning); in nvme_ctrlr_aer_task()
1213 aer->ctrlr->async_event_config &= in nvme_ctrlr_aer_task()
1214 ~health_info->critical_warning; in nvme_ctrlr_aer_task()
1215 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, in nvme_ctrlr_aer_task()
1216 aer->ctrlr->async_event_config, NULL, NULL); in nvme_ctrlr_aer_task()
1217 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) { in nvme_ctrlr_aer_task()
1219 (struct nvme_ns_list *)aer->log_page_buffer; in nvme_ctrlr_aer_task()
1220 for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { in nvme_ctrlr_aer_task()
1221 if (nsl->ns[i] > NVME_MAX_NAMESPACES) in nvme_ctrlr_aer_task()
1223 nvme_notify_ns(aer->ctrlr, nsl->ns[i]); in nvme_ctrlr_aer_task()
1231 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, in nvme_ctrlr_aer_task()
1232 aer->log_page_id, aer->log_page_buffer, aer->log_page_size); in nvme_ctrlr_aer_task()
1250 nvme_qpair_process_completions(&ctrlr->adminq); in nvme_ctrlr_poll()
1252 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_poll()
1253 if (ctrlr->ioq && ctrlr->ioq[i].cpl) in nvme_ctrlr_poll()
1254 nvme_qpair_process_completions(&ctrlr->ioq[i]); in nvme_ctrlr_poll()
1267 memmove(sn, ctrlr->cdata.sn, NVME_SERIAL_NUMBER_LENGTH); in nvme_ctrlr_get_ident()
1276 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1278 * interrupts in the controller.
1302 err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map, in nvme_user_ioctl_req()
1307 (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK); in nvme_user_ioctl_req()
1308 (*req)->payload_valid = true; in nvme_user_ioctl_req()
1322 struct mtx *mtx = pt->driver_lock; in nvme_pt_done()
1325 bzero(&pt->cpl, sizeof(pt->cpl)); in nvme_pt_done()
1326 pt->cpl.cdw0 = cpl->cdw0; in nvme_pt_done()
1328 status = cpl->status; in nvme_pt_done()
1330 pt->cpl.status = status; in nvme_pt_done()
1333 pt->driver_lock = NULL; in nvme_pt_done()
1349 if (pt->len > 0) { in nvme_ctrlr_passthrough_cmd()
1350 if (pt->len > ctrlr->max_xfer_size) { in nvme_ctrlr_passthrough_cmd()
1353 pt->len, ctrlr->max_xfer_size); in nvme_ctrlr_passthrough_cmd()
1357 ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len, in nvme_ctrlr_passthrough_cmd()
1358 pt->is_read, upages, nitems(upages), &npages, &req, in nvme_ctrlr_passthrough_cmd()
1363 req = nvme_allocate_request_vaddr(pt->buf, pt->len, in nvme_ctrlr_passthrough_cmd()
1368 /* Assume user space already converted to little-endian */ in nvme_ctrlr_passthrough_cmd()
1369 req->cmd.opc = pt->cmd.opc; in nvme_ctrlr_passthrough_cmd()
1370 req->cmd.fuse = pt->cmd.fuse; in nvme_ctrlr_passthrough_cmd()
1371 req->cmd.rsvd2 = pt->cmd.rsvd2; in nvme_ctrlr_passthrough_cmd()
1372 req->cmd.rsvd3 = pt->cmd.rsvd3; in nvme_ctrlr_passthrough_cmd()
1373 req->cmd.cdw10 = pt->cmd.cdw10; in nvme_ctrlr_passthrough_cmd()
1374 req->cmd.cdw11 = pt->cmd.cdw11; in nvme_ctrlr_passthrough_cmd()
1375 req->cmd.cdw12 = pt->cmd.cdw12; in nvme_ctrlr_passthrough_cmd()
1376 req->cmd.cdw13 = pt->cmd.cdw13; in nvme_ctrlr_passthrough_cmd()
1377 req->cmd.cdw14 = pt->cmd.cdw14; in nvme_ctrlr_passthrough_cmd()
1378 req->cmd.cdw15 = pt->cmd.cdw15; in nvme_ctrlr_passthrough_cmd()
1380 req->cmd.nsid = htole32(nsid); in nvme_ctrlr_passthrough_cmd()
1383 pt->driver_lock = mtx; in nvme_ctrlr_passthrough_cmd()
1391 while (pt->driver_lock != NULL) in nvme_ctrlr_passthrough_cmd()
1405 struct mtx *mtx = (void *)(uintptr_t)npc->metadata; in nvme_npc_done()
1407 npc->result = cpl->cdw0; /* cpl in host order by now */ in nvme_npc_done()
1409 npc->metadata = 0; in nvme_npc_done()
1429 if (npc->metadata != 0 || npc->metadata_len != 0) in nvme_ctrlr_linux_passthru_cmd()
1432 if (npc->data_len > 0 && npc->addr != 0) { in nvme_ctrlr_linux_passthru_cmd()
1433 if (npc->data_len > ctrlr->max_xfer_size) { in nvme_ctrlr_linux_passthru_cmd()
1436 npc->data_len, ctrlr->max_xfer_size); in nvme_ctrlr_linux_passthru_cmd()
1440 ret = nvme_user_ioctl_req(npc->addr, npc->data_len, in nvme_ctrlr_linux_passthru_cmd()
1441 npc->opcode & 0x1, upages, nitems(upages), &npages, in nvme_ctrlr_linux_passthru_cmd()
1447 (void *)(uintptr_t)npc->addr, npc->data_len, in nvme_ctrlr_linux_passthru_cmd()
1452 req->cmd.opc = npc->opcode; in nvme_ctrlr_linux_passthru_cmd()
1453 req->cmd.fuse = npc->flags; in nvme_ctrlr_linux_passthru_cmd()
1454 req->cmd.rsvd2 = htole32(npc->cdw2); in nvme_ctrlr_linux_passthru_cmd()
1455 req->cmd.rsvd3 = htole32(npc->cdw3); in nvme_ctrlr_linux_passthru_cmd()
1456 req->cmd.cdw10 = htole32(npc->cdw10); in nvme_ctrlr_linux_passthru_cmd()
1457 req->cmd.cdw11 = htole32(npc->cdw11); in nvme_ctrlr_linux_passthru_cmd()
1458 req->cmd.cdw12 = htole32(npc->cdw12); in nvme_ctrlr_linux_passthru_cmd()
1459 req->cmd.cdw13 = htole32(npc->cdw13); in nvme_ctrlr_linux_passthru_cmd()
1460 req->cmd.cdw14 = htole32(npc->cdw14); in nvme_ctrlr_linux_passthru_cmd()
1461 req->cmd.cdw15 = htole32(npc->cdw15); in nvme_ctrlr_linux_passthru_cmd()
1463 req->cmd.nsid = htole32(nsid); in nvme_ctrlr_linux_passthru_cmd()
1466 npc->metadata = (uintptr_t) mtx; in nvme_ctrlr_linux_passthru_cmd()
1475 while (npc->metadata != 0) in nvme_ctrlr_linux_passthru_cmd()
1492 ctrlr = cdev->si_drv1; in nvme_ctrlr_ioctl()
1501 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid), in nvme_ctrlr_ioctl()
1506 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev), in nvme_ctrlr_ioctl()
1507 sizeof(gnsid->cdev)); in nvme_ctrlr_ioctl()
1508 gnsid->nsid = 0; in nvme_ctrlr_ioctl()
1512 *(uint64_t *)arg = ctrlr->max_xfer_size; in nvme_ctrlr_ioctl()
1515 memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata)); in nvme_ctrlr_ioctl()
1524 td->td_retval[0] = 0xfffffffful; in nvme_ctrlr_ioctl()
1531 return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true, in nvme_ctrlr_ioctl()
1557 ctrlr->dev = dev; in nvme_ctrlr_construct()
1559 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF); in nvme_ctrlr_construct()
1560 if (bus_get_domain(dev, &ctrlr->domain) != 0) in nvme_ctrlr_construct()
1561 ctrlr->domain = 0; in nvme_ctrlr_construct()
1563 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo); in nvme_ctrlr_construct()
1573 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi); in nvme_ctrlr_construct()
1608 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2; in nvme_ctrlr_construct()
1610 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi); in nvme_ctrlr_construct()
1611 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps); in nvme_ctrlr_construct()
1613 /* Get ready timeout value from controller, in units of 500ms. */ in nvme_ctrlr_construct()
1615 ctrlr->ready_timeout_in_ms = to * 500; in nvme_ctrlr_construct()
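
Both values come straight out of CAP: the page size is 2^(12 + MPSMIN) bytes and TO counts 500 ms units; a short worked example with assumed register values:

    #include <stdio.h>

    #define MPS_SHIFT 12    /* NVMe minimum page size is 2^12 bytes */

    int
    main(void)
    {
            unsigned mpsmin = 0;    /* assumed CAP.MPSMIN */
            unsigned to = 30;       /* assumed CAP.TO, in 500 ms units */

            printf("page size: %u bytes\n", 1U << (MPS_SHIFT + mpsmin));    /* 4096 */
            printf("ready timeout: %u ms\n", to * 500);                     /* 15000 */
            return (0);
    }
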
1621 ctrlr->admin_timeout_period = timeout_period; in nvme_ctrlr_construct()
1627 ctrlr->timeout_period = timeout_period; in nvme_ctrlr_construct()
1632 ctrlr->enable_aborts = 0; in nvme_ctrlr_construct()
1633 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts); in nvme_ctrlr_construct()
1635 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK); in nvme_ctrlr_construct()
1637 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */ in nvme_ctrlr_construct()
1638 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size)); in nvme_ctrlr_construct()
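
The PRP cap works out as follows: one page of 8-byte PRP entries, each pointing at one data page, so with 4 KB pages a single PRP list describes 512 * 4 KB = 2 MB; a worked sketch (page size assumed, maxphys left out):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long page_size = 4096;         /* assumed MPS-derived page size */
            unsigned long entries = page_size / 8;  /* 8-byte PRP entries per list page */

            printf("PRP cap: %lu bytes\n", entries * page_size);    /* 2097152 = 2 MB */
            return (0);
    }
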
1644 * it detects that the controller has failed until all I/O has been in nvme_ctrlr_construct()
1648 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, in nvme_ctrlr_construct()
1649 taskqueue_thread_enqueue, &ctrlr->taskqueue); in nvme_ctrlr_construct()
1650 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq"); in nvme_ctrlr_construct()
1652 ctrlr->is_resetting = 0; in nvme_ctrlr_construct()
1653 ctrlr->is_initialized = false; in nvme_ctrlr_construct()
1654 ctrlr->notification_sent = 0; in nvme_ctrlr_construct()
1655 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); in nvme_ctrlr_construct()
1657 struct nvme_async_event_request *aer = &ctrlr->aer[i]; in nvme_ctrlr_construct()
1659 TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer); in nvme_ctrlr_construct()
1660 mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF); in nvme_ctrlr_construct()
1662 ctrlr->is_failed = false; in nvme_ctrlr_construct()
1671 status = make_dev_s(&md_args, &ctrlr->cdev, "%s", in nvme_ctrlr_construct()
1691 ctrlr->is_dying = true; in nvme_ctrlr_destruct()
1693 if (ctrlr->resource == NULL) in nvme_ctrlr_destruct()
1695 if (!mtx_initialized(&ctrlr->adminq.lock)) in nvme_ctrlr_destruct()
1702 * departing, so ask the bridge if the device is gone. Some systems can in nvme_ctrlr_destruct()
1703 * remove the drive w/o the bridge knowing it's gone (they don't really in nvme_ctrlr_destruct()
1715 nvme_ns_destruct(&ctrlr->ns[i]); in nvme_ctrlr_destruct()
1717 if (ctrlr->cdev) in nvme_ctrlr_destruct()
1718 destroy_dev(ctrlr->cdev); in nvme_ctrlr_destruct()
1720 if (ctrlr->is_initialized) { in nvme_ctrlr_destruct()
1722 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_destruct()
1728 if (ctrlr->ioq != NULL) { in nvme_ctrlr_destruct()
1729 for (i = 0; i < ctrlr->num_io_queues; i++) in nvme_ctrlr_destruct()
1730 nvme_io_qpair_destroy(&ctrlr->ioq[i]); in nvme_ctrlr_destruct()
1731 free(ctrlr->ioq, M_NVME); in nvme_ctrlr_destruct()
1733 nvme_admin_qpair_destroy(&ctrlr->adminq); in nvme_ctrlr_destruct()
1736 * Notify the controller of a shutdown, even though this is due to a in nvme_ctrlr_destruct()
1738 * shutdown). This ensures the controller receives a shutdown in nvme_ctrlr_destruct()
1749 if (ctrlr->taskqueue) { in nvme_ctrlr_destruct()
1750 taskqueue_free(ctrlr->taskqueue); in nvme_ctrlr_destruct()
1752 struct nvme_async_event_request *aer = &ctrlr->aer[i]; in nvme_ctrlr_destruct()
1754 mtx_destroy(&aer->mtx); in nvme_ctrlr_destruct()
1758 if (ctrlr->tag) in nvme_ctrlr_destruct()
1759 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); in nvme_ctrlr_destruct()
1761 if (ctrlr->res) in nvme_ctrlr_destruct()
1762 bus_release_resource(ctrlr->dev, SYS_RES_IRQ, in nvme_ctrlr_destruct()
1763 rman_get_rid(ctrlr->res), ctrlr->res); in nvme_ctrlr_destruct()
1765 if (ctrlr->bar4_resource != NULL) { in nvme_ctrlr_destruct()
1767 ctrlr->bar4_resource_id, ctrlr->bar4_resource); in nvme_ctrlr_destruct()
1771 ctrlr->resource_id, ctrlr->resource); in nvme_ctrlr_destruct()
1774 if (ctrlr->alignment_splits) in nvme_ctrlr_destruct()
1775 counter_u64_free(ctrlr->alignment_splits); in nvme_ctrlr_destruct()
1777 mtx_destroy(&ctrlr->lock); in nvme_ctrlr_destruct()
1792 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz : in nvme_ctrlr_shutdown()
1793 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000); in nvme_ctrlr_shutdown()
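
RTD3E is the controller's worst-case shutdown latency in microseconds; the expression above rounds it up to scheduler ticks and falls back to 5 seconds when the field is zero. A standalone version with assumed values:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long long rtd3e = 8000000;     /* assumed: controller reports 8 s */
            unsigned long hz = 1000;                /* assumed tick rate */

            unsigned long long tmo = (rtd3e == 0) ? 5 * hz :
                (rtd3e * hz + 999999) / 1000000;    /* round microseconds up to ticks */
            printf("shutdown budget: %llu ticks\n", tmo);   /* 8000 */
            return (0);
    }
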
1800 if (timeout - ticks < 0) { in nvme_ctrlr_shutdown()
1812 nvme_qpair_submit_request(&ctrlr->adminq, req); in nvme_ctrlr_submit_admin_request()
1821 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)]; in nvme_ctrlr_submit_io_request()
1828 return (ctrlr->dev); in nvme_ctrlr_get_device()
1834 return (&ctrlr->cdata); in nvme_ctrlr_get_data()
1847 if (ctrlr->is_failed) in nvme_ctrlr_suspend()
1857 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0) in nvme_ctrlr_suspend()
1865 if (ctrlr->hmb_nchunks > 0) in nvme_ctrlr_suspend()
1889 if (ctrlr->is_failed) in nvme_ctrlr_resume()
1896 * Now that we've reset the hardware, we can restart the controller. Any in nvme_ctrlr_resume()
1898 * an error. Once we've restarted, stop flagging the controller as being in nvme_ctrlr_resume()
1902 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_resume()
1907 * Since we can't bring the controller out of reset, announce and fail in nvme_ctrlr_resume()
1908 * the controller. However, we have to return success for the resume in nvme_ctrlr_resume()
1913 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); in nvme_ctrlr_resume()