
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * ...
 */

/* nvme_admin_qpair_print_command() */
        nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%x "
            ...
            get_opcode_string(true, cmd->opc, buf, sizeof(buf)), qpair->id,
            cmd->cid, le32toh(cmd->nsid), le32toh(cmd->cdw10),
            le32toh(cmd->cdw11));

/* nvme_io_qpair_print_command() */
        switch (cmd->opc) {
        ...
                nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
                    ...
                    get_opcode_string(false, cmd->opc, buf, sizeof(buf)),
                    qpair->id, cmd->cid, le32toh(cmd->nsid),
                    ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
                    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
                break;
        default:
                nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
                    get_opcode_string(false, cmd->opc, buf, sizeof(buf)),
                    qpair->id, cmd->cid, le32toh(cmd->nsid));
                break;
        }
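
The read/write arm above rebuilds the transfer range from raw command dwords: CDW10 and CDW11 hold the low and high halves of the 64-bit starting LBA, and the low 16 bits of CDW12 hold the number of logical blocks minus one. A minimal standalone sketch of that packing and unpacking; pack_lba_range() is a hypothetical helper, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring how NVMe read/write commands pack an
 * LBA range: CDW10/CDW11 carry the 64-bit starting LBA, the low 16
 * bits of CDW12 carry the block count minus one (NLB is zero-based).
 */
static void
pack_lba_range(uint64_t lba, uint32_t nblocks,
    uint32_t *cdw10, uint32_t *cdw11, uint32_t *cdw12)
{
        *cdw10 = (uint32_t)lba;
        *cdw11 = (uint32_t)(lba >> 32);
        *cdw12 = (nblocks - 1) & 0xFFFF;
}

int
main(void)
{
        uint32_t cdw10, cdw11, cdw12;

        pack_lba_range(0x123456789ULL, 8, &cdw10, &cdw11, &cdw12);

        /* Reverse the packing exactly as the print routine does. */
        unsigned long long lba = ((unsigned long long)cdw11 << 32) + cdw10;
        unsigned len = (cdw12 & 0xFFFF) + 1;
        printf("lba:%llu len:%u\n", lba, len);
        return (0);
}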

/* nvme_qpair_print_command() */
        if (qpair->id == 0)
                nvme_admin_qpair_print_command(qpair, cmd);
        else
                nvme_io_qpair_print_command(qpair, cmd);
        ...
        nvme_printf(qpair->ctrlr,
            ...
            cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
            (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
        nvme_printf(qpair->ctrlr,
            ...
            cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
            cmd->cdw15);

/* nvme_qpair_print_completion() */
        crd = NVME_STATUS_GET_CRD(cpl->status);
        m = NVME_STATUS_GET_M(cpl->status);
        dnr = NVME_STATUS_GET_DNR(cpl->status);
        p = NVME_STATUS_GET_P(cpl->status);

        nvme_printf(qpair->ctrlr, "%s crd:%x m:%x dnr:%x p:%d "
            ...
            cpl->sqid, cpl->cid, cpl->cdw0);

/* nvme_completion_is_retry() */
        sct = NVME_STATUS_GET_SCT(cpl->status);
        sc = NVME_STATUS_GET_SC(cpl->status);
        dnr = NVME_STATUS_GET_DNR(cpl->status); /* Do Not Retry Bit */
        ...
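
The NVME_STATUS_GET_* accessors above are shift-and-mask extractors over the 16-bit completion status word; in FreeBSD's nvme.h the layout is P (bit 0), SC (bits 1..8), SCT (bits 9..11), CRD (bits 12..13), M (bit 14), DNR (bit 15). A compilable sketch of the accessors plus a deliberately simplified retry predicate; the real nvme_completion_is_retry() switches over many specific SCT/SC pairs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Shift-and-mask accessors in the style of the driver's GET macros. */
#define STATUS_GET_P(st)        (((st) >> 0) & 0x1)
#define STATUS_GET_SC(st)       (((st) >> 1) & 0xFF)
#define STATUS_GET_SCT(st)      (((st) >> 9) & 0x7)
#define STATUS_GET_DNR(st)      (((st) >> 15) & 0x1)

/*
 * Simplified stand-in for the retry decision: never retry when the
 * controller set Do Not Retry; otherwise treat non-success generic
 * (SCT 0) errors as transient.
 */
static bool
completion_is_retry(uint16_t status)
{
        if (STATUS_GET_DNR(status))
                return (false);
        return (STATUS_GET_SCT(status) == 0 && STATUS_GET_SC(status) != 0);
}

int
main(void)
{
        printf("%d\n", completion_is_retry(0x8000));    /* DNR set: 0 */
        printf("%d\n", completion_is_retry(0x0002));    /* SCT 0, SC 1: 1 */
        return (0);
}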

/* nvme_qpair_complete_tracker() */
        struct nvme_qpair *qpair = tr->qpair;
        ...
        mtx_assert(&qpair->lock, MA_NOTOWNED);

        req = tr->req;
        ...
        retry = error && retriable && req->retries < nvme_retry_count;
        if (retry)
                qpair->num_retries++;
        if (error && req->retries >= nvme_retry_count && retriable)
                qpair->num_failures++;
        ...
                nvme_qpair_print_command(qpair, &req->cmd);
        ...
        qpair->act_tr[cpl->cid] = NULL;

        KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));
        ...
        if (req->payload_valid) {
                bus_dmamap_sync(qpair->dma_tag_payload,
                    tr->payload_dma_map,
                    ...);
        }
        if (req->cb_fn)
                req->cb_fn(req->cb_arg, cpl);
        ...
        mtx_lock(&qpair->lock);
        ...
                req->retries++;
        ...
        if (req->payload_valid) {
                bus_dmamap_unload(qpair->dma_tag_payload,
                    tr->payload_dma_map);
        }
        ...
        tr->req = NULL;

        TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
        TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

        /*
         * ...
         * try to submit queued requests here - let the reset logic
         * ...
         */
        if (!STAILQ_EMPTY(&qpair->queued_req) &&
            !qpair->ctrlr->is_resetting) {
                req = STAILQ_FIRST(&qpair->queued_req);
                STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
                ...
        }
        ...
        mtx_unlock(&qpair->lock);

/* nvme_qpair_manual_complete_tracker() */
        struct nvme_qpair * qpair = tr->qpair;

        mtx_assert(&qpair->lock, MA_NOTOWNED);
        ...
        cpl.sqid = qpair->id;
        cpl.cid = tr->cid;
        ...

/* nvme_qpair_manual_complete_request() */
        cpl.sqid = qpair->id;
        ...
                nvme_qpair_print_command(qpair, &req->cmd);
        ...
        if (req->cb_fn)
                req->cb_fn(req->cb_arg, &cpl);
        ...

/* _nvme_qpair_process_completions() */
        mtx_assert(&qpair->recovery, MA_OWNED);

        /*
         * ...
         * progress. Ignore the interrupt - any I/O that was associated with
         * ...
         */
        if (qpair->recovery_state != RECOVERY_NONE) {
                qpair->num_ignored++;
                ...
        }
        ...
        KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0),
            ...
            device_get_nameunit(qpair->ctrlr->dev)));

        qpair->num_intr_handler_calls++;

        bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
            ...);

        /*
         * A panic can stop the CPU this routine is running on at any point. If
         * ...
         */
        if (qpair->cq_head == qpair->num_entries) {
                ...
                qpair->cq_head = 0;
                qpair->phase = !qpair->phase;
        } else if (qpair->cq_head == 0) {
                ...
                cpl = qpair->cpl[qpair->num_entries - 1];
                ...
                qpair->phase = !NVME_STATUS_GET_P(cpl.status);
        }
        ...
                status = le16toh(qpair->cpl[qpair->cq_head].status);
                if (NVME_STATUS_GET_P(status) != qpair->phase)
                        break;

                bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
                    ...);
                cpl = qpair->cpl[qpair->cq_head];
                ...
                if (cpl.cid < qpair->num_trackers)
                        tr = qpair->act_tr[cpl.cid];
                ...
                        qpair->sq_head = cpl.sqhd;
                ...
                /*
                 * ...
                 * panic can stop the CPU this routine is running on
                 * ...
                 * qpair->cq_head at 1 below. Later, we re-enter this
                 * ...
                 */
                        nvme_printf(qpair->ctrlr,
                            ...
                            &qpair->cpl[qpair->cq_head]);
                ...
                /*
                 * ...
                 * viewed in the aftermath of a panic).
                 */
                if (++qpair->cq_head == qpair->num_entries) {           /* 1 */
                        atomic_store_rel_int(&qpair->cq_head, 0);       /* 2 */
                        qpair->phase = !qpair->phase;                   /* 3 */
                }
        ...
        bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
            qpair->cq_hdbl_off, qpair->cq_head);
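
This loop is the classic NVMe phase-tag scan: the controller inverts the phase bit it writes each time it wraps the completion ring, so an entry is valid only while its P bit matches the phase the driver expects, and the driver flips its expectation whenever cq_head wraps. A self-contained toy model of that consumer (the toy_* types are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_cpl {
        uint16_t cid;
        uint16_t status;        /* bit 0 is the phase tag */
};

struct toy_cq {
        struct toy_cpl  *ring;
        uint32_t        num_entries;
        uint32_t        head;
        bool            phase;  /* phase expected of valid entries */
};

/* Consume entries until the phase tag stops matching expectations. */
static int
toy_cq_drain(struct toy_cq *cq)
{
        int done = 0;

        for (;;) {
                struct toy_cpl *cpl = &cq->ring[cq->head];

                if ((cpl->status & 1) != cq->phase)
                        break;          /* not posted yet */
                printf("completed cid %u\n", cpl->cid);
                done++;
                if (++cq->head == cq->num_entries) {
                        cq->head = 0;
                        cq->phase = !cq->phase; /* wrapped: expect flipped bit */
                }
        }
        return (done);
}

int
main(void)
{
        struct toy_cpl ring[4] = {
                { .cid = 1, .status = 1 },      /* posted this pass */
                { .cid = 2, .status = 1 },
                { .cid = 3, .status = 0 },      /* stale: previous pass */
        };
        struct toy_cq cq = { ring, 4, 0, true };

        printf("drained %d\n", toy_cq_drain(&cq));      /* 2 */
        return (0);
}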

/* nvme_qpair_process_completions() */
        mtx_lock(&qpair->recovery);
        ...
        if (__predict_true(qpair->recovery_state == RECOVERY_NONE))
                ...
        else
                qpair->num_recovery_nolock++;   // XXX likely need to rename

        mtx_unlock(&qpair->recovery);

/* nvme_qpair_construct() */
        qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
        qpair->num_entries = num_entries;
        qpair->num_trackers = num_trackers;
        qpair->ctrlr = ctrlr;

        mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
        mtx_init(&qpair->recovery, "nvme qpair recovery", NULL, MTX_DEF);

        callout_init_mtx(&qpair->timer, &qpair->recovery, 0);
        qpair->timer_armed = false;
        qpair->recovery_state = RECOVERY_WAITING;

        /* Note: NVMe PRP format is restricted to 4-byte alignment. */
        err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
            4, ctrlr->page_size, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
            howmany(ctrlr->max_xfer_size, ctrlr->page_size) + 1,
            ctrlr->page_size, 0,
            NULL, NULL, &qpair->dma_tag_payload);
        ...
        cmdsz = qpair->num_entries * sizeof(struct nvme_command);
        cmdsz = roundup2(cmdsz, ctrlr->page_size);
        cplsz = qpair->num_entries * sizeof(struct nvme_completion);
        cplsz = roundup2(cplsz, ctrlr->page_size);
        ...
        prpsz = ...
            howmany(ctrlr->max_xfer_size, ctrlr->page_size);
        prpmemsz = qpair->num_trackers * prpsz;
        ...
        err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
            ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
        ...
        bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

        if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
            BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
                ...
        }

        if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
            ...) {
                ...
                bus_dmamem_free(qpair->dma_tag, qpair->cmd,
                    qpair->queuemem_map);
                ...
        }

        qpair->num_cmds = 0;
        qpair->num_intr_handler_calls = 0;
        qpair->num_retries = 0;
        qpair->num_failures = 0;
        qpair->num_ignored = 0;
        qpair->cmd = (struct nvme_command *)queuemem;
        qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
        ...
        qpair->cmd_bus_addr = queuemem_phys;
        qpair->cpl_bus_addr = queuemem_phys + cmdsz;
        ...
        qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
            (qpair->id << (ctrlr->dstrd + 1));
        qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
            (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);

        TAILQ_INIT(&qpair->free_tr);
        TAILQ_INIT(&qpair->outstanding_tr);
        STAILQ_INIT(&qpair->queued_req);
        ...
        for (i = 0; i < qpair->num_trackers; i++) {
                ...
                        qpair->num_trackers = i;
                ...
                if (... !=
                    trunc_page(list_phys + prpsz - 1)) {
                        list_phys = roundup2(list_phys, ctrlr->page_size);
                        prp_list =
                            (uint8_t *)roundup2((uintptr_t)prp_list, ctrlr->page_size);
                }

                tr = malloc_domainset(..., M_NVME,
                    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
                bus_dmamap_create(qpair->dma_tag_payload, 0,
                    &tr->payload_dma_map);
                tr->cid = i;
                tr->qpair = qpair;
                tr->prp = (uint64_t *)prp_list;
                tr->prp_bus_addr = list_phys;
                TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
                ...
        }

        if (qpair->num_trackers == 0) {
                ...
        }

        qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
            qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
            ...);

        if (ctrlr->msi_count > 1) {
                /*
                 * MSI-X vector resource IDs start at 1, so we add one to
                 * ...
                 */
                qpair->rid = qpair->vector + 1;

                qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
                    &qpair->rid, RF_ACTIVE);
                if (qpair->res == NULL) {
                        ...
                }
                if (bus_setup_intr(ctrlr->dev, qpair->res,
                    ..., nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
                        ...
                }
                if (qpair->id == 0) {
                        bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
                            "admin");
                } else {
                        bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
                            "io%d", qpair->id - 1);
                }
        }

/* nvme_qpair_destroy() */
        mtx_lock(&qpair->recovery);
        qpair->timer_armed = false;
        mtx_unlock(&qpair->recovery);
        callout_drain(&qpair->timer);

        if (qpair->tag) {
                bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
                qpair->tag = NULL;
        }

        if (qpair->act_tr) {
                free(qpair->act_tr, M_NVME);
                qpair->act_tr = NULL;
        }

        while (!TAILQ_EMPTY(&qpair->free_tr)) {
                tr = TAILQ_FIRST(&qpair->free_tr);
                TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
                bus_dmamap_destroy(qpair->dma_tag_payload,
                    tr->payload_dma_map);
                ...
        }

        if (qpair->cmd != NULL) {
                bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
                bus_dmamem_free(qpair->dma_tag, qpair->cmd,
                    qpair->queuemem_map);
                qpair->cmd = NULL;
        }

        if (qpair->dma_tag) {
                bus_dma_tag_destroy(qpair->dma_tag);
                qpair->dma_tag = NULL;
        }

        if (qpair->dma_tag_payload) {
                bus_dma_tag_destroy(qpair->dma_tag_payload);
                qpair->dma_tag_payload = NULL;
        }

        if (mtx_initialized(&qpair->lock))
                mtx_destroy(&qpair->lock);
        if (mtx_initialized(&qpair->recovery))
                mtx_destroy(&qpair->recovery);

        if (qpair->res) {
                bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(qpair->res), qpair->res);
                qpair->res = NULL;
        }

/* nvme_admin_qpair_abort_aers() */
        mtx_assert(&qpair->lock, MA_NOTOWNED);

        mtx_lock(&qpair->lock);
        tr = TAILQ_FIRST(&qpair->outstanding_tr);
        while (tr != NULL) {
                if (tr->req->cmd.opc != NVME_OPC_ASYNC_EVENT_REQUEST) {
                        ...
                }
                mtx_unlock(&qpair->lock);
                ...
                mtx_lock(&qpair->lock);
                tr = TAILQ_FIRST(&qpair->outstanding_tr);
        }
        mtx_unlock(&qpair->lock);

/* nvme_admin_qpair_destroy() */
        mtx_assert(&qpair->lock, MA_NOTOWNED);
        ...

/* nvme_abort_complete() */
        mtx_lock(&tr->qpair->lock);
        if ((status->cdw0 & 1) == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
                /*
                 * ...
                 */
                nvme_printf(tr->qpair->ctrlr,
                    ...);
                ...
        }
        mtx_unlock(&tr->qpair->lock);

/* nvme_qpair_timeout() */
        struct nvme_controller *ctrlr = qpair->ctrlr;
        ...
        bool is_admin = qpair == &ctrlr->adminq;
        ...
        mtx_assert(&qpair->recovery, MA_OWNED);
        ...
        if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) {
                nvme_printf(qpair->ctrlr,
                    ...);
                qpair->timer_armed = false;
                ...
        }

        /*
         * Shutdown condition: We set qpair->timer_armed to false in
         * ...
         */
        if (!qpair->timer_armed) {
                nvme_printf(qpair->ctrlr,
                    ...);
                ...
        }

        switch (qpair->recovery_state) {
        ...
                /*
                 * Read csts to get value of cfs - controller fatal status. If
                 * we are in the hot-plug or controller failed status proceed
                 * ...
                 */
                ...
                /*
                 * If we get here due to a possible surprise hot-unplug
                 * ...
                 */
                        qpair->recovery_state = RECOVERY_WAITING;
                ...
                mtx_lock(&qpair->lock);
                ...
                TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
                        ...
                        if (tr->deadline == SBT_MAX)
                                continue;
                        ...
                        if (now <= tr->deadline)
                                ...
                }
                ...
                mtx_unlock(&qpair->lock);
                ...
                /*
                 * ...
                 * deadline has passed. Poll the completions as a last-ditch
                 * ...
                 */
                if (_nvme_qpair_process_completions(qpair) && !ctrlr->isr_warned) {
                        ...
                        ctrlr->isr_warned = true;
                }

                /*
                 * Now that we've run the ISR, re-check to see if there's any
                 * ...
                 */
                mtx_lock(&qpair->lock);
                ...
                TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
                        ...
                        if (tr->deadline == SBT_MAX)
                                continue;
                        ...
                        if (now <= tr->deadline)
                                ...
                        ...
                        if (ctrlr->enable_aborts &&
                            tr->req->cb_fn != nvme_abort_complete) {
                                /*
                                 * ...
                                 */
                                nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
                                    ...);
                        }
                        ...
                }
                ...
                mtx_unlock(&qpair->lock);
                ...
                mtx_unlock(&qpair->lock);
        ...
        }
        ...
        if (!device_is_suspended(ctrlr->dev))
                ...
        ...
        callout_schedule_sbt(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 0);
        ...
        qpair->timer_armed = false;
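
Both tracker scans above walk outstanding_tr in submission order, skip trackers with deadline == SBT_MAX (timeouts disabled, e.g. polled requests), and can stop at the first deadline still in the future. A simplified, self-contained version of the first-pass scan, built on FreeBSD's <sys/queue.h> and assuming deadlines are non-decreasing along the list:

#include <sys/queue.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t toy_sbintime_t;         /* sbintime_t is 32.32 fixed point */
#define TOY_SBT_MAX     INT64_MAX       /* "no timeout armed" sentinel */

struct toy_tracker {
        toy_sbintime_t                  deadline;
        TAILQ_ENTRY(toy_tracker)        tailq;
};
TAILQ_HEAD(toy_tr_head, toy_tracker);

/*
 * Trackers are appended at submission time, so deadlines grow along
 * the list: skip the disabled ones and decide at the first tracker
 * that has a deadline armed.
 */
static bool
any_tracker_expired(struct toy_tr_head *head, toy_sbintime_t now)
{
        struct toy_tracker *tr;

        TAILQ_FOREACH(tr, head, tailq) {
                if (tr->deadline == TOY_SBT_MAX)
                        continue;
                return (now > tr->deadline);
        }
        return (false);
}

int
main(void)
{
        struct toy_tr_head head = TAILQ_HEAD_INITIALIZER(head);
        struct toy_tracker a = { .deadline = TOY_SBT_MAX };
        struct toy_tracker b = { .deadline = 1000 };

        TAILQ_INSERT_TAIL(&head, &a, tailq);
        TAILQ_INSERT_TAIL(&head, &b, tailq);
        printf("expired: %d\n", any_tracker_expired(&head, 2000)); /* 1 */
        return (0);
}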

/* nvme_qpair_submit_tracker() */
        mtx_assert(&qpair->lock, MA_OWNED);

        req = tr->req;
        req->cmd.cid = tr->cid;
        qpair->act_tr[tr->cid] = tr;
        ctrlr = qpair->ctrlr;

        if (req->timeout) {
                if (req->cb_fn == nvme_completion_poll_cb)
                        ...
                else if (qpair->id == 0)
                        timeout = ctrlr->admin_timeout_period;
                else
                        timeout = ctrlr->timeout_period;
                tr->deadline = getsbinuptime() + timeout * SBT_1S;
                if (!qpair->timer_armed) {
                        qpair->timer_armed = true;
                        callout_reset_sbt_on(&qpair->timer, SBT_1S / 2, SBT_1S / 2,
                            nvme_qpair_timeout, qpair, qpair->cpu, 0);
                }
        } else
                tr->deadline = SBT_MAX;
        ...
        memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

        if (++qpair->sq_tail == qpair->num_entries)
                qpair->sq_tail = 0;

        bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
            ...);
        bus_space_write_4(ctrlr->bus_tag, ctrlr->bus_handle,
            qpair->sq_tdbl_off, qpair->sq_tail);
        qpair->num_cmds++;
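
The tail of the submit path shows the canonical sequence: copy the command into the ring slot at sq_tail, advance the tail with wrap-around, sync the queue memory, then write the new tail to the SQ tail doorbell so the controller never fetches a half-written entry. A toy model of that ordering; the toy_* names are illustrative and the doorbell is a plain variable standing in for the mapped register:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_cmd {
        uint16_t cid;
        uint8_t  opc;
};

struct toy_sq {
        struct toy_cmd          *ring;
        uint32_t                num_entries;
        uint32_t                tail;
        volatile uint32_t       *tdbl;  /* stand-in for the SQ tail doorbell */
};

/*
 * Copy first, ring the doorbell last. In the driver a
 * bus_dmamap_sync() sits between the copy and the doorbell write so
 * the command is visible to the device before it is told to fetch.
 */
static void
toy_sq_submit(struct toy_sq *sq, const struct toy_cmd *cmd)
{
        memcpy(&sq->ring[sq->tail], cmd, sizeof(*cmd));
        if (++sq->tail == sq->num_entries)
                sq->tail = 0;
        *sq->tdbl = sq->tail;   /* doorbell holds the new tail index */
}

int
main(void)
{
        struct toy_cmd ring[4];
        uint32_t doorbell = 0;
        struct toy_sq sq = { ring, 4, 0, &doorbell };
        struct toy_cmd cmd = { .cid = 7, .opc = 0x02 };

        toy_sq_submit(&sq, &cmd);
        printf("tail %u doorbell %u\n", sq.tail, doorbell);     /* 1 1 */
        return (0);
}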

/* nvme_payload_map() */
        nvme_printf(tr->qpair->ctrlr,
            ...);
        ...
        /*
         * Note that we specified ctrlr->page_size for alignment and max
         * ...
         */
        tr->req->cmd.prp1 = htole64(seg[0].ds_addr);
        ...
                tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
        ...
                tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
                ...
                        tr->prp[cur_nseg-1] =
                            ...;
                ...
                tr->req->cmd.prp2 = 0;
        ...
        bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
            ...);
        nvme_qpair_submit_tracker(tr->qpair, tr);
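
The PRP setup elided above chooses among three shapes: one segment needs only PRP1, exactly two segments fit in PRP1/PRP2, and anything longer makes PRP2 point at the per-tracker PRP list (tr->prp_bus_addr) holding the remaining segment addresses. A sketch under the driver's constraints (page-aligned, page-sized segments after the first); build_prps() is hypothetical, and the real code also converts each entry with htole64():

#include <stdint.h>
#include <stdio.h>

static void
build_prps(const uint64_t *seg, int nseg, uint64_t *prp_list,
    uint64_t prp_list_busaddr, uint64_t *prp1, uint64_t *prp2)
{
        *prp1 = seg[0];
        if (nseg == 1)
                *prp2 = 0;              /* unused */
        else if (nseg == 2)
                *prp2 = seg[1];         /* second data page directly */
        else {
                *prp2 = prp_list_busaddr;       /* points at the PRP list */
                for (int i = 1; i < nseg; i++)
                        prp_list[i - 1] = seg[i];
        }
}

int
main(void)
{
        uint64_t seg[3] = { 0x10000, 0x11000, 0x12000 };
        uint64_t prp_list[8], prp1, prp2;

        build_prps(seg, 3, prp_list, 0x20000, &prp1, &prp2);
        printf("prp1 %#jx prp2 %#jx list { %#jx %#jx }\n",
            (uintmax_t)prp1, (uintmax_t)prp2,
            (uintmax_t)prp_list[0], (uintmax_t)prp_list[1]);
        return (0);
}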

/* _nvme_qpair_submit_request() */
        bool is_admin = qpair == &qpair->ctrlr->adminq;

        mtx_assert(&qpair->lock, MA_OWNED);

        tr = TAILQ_FIRST(&qpair->free_tr);
        req->qpair = qpair;
        ...
        if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) {
                ...
        }

        /*
         * ...
         * in-progress controller-level reset. If we lose the race with
         * ...
         * qpair->lock also held, so if we observe that the state is not NONE,
         * ...
         */
        if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
                STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
                ...
        }

        TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
        TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
        tr->deadline = SBT_MAX;
        tr->req = req;

        if (!req->payload_valid) {
                nvme_qpair_submit_tracker(tr->qpair, tr);
                ...
        }

        /*
         * ...
         * tr->deadline updating when nvme_payload_map calls
         * ...
         */
        err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload,
            tr->payload_dma_map, &req->payload, nvme_payload_map, tr, 0);
        ...
                nvme_printf(qpair->ctrlr,
                    ...);
                mtx_unlock(&qpair->lock);
                ...
                mtx_lock(&qpair->lock);
        ...

/* nvme_qpair_submit_request() */
        mtx_lock(&qpair->lock);
        ...
        mtx_unlock(&qpair->lock);

/* nvme_qpair_enable() */
        bool is_admin __unused = qpair == &qpair->ctrlr->adminq;

        if (mtx_initialized(&qpair->recovery))
                mtx_assert(&qpair->recovery, MA_OWNED);
        if (mtx_initialized(&qpair->lock))
                mtx_assert(&qpair->lock, MA_OWNED);
        KASSERT(!(is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed),
            ...);

        qpair->recovery_state = RECOVERY_NONE;

/* nvme_qpair_reset() */
        qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

        /*
         * ...
         */
        qpair->phase = 1;

        memset(qpair->cmd, 0,
            qpair->num_entries * sizeof(struct nvme_command));
        memset(qpair->cpl, 0,
            qpair->num_entries * sizeof(struct nvme_completion));

/* nvme_admin_qpair_enable() */
        rpt = !TAILQ_EMPTY(&qpair->outstanding_tr);
        if (rpt)
                nvme_printf(qpair->ctrlr,
                    ...);
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
                ...
        }
        if (rpt)
                nvme_printf(qpair->ctrlr,
                    ...);

        mtx_lock(&qpair->recovery);
        mtx_lock(&qpair->lock);
        ...
        mtx_unlock(&qpair->lock);
        mtx_unlock(&qpair->recovery);

/* nvme_io_qpair_enable() */
        report = !TAILQ_EMPTY(&qpair->outstanding_tr);
        if (report)
                nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
                ...
        }
        if (report)
                nvme_printf(qpair->ctrlr, "done aborting outstanding i/o\n");

        mtx_lock(&qpair->recovery);
        mtx_lock(&qpair->lock);
        ...
        STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);
        ...
        if (report)
                nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
        ...
                nvme_qpair_print_command(qpair, &req->cmd);
        ...
        if (report)
                nvme_printf(qpair->ctrlr, "done resubmitting i/o\n");

        mtx_unlock(&qpair->lock);
        mtx_unlock(&qpair->recovery);
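
The queued-request drain above relies on STAILQ_SWAP(): move the whole pending list into a local head while the lock is held, then walk the local copy, so a request that defers itself again lands on the now-empty qpair list instead of making the walk loop forever. A runnable sketch of the idiom with FreeBSD's <sys/queue.h>; toy_req and resubmit_all() are illustrative:

#include <sys/queue.h>
#include <stdio.h>

struct toy_req {
        int                     id;
        STAILQ_ENTRY(toy_req)   stailq;
};
STAILQ_HEAD(toy_req_head, toy_req);

/* Swap out the pending list, then drain the private copy. */
static void
resubmit_all(struct toy_req_head *pending)
{
        struct toy_req_head temp = STAILQ_HEAD_INITIALIZER(temp);
        struct toy_req *req;

        STAILQ_SWAP(pending, &temp, toy_req);
        while (!STAILQ_EMPTY(&temp)) {
                req = STAILQ_FIRST(&temp);
                STAILQ_REMOVE_HEAD(&temp, stailq);
                /* _nvme_qpair_submit_request(qpair, req) in the driver */
                printf("resubmitting request %d\n", req->id);
        }
}

int
main(void)
{
        struct toy_req_head pending = STAILQ_HEAD_INITIALIZER(pending);
        struct toy_req a = { .id = 1 }, b = { .id = 2 };

        STAILQ_INSERT_TAIL(&pending, &a, stailq);
        STAILQ_INSERT_TAIL(&pending, &b, stailq);
        resubmit_all(&pending);
        return (0);
}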

/* nvme_qpair_disable() */
        if (mtx_initialized(&qpair->recovery))
                mtx_assert(&qpair->recovery, MA_OWNED);
        if (mtx_initialized(&qpair->lock))
                mtx_assert(&qpair->lock, MA_OWNED);

        qpair->recovery_state = RECOVERY_WAITING;
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
                tr->deadline = SBT_MAX;
        }

/* nvme_admin_qpair_disable() */
        mtx_lock(&qpair->recovery);
        ...
        mtx_lock(&qpair->lock);
        ...
        mtx_unlock(&qpair->lock);
        ...
        mtx_unlock(&qpair->recovery);

/* nvme_io_qpair_disable() */
        mtx_lock(&qpair->recovery);
        mtx_lock(&qpair->lock);
        ...
        mtx_unlock(&qpair->lock);
        mtx_unlock(&qpair->recovery);

/* nvme_qpair_fail() */
        if (!mtx_initialized(&qpair->lock))
                return;

        mtx_lock(&qpair->lock);

        if (!STAILQ_EMPTY(&qpair->queued_req)) {
                nvme_printf(qpair->ctrlr, "failing queued i/o\n");
        }
        while (!STAILQ_EMPTY(&qpair->queued_req)) {
                req = STAILQ_FIRST(&qpair->queued_req);
                STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
                mtx_unlock(&qpair->lock);
                ...
                mtx_lock(&qpair->lock);
        }

        if (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
                nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
        }
        while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
                tr = TAILQ_FIRST(&qpair->outstanding_tr);
                ...
                mtx_unlock(&qpair->lock);
                ...
                mtx_lock(&qpair->lock);
        }
        mtx_unlock(&qpair->lock);