Cross-reference listing: lines matching refs:qpair in the FreeBSD NVMe driver's queue-pair code (nvme_qpair.c). Numbers on each line are the driver's own source line numbers.
43 static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
45 static void nvme_qpair_destroy(struct nvme_qpair *qpair);
60 nvme_admin_qpair_print_command(struct nvme_qpair *qpair, in nvme_admin_qpair_print_command() argument
65 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%x " in nvme_admin_qpair_print_command()
67 get_opcode_string(true, cmd->opc, buf, sizeof(buf)), qpair->id, in nvme_admin_qpair_print_command()
73 nvme_io_qpair_print_command(struct nvme_qpair *qpair, in nvme_io_qpair_print_command() argument
85 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " in nvme_io_qpair_print_command()
88 qpair->id, cmd->cid, le32toh(cmd->nsid), in nvme_io_qpair_print_command()
93 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", in nvme_io_qpair_print_command()
95 qpair->id, cmd->cid, le32toh(cmd->nsid)); in nvme_io_qpair_print_command()
101 nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) in nvme_qpair_print_command() argument
103 if (qpair->id == 0) in nvme_qpair_print_command()
104 nvme_admin_qpair_print_command(qpair, cmd); in nvme_qpair_print_command()
106 nvme_io_qpair_print_command(qpair, cmd); in nvme_qpair_print_command()
108 nvme_printf(qpair->ctrlr, in nvme_qpair_print_command()
112 nvme_printf(qpair->ctrlr, in nvme_qpair_print_command()
132 nvme_qpair_print_completion(struct nvme_qpair *qpair, in nvme_qpair_print_completion() argument
143 nvme_printf(qpair->ctrlr, "%s crd:%x m:%x dnr:%x p:%d " in nvme_qpair_print_completion()
213 struct nvme_qpair *qpair = tr->qpair; in nvme_qpair_complete_tracker() local
217 mtx_assert(&qpair->lock, MA_NOTOWNED); in nvme_qpair_complete_tracker()
224 qpair->num_retries++; in nvme_qpair_complete_tracker()
226 qpair->num_failures++; in nvme_qpair_complete_tracker()
230 nvme_qpair_print_command(qpair, &req->cmd); in nvme_qpair_complete_tracker()
231 nvme_qpair_print_completion(qpair, cpl); in nvme_qpair_complete_tracker()
234 qpair->act_tr[cpl->cid] = NULL; in nvme_qpair_complete_tracker()
240 bus_dmamap_sync(qpair->dma_tag_payload, in nvme_qpair_complete_tracker()
248 mtx_lock(&qpair->lock); in nvme_qpair_complete_tracker()
252 nvme_qpair_submit_tracker(qpair, tr); in nvme_qpair_complete_tracker()
255 bus_dmamap_unload(qpair->dma_tag_payload, in nvme_qpair_complete_tracker()
262 TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq); in nvme_qpair_complete_tracker()
263 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); in nvme_qpair_complete_tracker()
270 if (!STAILQ_EMPTY(&qpair->queued_req) && in nvme_qpair_complete_tracker()
271 !qpair->ctrlr->is_resetting) { in nvme_qpair_complete_tracker()
272 req = STAILQ_FIRST(&qpair->queued_req); in nvme_qpair_complete_tracker()
273 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); in nvme_qpair_complete_tracker()
274 _nvme_qpair_submit_request(qpair, req); in nvme_qpair_complete_tracker()
278 mtx_unlock(&qpair->lock); in nvme_qpair_complete_tracker()
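
The completion path above (lines 213-278) makes its retry decision before recycling the tracker: a retriable status bumps num_retries and resubmits on the spot, anything else bumps num_failures and frees the tracker, and either way one parked request is pulled off queued_req while the lock is held. A minimal user-space sketch of that retry-or-recycle policy follows; the types and the max_retries budget are illustrative stand-ins, not the driver's exact fields:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified stand-in for the driver's tracker. */
struct tracker { uint32_t retries; };

/* Retry-or-recycle policy modeled on nvme_qpair_complete_tracker():
 * retry while the status is retriable and the retry budget allows it;
 * otherwise complete the request and recycle the tracker. */
static bool
should_retry(bool status_retriable, bool dnr /* do-not-retry bit */,
    struct tracker *tr, uint32_t max_retries)
{
	if (!status_retriable || dnr)
		return false;		/* hard failure: complete now */
	if (tr->retries >= max_retries)
		return false;		/* budget exhausted */
	tr->retries++;			/* counted like qpair->num_retries */
	return true;			/* caller resubmits the tracker */
}
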
301 struct nvme_qpair * qpair = tr->qpair; in nvme_qpair_manual_complete_tracker() local
303 mtx_assert(&qpair->lock, MA_NOTOWNED); in nvme_qpair_manual_complete_tracker()
307 cpl.sqid = qpair->id; in nvme_qpair_manual_complete_tracker()
314 nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, in nvme_qpair_manual_complete_request() argument
322 cpl.sqid = qpair->id; in nvme_qpair_manual_complete_request()
327 nvme_qpair_print_command(qpair, &req->cmd); in nvme_qpair_manual_complete_request()
328 nvme_qpair_print_completion(qpair, &cpl); in nvme_qpair_manual_complete_request()
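
Both manual-completion helpers (lines 301-328) fabricate a struct nvme_completion on the stack, stamp it with the queue's id as sqid, and pack SCT/SC into the 16-bit status word before handing it to the normal completion path. A sketch of packing such a status word; the bit positions match the driver's packed 16-bit status word (P bit 0, SC bits 8:1, SCT bits 11:9, DNR bit 15), which its NVME_STATUS_* macros encode:

#include <stdint.h>

/* Build a synthetic completion status, as the manual-completion
 * paths do for requests that never reached the device. */
static uint16_t
make_status(uint8_t sct, uint8_t sc, int dnr, int phase)
{
	return (uint16_t)((phase & 0x1) |
	    ((uint16_t)sc << 1) |
	    (((uint16_t)sct & 0x7) << 9) |
	    (uint16_t)((dnr & 0x1) << 15));
}
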
339 _nvme_qpair_process_completions(struct nvme_qpair *qpair) in _nvme_qpair_process_completions() argument
346 mtx_assert(&qpair->recovery, MA_OWNED); in _nvme_qpair_process_completions()
356 if (qpair->recovery_state != RECOVERY_NONE) { in _nvme_qpair_process_completions()
357 qpair->num_ignored++; in _nvme_qpair_process_completions()
369 KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0), in _nvme_qpair_process_completions()
371 device_get_nameunit(qpair->ctrlr->dev))); in _nvme_qpair_process_completions()
373 qpair->num_intr_handler_calls++; in _nvme_qpair_process_completions()
375 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, in _nvme_qpair_process_completions()
388 if (qpair->cq_head == qpair->num_entries) { in _nvme_qpair_process_completions()
394 qpair->cq_head = 0; in _nvme_qpair_process_completions()
395 qpair->phase = !qpair->phase; in _nvme_qpair_process_completions()
396 } else if (qpair->cq_head == 0) { in _nvme_qpair_process_completions()
404 cpl = qpair->cpl[qpair->num_entries - 1]; in _nvme_qpair_process_completions()
406 qpair->phase = !NVME_STATUS_GET_P(cpl.status); in _nvme_qpair_process_completions()
423 status = le16toh(qpair->cpl[qpair->cq_head].status); in _nvme_qpair_process_completions()
424 if (NVME_STATUS_GET_P(status) != qpair->phase) in _nvme_qpair_process_completions()
427 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, in _nvme_qpair_process_completions()
429 cpl = qpair->cpl[qpair->cq_head]; in _nvme_qpair_process_completions()
436 if (cpl.cid < qpair->num_trackers) in _nvme_qpair_process_completions()
437 tr = qpair->act_tr[cpl.cid]; in _nvme_qpair_process_completions()
444 qpair->sq_head = cpl.sqhd; in _nvme_qpair_process_completions()
458 nvme_printf(qpair->ctrlr, in _nvme_qpair_process_completions()
461 nvme_qpair_print_completion(qpair, in _nvme_qpair_process_completions()
462 &qpair->cpl[qpair->cq_head]); in _nvme_qpair_process_completions()
472 if (++qpair->cq_head == qpair->num_entries) { /* 1 */ in _nvme_qpair_process_completions()
473 atomic_store_rel_int(&qpair->cq_head, 0); /* 2 */ in _nvme_qpair_process_completions()
474 qpair->phase = !qpair->phase; /* 3 */ in _nvme_qpair_process_completions()
479 bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle, in _nvme_qpair_process_completions()
480 qpair->cq_hdbl_off, qpair->cq_head); in _nvme_qpair_process_completions()
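
_nvme_qpair_process_completions (lines 339-480) is the classic NVMe phase-bit consumer: a completion is "new" only while its Phase bit matches qpair->phase, and each wrap of cq_head (the 1/2/3 comments at lines 472-474) resets the head and inverts the expected phase before the new head is published through the CQ head doorbell. Below is a self-contained, single-threaded model of that loop; the driver additionally DMA-syncs the ring, looks up the tracker by cid, and uses a release store when resetting the head:

#include <stdbool.h>
#include <stdint.h>

struct cpl { uint16_t status; };	/* bit 0 = Phase */

struct cq {
	struct cpl *entries;
	uint32_t    num_entries;
	uint32_t    head;
	bool        phase;		/* phase expected in new entries */
};

#define CPL_PHASE(st) (((st) & 0x1) != 0)

/* Consume every completion whose Phase bit matches the expected
 * phase; flipping the expectation on wrap is what lets the consumer
 * tell fresh entries from stale ones left over from the last lap. */
static int
cq_drain(struct cq *q)
{
	int done = 0;

	for (;;) {
		struct cpl *c = &q->entries[q->head];

		if (CPL_PHASE(c->status) != q->phase)
			break;			/* no new entry yet */
		/* ... process completion c here ... */
		done++;
		if (++q->head == q->num_entries) {
			q->head = 0;
			q->phase = !q->phase;	/* wrapped: invert */
		}
	}
	/* Caller would now write q->head to the CQ head doorbell. */
	return done;
}
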
487 nvme_qpair_process_completions(struct nvme_qpair *qpair) in nvme_qpair_process_completions() argument
497 mtx_lock(&qpair->recovery); in nvme_qpair_process_completions()
499 if (__predict_true(qpair->recovery_state == RECOVERY_NONE)) in nvme_qpair_process_completions()
500 done = _nvme_qpair_process_completions(qpair); in nvme_qpair_process_completions()
502 qpair->num_recovery_nolock++; // XXX likely need to rename in nvme_qpair_process_completions()
504 mtx_unlock(&qpair->recovery); in nvme_qpair_process_completions()
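
The public wrapper serializes against recovery: the scan only runs in the RECOVERY_NONE state, and passes that arrive while recovery holds the state are merely counted (num_recovery_nolock). A sketch of that gate, using a plain pthread mutex in place of the kernel mtx:

#include <pthread.h>

enum recovery_state { RECOVERY_NONE, RECOVERY_WAITING };

struct qp {
	pthread_mutex_t     recovery;
	enum recovery_state recovery_state;
	unsigned long       num_recovery_nolock;
};

/* Gate pattern from nvme_qpair_process_completions(): hold the
 * recovery lock across the scan so the timeout/recovery path and the
 * interrupt path never walk the completion queue concurrently. */
static void
process_completions(struct qp *q, int (*scan)(struct qp *))
{
	pthread_mutex_lock(&q->recovery);
	if (q->recovery_state == RECOVERY_NONE)
		(void)scan(q);			/* common, fast path */
	else
		q->num_recovery_nolock++;	/* recovery active; skip */
	pthread_mutex_unlock(&q->recovery);
}
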
512 struct nvme_qpair *qpair = arg; in nvme_qpair_msi_handler() local
514 nvme_qpair_process_completions(qpair); in nvme_qpair_msi_handler()
518 nvme_qpair_construct(struct nvme_qpair *qpair, in nvme_qpair_construct() argument
528 qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0; in nvme_qpair_construct()
529 qpair->num_entries = num_entries; in nvme_qpair_construct()
530 qpair->num_trackers = num_trackers; in nvme_qpair_construct()
531 qpair->ctrlr = ctrlr; in nvme_qpair_construct()
533 mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF); in nvme_qpair_construct()
534 mtx_init(&qpair->recovery, "nvme qpair recovery", NULL, MTX_DEF); in nvme_qpair_construct()
536 callout_init_mtx(&qpair->timer, &qpair->recovery, 0); in nvme_qpair_construct()
537 qpair->timer_armed = false; in nvme_qpair_construct()
538 qpair->recovery_state = RECOVERY_WAITING; in nvme_qpair_construct()
546 NULL, NULL, &qpair->dma_tag_payload); in nvme_qpair_construct()
556 cmdsz = qpair->num_entries * sizeof(struct nvme_command); in nvme_qpair_construct()
558 cplsz = qpair->num_entries * sizeof(struct nvme_completion); in nvme_qpair_construct()
567 prpmemsz = qpair->num_trackers * prpsz; in nvme_qpair_construct()
572 allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag); in nvme_qpair_construct()
577 bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain); in nvme_qpair_construct()
579 if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem, in nvme_qpair_construct()
580 BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) { in nvme_qpair_construct()
585 if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, in nvme_qpair_construct()
588 bus_dmamem_free(qpair->dma_tag, qpair->cmd, in nvme_qpair_construct()
589 qpair->queuemem_map); in nvme_qpair_construct()
593 qpair->num_cmds = 0; in nvme_qpair_construct()
594 qpair->num_intr_handler_calls = 0; in nvme_qpair_construct()
595 qpair->num_retries = 0; in nvme_qpair_construct()
596 qpair->num_failures = 0; in nvme_qpair_construct()
597 qpair->num_ignored = 0; in nvme_qpair_construct()
598 qpair->cmd = (struct nvme_command *)queuemem; in nvme_qpair_construct()
599 qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz); in nvme_qpair_construct()
601 qpair->cmd_bus_addr = queuemem_phys; in nvme_qpair_construct()
602 qpair->cpl_bus_addr = queuemem_phys + cmdsz; in nvme_qpair_construct()
610 qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) + in nvme_qpair_construct()
611 (qpair->id << (ctrlr->dstrd + 1)); in nvme_qpair_construct()
612 qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) + in nvme_qpair_construct()
613 (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd); in nvme_qpair_construct()
615 TAILQ_INIT(&qpair->free_tr); in nvme_qpair_construct()
616 TAILQ_INIT(&qpair->outstanding_tr); in nvme_qpair_construct()
617 STAILQ_INIT(&qpair->queued_req); in nvme_qpair_construct()
621 for (i = 0; i < qpair->num_trackers; i++) { in nvme_qpair_construct()
623 qpair->num_trackers = i; in nvme_qpair_construct()
639 DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK); in nvme_qpair_construct()
640 bus_dmamap_create(qpair->dma_tag_payload, 0, in nvme_qpair_construct()
643 tr->qpair = qpair; in nvme_qpair_construct()
646 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); in nvme_qpair_construct()
651 if (qpair->num_trackers == 0) { in nvme_qpair_construct()
656 qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) * in nvme_qpair_construct()
657 qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain), in nvme_qpair_construct()
665 qpair->rid = qpair->vector + 1; in nvme_qpair_construct()
667 qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, in nvme_qpair_construct()
668 &qpair->rid, RF_ACTIVE); in nvme_qpair_construct()
669 if (qpair->res == NULL) { in nvme_qpair_construct()
673 if (bus_setup_intr(ctrlr->dev, qpair->res, in nvme_qpair_construct()
675 nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) { in nvme_qpair_construct()
679 if (qpair->id == 0) { in nvme_qpair_construct()
680 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, in nvme_qpair_construct()
683 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, in nvme_qpair_construct()
684 "io%d", qpair->id - 1); in nvme_qpair_construct()
691 nvme_qpair_destroy(qpair); in nvme_qpair_construct()
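
Within nvme_qpair_construct, lines 610-613 derive the two per-queue doorbell offsets from the controller's doorbell stride. The sketch below works that arithmetic through; it assumes ctrlr->dstrd holds log2 of the stride in bytes (i.e. CAP.DSTRD + 2), and DOORBELL_BASE stands in for nvme_mmio_offsetof(doorbell[0]):

#include <stdint.h>

#define DOORBELL_BASE 0x1000u	/* doorbell[0] in the NVMe register map */

/* Queue pair `id` owns two consecutive doorbells: SQ tail first,
 * then CQ head one stride later. */
static uint32_t
sq_tdbl_off(uint32_t id, uint32_t dstrd)
{
	return DOORBELL_BASE + (id << (dstrd + 1));
}

static uint32_t
cq_hdbl_off(uint32_t id, uint32_t dstrd)
{
	return sq_tdbl_off(id, dstrd) + (1u << dstrd);
}

/* Example: dstrd = 2 (4-byte stride, CAP.DSTRD = 0), queue pair 1:
 * SQ tail doorbell at 0x1000 + (1 << 3) = 0x1008,
 * CQ head doorbell at 0x1008 + 4       = 0x100c. */
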
696 nvme_qpair_destroy(struct nvme_qpair *qpair) in nvme_qpair_destroy() argument
700 mtx_lock(&qpair->recovery); in nvme_qpair_destroy()
701 qpair->timer_armed = false; in nvme_qpair_destroy()
702 mtx_unlock(&qpair->recovery); in nvme_qpair_destroy()
703 callout_drain(&qpair->timer); in nvme_qpair_destroy()
705 if (qpair->tag) { in nvme_qpair_destroy()
706 bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag); in nvme_qpair_destroy()
707 qpair->tag = NULL; in nvme_qpair_destroy()
710 if (qpair->act_tr) { in nvme_qpair_destroy()
711 free(qpair->act_tr, M_NVME); in nvme_qpair_destroy()
712 qpair->act_tr = NULL; in nvme_qpair_destroy()
715 while (!TAILQ_EMPTY(&qpair->free_tr)) { in nvme_qpair_destroy()
716 tr = TAILQ_FIRST(&qpair->free_tr); in nvme_qpair_destroy()
717 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); in nvme_qpair_destroy()
718 bus_dmamap_destroy(qpair->dma_tag_payload, in nvme_qpair_destroy()
723 if (qpair->cmd != NULL) { in nvme_qpair_destroy()
724 bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map); in nvme_qpair_destroy()
725 bus_dmamem_free(qpair->dma_tag, qpair->cmd, in nvme_qpair_destroy()
726 qpair->queuemem_map); in nvme_qpair_destroy()
727 qpair->cmd = NULL; in nvme_qpair_destroy()
730 if (qpair->dma_tag) { in nvme_qpair_destroy()
731 bus_dma_tag_destroy(qpair->dma_tag); in nvme_qpair_destroy()
732 qpair->dma_tag = NULL; in nvme_qpair_destroy()
735 if (qpair->dma_tag_payload) { in nvme_qpair_destroy()
736 bus_dma_tag_destroy(qpair->dma_tag_payload); in nvme_qpair_destroy()
737 qpair->dma_tag_payload = NULL; in nvme_qpair_destroy()
740 if (mtx_initialized(&qpair->lock)) in nvme_qpair_destroy()
741 mtx_destroy(&qpair->lock); in nvme_qpair_destroy()
742 if (mtx_initialized(&qpair->recovery)) in nvme_qpair_destroy()
743 mtx_destroy(&qpair->recovery); in nvme_qpair_destroy()
745 if (qpair->res) { in nvme_qpair_destroy()
746 bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ, in nvme_qpair_destroy()
747 rman_get_rid(qpair->res), qpair->res); in nvme_qpair_destroy()
748 qpair->res = NULL; in nvme_qpair_destroy()
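
nvme_qpair_destroy doubles as the unwind path for a construct failure (the jump at line 691), which is why every step above tests for a resource that may never have been created and clears the pointer once it is released. The guard-and-clear shape, reduced to a user-space toy:

#include <stdlib.h>

struct res { int dummy; };

struct obj {
	struct res *irq_tag;	/* stand-ins for qpair->tag, act_tr, ... */
	struct res *trackers;
};

/* Each step is skipped if its resource was never allocated, and the
 * pointer is cleared afterwards, so the function is safe both as the
 * normal destructor and as the unwind path for a failed constructor. */
static void
obj_destroy(struct obj *o)
{
	if (o->irq_tag != NULL) {
		free(o->irq_tag);
		o->irq_tag = NULL;
	}
	if (o->trackers != NULL) {
		free(o->trackers);
		o->trackers = NULL;
	}
}
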
753 nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) in nvme_admin_qpair_abort_aers() argument
765 mtx_assert(&qpair->lock, MA_NOTOWNED); in nvme_admin_qpair_abort_aers()
767 mtx_lock(&qpair->lock); in nvme_admin_qpair_abort_aers()
768 tr = TAILQ_FIRST(&qpair->outstanding_tr); in nvme_admin_qpair_abort_aers()
774 mtx_unlock(&qpair->lock); in nvme_admin_qpair_abort_aers()
778 mtx_lock(&qpair->lock); in nvme_admin_qpair_abort_aers()
779 tr = TAILQ_FIRST(&qpair->outstanding_tr); in nvme_admin_qpair_abort_aers()
781 mtx_unlock(&qpair->lock); in nvme_admin_qpair_abort_aers()
785 nvme_admin_qpair_destroy(struct nvme_qpair *qpair) in nvme_admin_qpair_destroy() argument
787 mtx_assert(&qpair->lock, MA_NOTOWNED); in nvme_admin_qpair_destroy()
789 nvme_admin_qpair_abort_aers(qpair); in nvme_admin_qpair_destroy()
790 nvme_qpair_destroy(qpair); in nvme_admin_qpair_destroy()
794 nvme_io_qpair_destroy(struct nvme_qpair *qpair) in nvme_io_qpair_destroy() argument
797 nvme_qpair_destroy(qpair); in nvme_io_qpair_destroy()
814 mtx_lock(&tr->qpair->lock); in nvme_abort_complete()
815 if ((status->cdw0 & 1) == 1 && tr->qpair->act_tr[tr->cid] != NULL) { in nvme_abort_complete()
822 nvme_printf(tr->qpair->ctrlr, in nvme_abort_complete()
834 mtx_unlock(&tr->qpair->lock); in nvme_abort_complete()
840 struct nvme_qpair *qpair = arg; in nvme_qpair_timeout() local
841 struct nvme_controller *ctrlr = qpair->ctrlr; in nvme_qpair_timeout()
845 bool is_admin = qpair == &ctrlr->adminq; in nvme_qpair_timeout()
850 mtx_assert(&qpair->recovery, MA_OWNED); in nvme_qpair_timeout()
857 if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) { in nvme_qpair_timeout()
858 nvme_printf(qpair->ctrlr, in nvme_qpair_timeout()
861 qpair->timer_armed = false; in nvme_qpair_timeout()
872 if (!qpair->timer_armed) { in nvme_qpair_timeout()
873 nvme_printf(qpair->ctrlr, in nvme_qpair_timeout()
878 switch (qpair->recovery_state) { in nvme_qpair_timeout()
907 qpair->recovery_state = RECOVERY_WAITING; in nvme_qpair_timeout()
920 mtx_lock(&qpair->lock); in nvme_qpair_timeout()
922 TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) { in nvme_qpair_timeout()
939 mtx_unlock(&qpair->lock); in nvme_qpair_timeout()
950 if (_nvme_qpair_process_completions(qpair) && !ctrlr->isr_warned) { in nvme_qpair_timeout()
959 mtx_lock(&qpair->lock); in nvme_qpair_timeout()
961 TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) { in nvme_qpair_timeout()
991 nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id, in nvme_qpair_timeout()
999 mtx_unlock(&qpair->lock); in nvme_qpair_timeout()
1003 mtx_unlock(&qpair->lock); in nvme_qpair_timeout()
1024 callout_schedule_sbt(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 0); in nvme_qpair_timeout()
1026 qpair->timer_armed = false; in nvme_qpair_timeout()
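
nvme_qpair_timeout (lines 840-1026) re-fires every half second and branches on recovery_state: in the normal state it polls the completion queue itself to catch lost interrupts and escalates (a per-command abort or a controller reset, entering RECOVERY_WAITING) only for trackers that are genuinely overdue; once recovery is underway it simply re-arms. A condensed sketch of that dispatch; every helper here is a stand-in, not a driver function:

#include <stdbool.h>

enum rstate { RECOVERY_NONE, RECOVERY_WAITING };

static bool				/* returns: re-arm the timer? */
timeout_tick(enum rstate *state, bool ctrlr_failed,
    bool (*poll_completions)(void),	/* found work the ISR missed? */
    bool (*any_overdue)(void),		/* any tracker past deadline? */
    void (*warn_missed_isr)(void),
    void (*escalate)(void))		/* abort command / reset ctrlr */
{
	if (ctrlr_failed)
		return false;		/* failed controller: stand down */

	switch (*state) {
	case RECOVERY_NONE:
		if (poll_completions())
			warn_missed_isr();	/* interrupts look broken */
		if (any_overdue()) {
			*state = RECOVERY_WAITING;
			escalate();
		}
		break;
	case RECOVERY_WAITING:
		break;			/* reset in flight; just re-arm */
	}
	return true;			/* reschedule at SBT_1S / 2 */
}
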
1035 nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) in nvme_qpair_submit_tracker() argument
1041 mtx_assert(&qpair->lock, MA_OWNED); in nvme_qpair_submit_tracker()
1045 qpair->act_tr[tr->cid] = tr; in nvme_qpair_submit_tracker()
1046 ctrlr = qpair->ctrlr; in nvme_qpair_submit_tracker()
1051 else if (qpair->id == 0) in nvme_qpair_submit_tracker()
1056 if (!qpair->timer_armed) { in nvme_qpair_submit_tracker()
1057 qpair->timer_armed = true; in nvme_qpair_submit_tracker()
1058 callout_reset_sbt_on(&qpair->timer, SBT_1S / 2, SBT_1S / 2, in nvme_qpair_submit_tracker()
1059 nvme_qpair_timeout, qpair, qpair->cpu, 0); in nvme_qpair_submit_tracker()
1065 memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd)); in nvme_qpair_submit_tracker()
1067 if (++qpair->sq_tail == qpair->num_entries) in nvme_qpair_submit_tracker()
1068 qpair->sq_tail = 0; in nvme_qpair_submit_tracker()
1070 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, in nvme_qpair_submit_tracker()
1073 qpair->sq_tdbl_off, qpair->sq_tail); in nvme_qpair_submit_tracker()
1074 qpair->num_cmds++; in nvme_qpair_submit_tracker()
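
Submission (lines 1035-1074) mirrors the completion loop: copy the 64-byte command into cmd[sq_tail], advance the tail with wraparound, sync the ring, and publish the new tail through the SQ tail doorbell. A standalone model, minus the DMA sync:

#include <stdint.h>
#include <string.h>

struct cmd { uint8_t bytes[64]; };	/* NVMe commands are 64 bytes */

struct sq {
	struct cmd *entries;
	uint32_t    num_entries;
	uint32_t    tail;
	volatile uint32_t *tdbl;	/* mapped SQ tail doorbell */
};

/* Producer side, mirroring nvme_qpair_submit_tracker(): enqueue at
 * the tail, wrap, publish the new tail through the doorbell. */
static void
sq_submit(struct sq *q, const struct cmd *c)
{
	memcpy(&q->entries[q->tail], c, sizeof(*c));
	if (++q->tail == q->num_entries)
		q->tail = 0;
	*q->tdbl = q->tail;		/* device now owns the entry */
}
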
1089 nvme_printf(tr->qpair->ctrlr, in nvme_payload_map()
1120 bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, in nvme_payload_map()
1122 nvme_qpair_submit_tracker(tr->qpair, tr); in nvme_payload_map()
1126 _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) in _nvme_qpair_submit_request() argument
1130 bool is_admin = qpair == &qpair->ctrlr->adminq; in _nvme_qpair_submit_request()
1132 mtx_assert(&qpair->lock, MA_OWNED); in _nvme_qpair_submit_request()
1134 tr = TAILQ_FIRST(&qpair->free_tr); in _nvme_qpair_submit_request()
1135 req->qpair = qpair; in _nvme_qpair_submit_request()
1147 if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) { in _nvme_qpair_submit_request()
1148 nvme_qpair_manual_complete_request(qpair, req, in _nvme_qpair_submit_request()
1163 if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) { in _nvme_qpair_submit_request()
1164 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); in _nvme_qpair_submit_request()
1168 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); in _nvme_qpair_submit_request()
1169 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq); in _nvme_qpair_submit_request()
1174 nvme_qpair_submit_tracker(tr->qpair, tr); in _nvme_qpair_submit_request()
1183 err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload, in _nvme_qpair_submit_request()
1193 nvme_printf(qpair->ctrlr, in _nvme_qpair_submit_request()
1195 mtx_unlock(&qpair->lock); in _nvme_qpair_submit_request()
1198 mtx_lock(&qpair->lock); in _nvme_qpair_submit_request()
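
With qpair->lock held, _nvme_qpair_submit_request has three outcomes: fail the request immediately on a failed controller, park it on queued_req when no tracker is free or recovery is active (the completion path at lines 270-274 drains that queue later), or claim a tracker and submit, mapping payload DMA first via nvme_payload_map when the request carries data. The three-way decision, as a sketch:

#include <stdbool.h>
#include <stddef.h>

enum submit_outcome { FAIL_NOW, QUEUE_FOR_LATER, SUBMIT_NOW };

/* `tr` is the tracker popped from the free list (NULL if none). */
static enum submit_outcome
classify_submit(bool ctrlr_failed, bool in_recovery, const void *tr)
{
	if (ctrlr_failed)
		return FAIL_NOW;	/* manual-complete as ABORTED */
	if (tr == NULL || in_recovery)
		return QUEUE_FOR_LATER;	/* drained as trackers free up */
	return SUBMIT_NOW;		/* map payload (if any) and ring */
}
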
1203 nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) in nvme_qpair_submit_request() argument
1206 mtx_lock(&qpair->lock); in nvme_qpair_submit_request()
1207 _nvme_qpair_submit_request(qpair, req); in nvme_qpair_submit_request()
1208 mtx_unlock(&qpair->lock); in nvme_qpair_submit_request()
1212 nvme_qpair_enable(struct nvme_qpair *qpair) in nvme_qpair_enable() argument
1214 bool is_admin __unused = qpair == &qpair->ctrlr->adminq; in nvme_qpair_enable()
1216 if (mtx_initialized(&qpair->recovery)) in nvme_qpair_enable()
1217 mtx_assert(&qpair->recovery, MA_OWNED); in nvme_qpair_enable()
1218 if (mtx_initialized(&qpair->lock)) in nvme_qpair_enable()
1219 mtx_assert(&qpair->lock, MA_OWNED); in nvme_qpair_enable()
1220 KASSERT(!(is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed), in nvme_qpair_enable()
1223 qpair->recovery_state = RECOVERY_NONE; in nvme_qpair_enable()
1227 nvme_qpair_reset(struct nvme_qpair *qpair) in nvme_qpair_reset() argument
1230 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; in nvme_qpair_reset()
1239 qpair->phase = 1; in nvme_qpair_reset()
1241 memset(qpair->cmd, 0, in nvme_qpair_reset()
1242 qpair->num_entries * sizeof(struct nvme_command)); in nvme_qpair_reset()
1243 memset(qpair->cpl, 0, in nvme_qpair_reset()
1244 qpair->num_entries * sizeof(struct nvme_completion)); in nvme_qpair_reset()
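
nvme_qpair_reset zeroes both rings and sets phase = 1: with every CQ entry reading P = 0, the consumer accepts nothing until the controller's first pass writes entries with P = 1. A short demonstration against the cq_drain() model from earlier (it reuses that sketch's struct cq and struct cpl):

#include <stdbool.h>
#include <string.h>

/* Zeroed entries all read P = 0, so phase = 1 guarantees cq_drain()
 * consumes nothing until the controller's first real write. */
static void
cq_reset(struct cq *q)
{
	memset(q->entries, 0, q->num_entries * sizeof(struct cpl));
	q->head = 0;
	q->phase = true;
}
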
1248 nvme_admin_qpair_enable(struct nvme_qpair *qpair) in nvme_admin_qpair_enable() argument
1260 rpt = !TAILQ_EMPTY(&qpair->outstanding_tr); in nvme_admin_qpair_enable()
1262 nvme_printf(qpair->ctrlr, in nvme_admin_qpair_enable()
1264 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { in nvme_admin_qpair_enable()
1269 nvme_printf(qpair->ctrlr, in nvme_admin_qpair_enable()
1272 mtx_lock(&qpair->recovery); in nvme_admin_qpair_enable()
1273 mtx_lock(&qpair->lock); in nvme_admin_qpair_enable()
1274 nvme_qpair_enable(qpair); in nvme_admin_qpair_enable()
1275 mtx_unlock(&qpair->lock); in nvme_admin_qpair_enable()
1276 mtx_unlock(&qpair->recovery); in nvme_admin_qpair_enable()
1280 nvme_io_qpair_enable(struct nvme_qpair *qpair) in nvme_io_qpair_enable() argument
1293 report = !TAILQ_EMPTY(&qpair->outstanding_tr); in nvme_io_qpair_enable()
1295 nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); in nvme_io_qpair_enable()
1296 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { in nvme_io_qpair_enable()
1301 nvme_printf(qpair->ctrlr, "done aborting outstanding i/o\n"); in nvme_io_qpair_enable()
1303 mtx_lock(&qpair->recovery); in nvme_io_qpair_enable()
1304 mtx_lock(&qpair->lock); in nvme_io_qpair_enable()
1305 nvme_qpair_enable(qpair); in nvme_io_qpair_enable()
1308 STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request); in nvme_io_qpair_enable()
1312 nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n"); in nvme_io_qpair_enable()
1316 nvme_qpair_print_command(qpair, &req->cmd); in nvme_io_qpair_enable()
1317 _nvme_qpair_submit_request(qpair, req); in nvme_io_qpair_enable()
1320 nvme_printf(qpair->ctrlr, "done resubmitting i/o\n"); in nvme_io_qpair_enable()
1322 mtx_unlock(&qpair->lock); in nvme_io_qpair_enable()
1323 mtx_unlock(&qpair->recovery); in nvme_io_qpair_enable()
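
nvme_io_qpair_enable cannot iterate queued_req directly while resubmitting: _nvme_qpair_submit_request may push a request right back onto that list, so the code swaps the whole list into a local head first (line 1308) and drains the copy. The idiom, using FreeBSD's <sys/queue.h> (STAILQ_SWAP is not in every libc's copy of the header):

#include <sys/queue.h>

struct request {
	STAILQ_ENTRY(request) stailq;
};
STAILQ_HEAD(req_head, request);

/* Swap-then-drain: move the whole pending list aside before
 * resubmitting, so requests that get re-queued during resubmission
 * don't make this loop run forever. */
static void
resubmit_all(struct req_head *queued,
    void (*submit)(struct request *))
{
	struct req_head temp;
	struct request *req;

	STAILQ_INIT(&temp);
	STAILQ_SWAP(queued, &temp, request);
	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		submit(req);		/* may re-queue onto `queued` */
	}
}
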
1327 nvme_qpair_disable(struct nvme_qpair *qpair) in nvme_qpair_disable() argument
1331 if (mtx_initialized(&qpair->recovery)) in nvme_qpair_disable()
1332 mtx_assert(&qpair->recovery, MA_OWNED); in nvme_qpair_disable()
1333 if (mtx_initialized(&qpair->lock)) in nvme_qpair_disable()
1334 mtx_assert(&qpair->lock, MA_OWNED); in nvme_qpair_disable()
1336 qpair->recovery_state = RECOVERY_WAITING; in nvme_qpair_disable()
1337 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { in nvme_qpair_disable()
1343 nvme_admin_qpair_disable(struct nvme_qpair *qpair) in nvme_admin_qpair_disable() argument
1345 mtx_lock(&qpair->recovery); in nvme_admin_qpair_disable()
1347 mtx_lock(&qpair->lock); in nvme_admin_qpair_disable()
1348 nvme_qpair_disable(qpair); in nvme_admin_qpair_disable()
1349 mtx_unlock(&qpair->lock); in nvme_admin_qpair_disable()
1351 nvme_admin_qpair_abort_aers(qpair); in nvme_admin_qpair_disable()
1353 mtx_unlock(&qpair->recovery); in nvme_admin_qpair_disable()
1357 nvme_io_qpair_disable(struct nvme_qpair *qpair) in nvme_io_qpair_disable() argument
1359 mtx_lock(&qpair->recovery); in nvme_io_qpair_disable()
1360 mtx_lock(&qpair->lock); in nvme_io_qpair_disable()
1362 nvme_qpair_disable(qpair); in nvme_io_qpair_disable()
1364 mtx_unlock(&qpair->lock); in nvme_io_qpair_disable()
1365 mtx_unlock(&qpair->recovery); in nvme_io_qpair_disable()
1369 nvme_qpair_fail(struct nvme_qpair *qpair) in nvme_qpair_fail() argument
1374 if (!mtx_initialized(&qpair->lock)) in nvme_qpair_fail()
1377 mtx_lock(&qpair->lock); in nvme_qpair_fail()
1379 if (!STAILQ_EMPTY(&qpair->queued_req)) { in nvme_qpair_fail()
1380 nvme_printf(qpair->ctrlr, "failing queued i/o\n"); in nvme_qpair_fail()
1382 while (!STAILQ_EMPTY(&qpair->queued_req)) { in nvme_qpair_fail()
1383 req = STAILQ_FIRST(&qpair->queued_req); in nvme_qpair_fail()
1384 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); in nvme_qpair_fail()
1385 mtx_unlock(&qpair->lock); in nvme_qpair_fail()
1386 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, in nvme_qpair_fail()
1388 mtx_lock(&qpair->lock); in nvme_qpair_fail()
1391 if (!TAILQ_EMPTY(&qpair->outstanding_tr)) { in nvme_qpair_fail()
1392 nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); in nvme_qpair_fail()
1395 while (!TAILQ_EMPTY(&qpair->outstanding_tr)) { in nvme_qpair_fail()
1396 tr = TAILQ_FIRST(&qpair->outstanding_tr); in nvme_qpair_fail()
1401 mtx_unlock(&qpair->lock); in nvme_qpair_fail()
1404 mtx_lock(&qpair->lock); in nvme_qpair_fail()
1407 mtx_unlock(&qpair->lock); in nvme_qpair_fail()
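
nvme_qpair_fail drops qpair->lock around each manual completion (lines 1385-1388) because completion callbacks may re-enter the queue pair and take the lock themselves; the loops therefore re-test emptiness on every pass instead of caching a next pointer. The same pop-unlock-complete-relock shape in user-space terms:

#include <pthread.h>
#include <sys/queue.h>

struct request {
	STAILQ_ENTRY(request) stailq;
};
STAILQ_HEAD(req_list, request);

/* Pop under the lock, run the completion callback unlocked (it may
 * re-enter the queue pair), then re-take the lock and re-test
 * emptiness from scratch. */
static void
fail_all(pthread_mutex_t *lock, struct req_list *list,
    void (*complete)(struct request *))
{
	struct request *req;

	pthread_mutex_lock(lock);
	while (!STAILQ_EMPTY(list)) {
		req = STAILQ_FIRST(list);
		STAILQ_REMOVE_HEAD(list, stailq);
		pthread_mutex_unlock(lock);
		complete(req);		/* may take `lock` itself */
		pthread_mutex_lock(lock);
	}
	pthread_mutex_unlock(lock);
}
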