Lines Matching +full:lock +full:- +full:offset (FreeBSD NVMe over Fabrics target, nvmft_controller.c)

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
11 #include <sys/lock.h>
38 sbuf_printf(&sb, "nvmft%u: ", ctrlr->cntlid); in nvmft_printf()
57 ctrlr->cntlid = cntlid; in nvmft_controller_alloc()
59 TAILQ_INSERT_TAIL(&np->controllers, ctrlr, link); in nvmft_controller_alloc()
60 ctrlr->np = np; in nvmft_controller_alloc()
61 mtx_init(&ctrlr->lock, "nvmft controller", NULL, MTX_DEF); in nvmft_controller_alloc()
62 callout_init(&ctrlr->ka_timer, 1); in nvmft_controller_alloc()
63 TASK_INIT(&ctrlr->shutdown_task, 0, nvmft_controller_shutdown, ctrlr); in nvmft_controller_alloc()
64 TIMEOUT_TASK_INIT(taskqueue_thread, &ctrlr->terminate_task, 0, in nvmft_controller_alloc()
67 ctrlr->cdata = np->cdata; in nvmft_controller_alloc()
68 ctrlr->cdata.ctrlr_id = htole16(cntlid); in nvmft_controller_alloc()
69 memcpy(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid)); in nvmft_controller_alloc()
70 memcpy(ctrlr->hostnqn, data->hostnqn, sizeof(ctrlr->hostnqn)); in nvmft_controller_alloc()
71 ctrlr->hip.power_cycles[0] = 1; in nvmft_controller_alloc()
72 ctrlr->create_time = sbinuptime(); in nvmft_controller_alloc()
74 ctrlr->changed_ns = malloc(sizeof(*ctrlr->changed_ns), M_NVMFT, in nvmft_controller_alloc()
83 mtx_destroy(&ctrlr->lock); in nvmft_controller_free()
84 MPASS(ctrlr->io_qpairs == NULL); in nvmft_controller_free()
85 free(ctrlr->changed_ns, M_NVMFT); in nvmft_controller_free()
95 if (ctrlr->shutdown) in nvmft_keep_alive_timer()
98 traffic = atomic_readandclear_int(&ctrlr->ka_active_traffic); in nvmft_keep_alive_timer()
106 callout_schedule_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0, C_HARDCLOCK); in nvmft_keep_alive_timer()
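
The nvmft_keep_alive_timer() lines above show the scheme: any command processed since the last tick counts as an implicit keep-alive, so the timer atomically reads and clears a traffic flag before re-arming itself with callout_schedule_sbt(). The branch taken when no traffic was seen is not among the matched lines; the usual keep-alive behavior would be to treat the association as lost. A minimal userspace sketch, with C11 atomic_exchange() standing in for atomic_readandclear_int(); ka_timer_fired() and its printf are illustrative names, not driver code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ka_active_traffic;    /* set to 1 whenever a command arrives */

static bool
ka_timer_fired(bool shutting_down)
{
    int traffic;

    if (shutting_down)
        return (false);         /* controller is going away; do not re-arm */

    /* Models atomic_readandclear_int(&ctrlr->ka_active_traffic). */
    traffic = atomic_exchange(&ka_active_traffic, 0);
    if (traffic == 0) {
        printf("keep-alive timeout: drop the association\n");
        return (false);
    }
    return (true);              /* caller re-arms the timer */
}

int
main(void)
{
    atomic_store(&ka_active_traffic, 1);        /* pretend a command arrived */
    return (ka_timer_fired(false) ? 0 : 1);
}
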
120 if (cmd->qid != htole16(0)) in nvmft_handoff_admin_queue()
123 qp = nvmft_qpair_init(handoff->trtype, &handoff->params, 0, in nvmft_handoff_admin_queue()
127 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_admin_queue()
131 sx_xlock(&np->lock); in nvmft_handoff_admin_queue()
132 cntlid = alloc_unr(np->ids); in nvmft_handoff_admin_queue()
133 if (cntlid == -1) { in nvmft_handoff_admin_queue()
134 sx_xunlock(&np->lock); in nvmft_handoff_admin_queue()
136 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_admin_queue()
144 TAILQ_FOREACH(ctrlr, &np->controllers, link) { in nvmft_handoff_admin_queue()
145 KASSERT(ctrlr->cntlid != cntlid, in nvmft_handoff_admin_queue()
152 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_admin_queue()
153 ctrlr->admin = qp; in nvmft_handoff_admin_queue()
154 ctrlr->trtype = handoff->trtype; in nvmft_handoff_admin_queue()
157 * The spec requires a non-zero KeepAlive timer, but allow a in nvmft_handoff_admin_queue()
160 kato = le32toh(cmd->kato); in nvmft_handoff_admin_queue()
166 ctrlr->ka_sbt = mstosbt(roundup(kato, 1000)); in nvmft_handoff_admin_queue()
167 callout_reset_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0, in nvmft_handoff_admin_queue()
172 sx_xunlock(&np->lock); in nvmft_handoff_admin_queue()
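
The admin-queue handoff lines above allocate a controller ID under the port's sx lock and derive the keep-alive period from the Connect command's KATO field, rounding it up to whole seconds before converting it with mstosbt(roundup(kato, 1000)). A small model of that arithmetic; sbintime_t is represented here as plain milliseconds, and the 30-second fallback for kato == 0 is an illustrative assumption, not a value taken from the source.

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y)) /* like the kernel's roundup() */

static uint64_t
ka_period_ms(uint32_t kato_ms)
{
    if (kato_ms == 0)
        kato_ms = 30 * 1000;    /* assumed default; the spec wants a non-zero timer */
    return (ROUNDUP((uint64_t)kato_ms, 1000));
}

int
main(void)
{
    printf("kato 2500 ms -> keep-alive period %llu ms\n",
        (unsigned long long)ka_period_ms(2500));        /* prints 3000 */
    return (0);
}
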
188 qid = le16toh(cmd->qid); in nvmft_handoff_io_queue()
191 cntlid = le16toh(data->cntlid); in nvmft_handoff_io_queue()
194 qp = nvmft_qpair_init(handoff->trtype, &handoff->params, qid, name); in nvmft_handoff_io_queue()
197 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
201 sx_slock(&np->lock); in nvmft_handoff_io_queue()
202 TAILQ_FOREACH(ctrlr, &np->controllers, link) { in nvmft_handoff_io_queue()
203 if (ctrlr->cntlid == cntlid) in nvmft_handoff_io_queue()
207 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
209 ctrlr->cntlid, qid, (int)sizeof(data->hostnqn), in nvmft_handoff_io_queue()
210 data->hostnqn); in nvmft_handoff_io_queue()
217 if (memcmp(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid)) != 0) { in nvmft_handoff_io_queue()
218 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
221 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
227 if (memcmp(ctrlr->hostnqn, data->hostnqn, sizeof(ctrlr->hostnqn)) != 0) { in nvmft_handoff_io_queue()
228 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
231 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
238 /* XXX: Require handoff->trtype == ctrlr->trtype? */ in nvmft_handoff_io_queue()
240 mtx_lock(&ctrlr->lock); in nvmft_handoff_io_queue()
241 if (ctrlr->shutdown) { in nvmft_handoff_io_queue()
242 mtx_unlock(&ctrlr->lock); in nvmft_handoff_io_queue()
243 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
246 qid, (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
252 if (ctrlr->num_io_queues == 0) { in nvmft_handoff_io_queue()
253 mtx_unlock(&ctrlr->lock); in nvmft_handoff_io_queue()
254 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
257 qid, (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
263 if (cmd->qid > ctrlr->num_io_queues) { in nvmft_handoff_io_queue()
264 mtx_unlock(&ctrlr->lock); in nvmft_handoff_io_queue()
265 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
268 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
274 if (ctrlr->io_qpairs[qid - 1].qp != NULL) { in nvmft_handoff_io_queue()
275 mtx_unlock(&ctrlr->lock); in nvmft_handoff_io_queue()
276 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
278 "attempt to re-create I/O queue %u from %.*s\n", qid, in nvmft_handoff_io_queue()
279 (int)sizeof(data->hostnqn), data->hostnqn); in nvmft_handoff_io_queue()
286 ctrlr->io_qpairs[qid - 1].qp = qp; in nvmft_handoff_io_queue()
287 mtx_unlock(&ctrlr->lock); in nvmft_handoff_io_queue()
289 sx_sunlock(&np->lock); in nvmft_handoff_io_queue()
305 mtx_lock(&ctrlr->lock); in nvmft_controller_shutdown()
306 for (u_int i = 0; i < ctrlr->num_io_queues; i++) { in nvmft_controller_shutdown()
307 if (ctrlr->io_qpairs[i].qp != NULL) { in nvmft_controller_shutdown()
308 ctrlr->io_qpairs[i].shutdown = true; in nvmft_controller_shutdown()
309 mtx_unlock(&ctrlr->lock); in nvmft_controller_shutdown()
310 nvmft_qpair_shutdown(ctrlr->io_qpairs[i].qp); in nvmft_controller_shutdown()
311 mtx_lock(&ctrlr->lock); in nvmft_controller_shutdown()
314 mtx_unlock(&ctrlr->lock); in nvmft_controller_shutdown()
320 mtx_lock(&ctrlr->lock); in nvmft_controller_shutdown()
321 while (ctrlr->pending_commands != 0) in nvmft_controller_shutdown()
322 mtx_sleep(&ctrlr->pending_commands, &ctrlr->lock, 0, "nvmftsh", in nvmft_controller_shutdown()
324 mtx_unlock(&ctrlr->lock); in nvmft_controller_shutdown()
327 for (u_int i = 0; i < ctrlr->num_io_queues; i++) { in nvmft_controller_shutdown()
328 if (ctrlr->io_qpairs[i].qp != NULL) in nvmft_controller_shutdown()
329 nvmft_qpair_destroy(ctrlr->io_qpairs[i].qp); in nvmft_controller_shutdown()
331 free(ctrlr->io_qpairs, M_NVMFT); in nvmft_controller_shutdown()
332 ctrlr->io_qpairs = NULL; in nvmft_controller_shutdown()
334 mtx_lock(&ctrlr->lock); in nvmft_controller_shutdown()
335 ctrlr->num_io_queues = 0; in nvmft_controller_shutdown()
338 if (NVMEV(NVME_CSTS_REG_SHST, ctrlr->csts) == NVME_SHST_OCCURRING) { in nvmft_controller_shutdown()
339 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_SHST); in nvmft_controller_shutdown()
340 ctrlr->csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_COMPLETE); in nvmft_controller_shutdown()
343 if (NVMEV(NVME_CSTS_REG_CFS, ctrlr->csts) == 0) { in nvmft_controller_shutdown()
344 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_RDY); in nvmft_controller_shutdown()
345 ctrlr->shutdown = false; in nvmft_controller_shutdown()
347 mtx_unlock(&ctrlr->lock); in nvmft_controller_shutdown()
353 * (NVMe-over-Fabrics 1.1 4.6). in nvmft_controller_shutdown()
355 if (ctrlr->admin_closed || NVMEV(NVME_CSTS_REG_CFS, ctrlr->csts) != 0) in nvmft_controller_shutdown()
359 &ctrlr->terminate_task, hz * 60 * 2); in nvmft_controller_shutdown()
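
Two patterns stand out in the nvmft_controller_shutdown() lines above: the controller mutex is released around each nvmft_qpair_shutdown() call, and the thread then sleeps on &ctrlr->pending_commands until every outstanding command has drained before the I/O queue pairs are destroyed. A userspace sketch of that drain step, with a pthread condition variable standing in for mtx_sleep()/wakeup(); struct ctrlr_model and command_completed() are illustrative, not driver types.

#include <pthread.h>

struct ctrlr_model {
    pthread_mutex_t lock;
    pthread_cond_t  drained;
    unsigned        pending_commands;
};

/* Shutdown side: wait, under the lock, until the count hits zero. */
void
drain_pending_commands(struct ctrlr_model *c)
{
    pthread_mutex_lock(&c->lock);
    while (c->pending_commands != 0)
        pthread_cond_wait(&c->drained, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

/* Completion side: drop the count and wake the drainer at zero. */
void
command_completed(struct ctrlr_model *c)
{
    pthread_mutex_lock(&c->lock);
    if (--c->pending_commands == 0)
        pthread_cond_broadcast(&c->drained);
    pthread_mutex_unlock(&c->lock);
}
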
369 /* If the controller has been re-enabled, nothing to do. */ in nvmft_controller_terminate()
370 mtx_lock(&ctrlr->lock); in nvmft_controller_terminate()
371 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) != 0) { in nvmft_controller_terminate()
372 mtx_unlock(&ctrlr->lock); in nvmft_controller_terminate()
374 if (ctrlr->ka_sbt != 0) in nvmft_controller_terminate()
375 callout_schedule_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0, in nvmft_controller_terminate()
381 ctrlr->shutdown = true; in nvmft_controller_terminate()
382 mtx_unlock(&ctrlr->lock); in nvmft_controller_terminate()
384 nvmft_qpair_destroy(ctrlr->admin); in nvmft_controller_terminate()
387 np = ctrlr->np; in nvmft_controller_terminate()
388 sx_xlock(&np->lock); in nvmft_controller_terminate()
389 TAILQ_REMOVE(&np->controllers, ctrlr, link); in nvmft_controller_terminate()
390 free_unr(np->ids, ctrlr->cntlid); in nvmft_controller_terminate()
391 wakeup_np = (!np->online && TAILQ_EMPTY(&np->controllers)); in nvmft_controller_terminate()
392 sx_xunlock(&np->lock); in nvmft_controller_terminate()
396 callout_drain(&ctrlr->ka_timer); in nvmft_controller_terminate()
418 if (qp != ctrlr->admin) in nvmft_controller_error()
421 mtx_lock(&ctrlr->lock); in nvmft_controller_error()
422 if (ctrlr->shutdown) { in nvmft_controller_error()
423 ctrlr->admin_closed = true; in nvmft_controller_error()
424 mtx_unlock(&ctrlr->lock); in nvmft_controller_error()
428 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0) { in nvmft_controller_error()
429 MPASS(ctrlr->num_io_queues == 0); in nvmft_controller_error()
430 mtx_unlock(&ctrlr->lock); in nvmft_controller_error()
433 * Ok to drop lock here since ctrlr->cc can't in nvmft_controller_error()
448 &ctrlr->terminate_task, NULL) == 0) in nvmft_controller_error()
450 &ctrlr->terminate_task, 0); in nvmft_controller_error()
459 ctrlr->admin_closed = true; in nvmft_controller_error()
461 mtx_lock(&ctrlr->lock); in nvmft_controller_error()
464 if (ctrlr->shutdown) { in nvmft_controller_error()
465 mtx_unlock(&ctrlr->lock); in nvmft_controller_error()
469 ctrlr->csts |= NVMEF(NVME_CSTS_REG_CFS, 1); in nvmft_controller_error()
470 ctrlr->cc &= ~NVMEM(NVME_CC_REG_EN); in nvmft_controller_error()
471 ctrlr->shutdown = true; in nvmft_controller_error()
472 mtx_unlock(&ctrlr->lock); in nvmft_controller_error()
474 callout_stop(&ctrlr->ka_timer); in nvmft_controller_error()
475 taskqueue_enqueue(taskqueue_thread, &ctrlr->shutdown_task); in nvmft_controller_error()
487 for (n = m; len > 0; n = n->m_next) { in m_getml()
488 n->m_len = M_SIZE(n); in m_getml()
489 if (n->m_len >= len) { in m_getml()
490 n->m_len = len; in m_getml()
491 MPASS(n->m_next == NULL); in m_getml()
493 len -= n->m_len; in m_getml()
499 m_zero(struct mbuf *m, u_int offset, u_int len) in m_zero() argument
506 while (m->m_len <= offset) { in m_zero()
507 offset -= m->m_len; in m_zero()
508 m = m->m_next; in m_zero()
511 todo = m->m_len - offset; in m_zero()
514 memset(mtodo(m, offset), 0, todo); in m_zero()
515 m = m->m_next; in m_zero()
516 len -= todo; in m_zero()
519 todo = m->m_len; in m_zero()
523 m = m->m_next; in m_zero()
524 len -= todo; in m_zero()
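
The m_zero() lines above walk an mbuf chain: skip whole mbufs until the starting offset falls inside one, zero the tail of that mbuf, then zero whole mbufs until len bytes have been cleared. The same walk, modelled on a plain linked list of buffers; struct seg stands in for struct mbuf (m_len, m_next, data pointer).

#include <string.h>

struct seg {
    struct seg    *next;
    unsigned       len;
    unsigned char *data;
};

void
seg_zero(struct seg *s, unsigned offset, unsigned len)
{
    unsigned todo;

    if (len == 0)
        return;

    /* Skip segments that end at or before the starting offset. */
    while (s->len <= offset) {
        offset -= s->len;
        s = s->next;
    }

    /* Zero the tail of the segment the offset lands in. */
    todo = s->len - offset;
    if (todo > len)
        todo = len;
    memset(s->data + offset, 0, todo);
    s = s->next;
    len -= todo;

    /* Zero whole (or a final partial) segments until done. */
    while (len > 0) {
        todo = s->len;
        if (todo > len)
            todo = len;
        memset(s->data, 0, todo);
        s = s->next;
        len -= todo;
    }
}
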
533 uint64_t offset; in handle_get_log_page() local
540 lid = le32toh(cmd->cdw10) & 0xff; in handle_get_log_page()
541 rae = (le32toh(cmd->cdw10) & (1U << 15)) != 0; in handle_get_log_page()
542 numd = le32toh(cmd->cdw10) >> 16 | le32toh(cmd->cdw11) << 16; in handle_get_log_page()
543 offset = le32toh(cmd->cdw12) | (uint64_t)le32toh(cmd->cdw13) << 32; in handle_get_log_page()
545 if (offset % 3 != 0) { in handle_get_log_page()
558 m_zero(m, todo, len - todo); in handle_get_log_page()
566 if (offset >= sizeof(hip)) { in handle_get_log_page()
570 todo = sizeof(hip) - offset; in handle_get_log_page()
574 mtx_lock(&ctrlr->lock); in handle_get_log_page()
575 hip = ctrlr->hip; in handle_get_log_page()
577 sbintime_getsec(ctrlr->busy_total) / 60; in handle_get_log_page()
579 sbintime_getsec(sbinuptime() - ctrlr->create_time) / 3600; in handle_get_log_page()
580 mtx_unlock(&ctrlr->lock); in handle_get_log_page()
583 m_copyback(m, 0, todo, (char *)&hip + offset); in handle_get_log_page()
585 m_zero(m, todo, len - todo); in handle_get_log_page()
591 if (offset >= sizeof(ctrlr->np->fp)) { in handle_get_log_page()
595 todo = sizeof(ctrlr->np->fp) - offset; in handle_get_log_page()
600 m_copyback(m, 0, todo, (char *)&ctrlr->np->fp + offset); in handle_get_log_page()
602 m_zero(m, todo, len - todo); in handle_get_log_page()
607 if (offset >= sizeof(*ctrlr->changed_ns)) { in handle_get_log_page()
611 todo = sizeof(*ctrlr->changed_ns) - offset; in handle_get_log_page()
616 mtx_lock(&ctrlr->lock); in handle_get_log_page()
617 m_copyback(m, 0, todo, (char *)ctrlr->changed_ns + offset); in handle_get_log_page()
618 if (offset == 0 && len == sizeof(*ctrlr->changed_ns)) in handle_get_log_page()
619 memset(ctrlr->changed_ns, 0, in handle_get_log_page()
620 sizeof(*ctrlr->changed_ns)); in handle_get_log_page()
622 ctrlr->changed_ns_reported = false; in handle_get_log_page()
623 mtx_unlock(&ctrlr->lock); in handle_get_log_page()
625 m_zero(m, todo, len - todo); in handle_get_log_page()
638 nvmft_command_completed(ctrlr->admin, nc); in handle_get_log_page()
640 nvmft_send_generic_error(ctrlr->admin, nc, status); in handle_get_log_page()
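
The handle_get_log_page() lines above reassemble the 64-bit log page offset from CDW12/CDW13 and the dword count from CDW10/CDW11. Per the NVMe spec, NUMD is a 0's-based dword count, so the byte length of the transfer is (NUMD + 1) * 4; that length computation is not among the matched lines, so treat it as spec background rather than quoted driver code. A standalone decoder, assuming the cdw values are already host-endian (the driver applies le32toh() first):

#include <stdbool.h>
#include <stdint.h>

struct get_log_page {
    uint8_t  lid;       /* log page identifier (CDW10 7:0) */
    bool     rae;       /* retain asynchronous event (CDW10 bit 15) */
    uint64_t offset;    /* byte offset into the log page */
    uint64_t len;       /* transfer length in bytes */
};

struct get_log_page
decode_get_log_page(uint32_t cdw10, uint32_t cdw11, uint32_t cdw12,
    uint32_t cdw13)
{
    struct get_log_page g;
    uint32_t numd;

    g.lid = cdw10 & 0xff;
    g.rae = (cdw10 & (1U << 15)) != 0;
    numd = cdw10 >> 16 | cdw11 << 16;
    g.offset = cdw12 | (uint64_t)cdw13 << 32;
    g.len = ((uint64_t)numd + 1) * 4;   /* NUMD is 0's based */
    return (g);
}
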
647 free(m->m_ext.ext_arg1, M_NVMFT); in m_free_nslist()
659 cns = le32toh(cmd->cdw10) & 0xFF; in handle_identify_command()
661 if (data_len != sizeof(ctrlr->cdata)) { in handle_identify_command()
665 nvmft_send_generic_error(ctrlr->admin, nc, in handle_identify_command()
674 nvmft_dispatch_command(ctrlr->admin, nc, true); in handle_identify_command()
678 m = m_getml(sizeof(ctrlr->cdata), M_WAITOK); in handle_identify_command()
679 m_copyback(m, 0, sizeof(ctrlr->cdata), (void *)&ctrlr->cdata); in handle_identify_command()
681 sizeof(ctrlr->cdata)); in handle_identify_command()
690 nsid = le32toh(cmd->nsid); in handle_identify_command()
697 nvmft_populate_active_nslist(ctrlr->np, nsid, nslist); in handle_identify_command()
701 m->m_len = sizeof(*nslist); in handle_identify_command()
702 status = nvmf_send_controller_data(nc, 0, m, m->m_len); in handle_identify_command()
713 nvmft_command_completed(ctrlr->admin, nc); in handle_identify_command()
715 nvmft_send_generic_error(ctrlr->admin, nc, status); in handle_identify_command()
726 fid = NVMEV(NVME_FEAT_SET_FID, le32toh(cmd->cdw10)); in handle_set_features()
733 num_queues = le32toh(cmd->cdw11) & 0xffff; in handle_set_features()
740 if (le32toh(cmd->cdw11) >> 16 != num_queues) in handle_set_features()
749 mtx_lock(&ctrlr->lock); in handle_set_features()
750 if (ctrlr->num_io_queues != 0) { in handle_set_features()
751 mtx_unlock(&ctrlr->lock); in handle_set_features()
753 nvmft_send_generic_error(ctrlr->admin, nc, in handle_set_features()
759 ctrlr->num_io_queues = num_queues; in handle_set_features()
760 ctrlr->io_qpairs = io_qpairs; in handle_set_features()
761 mtx_unlock(&ctrlr->lock); in handle_set_features()
764 cqe.cdw0 = cmd->cdw11; in handle_set_features()
765 nvmft_send_response(ctrlr->admin, &cqe); in handle_set_features()
773 aer_mask = le32toh(cmd->cdw11); in handle_set_features()
779 mtx_lock(&ctrlr->lock); in handle_set_features()
780 ctrlr->aer_mask = aer_mask; in handle_set_features()
781 mtx_unlock(&ctrlr->lock); in handle_set_features()
782 nvmft_send_success(ctrlr->admin, nc); in handle_set_features()
792 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD); in handle_set_features()
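
For the Number of Queues feature, the handle_set_features() lines above take the requested count from the low 16 bits of CDW11, reject a request whose high 16 bits (the completion-queue count) differ, and echo CDW11 back in CDW0 of the completion once the I/O queue pair array is installed under the controller lock. A small decoder; per the NVMe spec both halves of CDW11 are 0's-based counts, and the conversion to a 1-based count below is spec background, not a line quoted from the driver.

#include <stdbool.h>
#include <stdint.h>

bool
decode_num_queues(uint32_t cdw11, unsigned *num_queuesp)
{
    unsigned nsqr = cdw11 & 0xffff;     /* submission queues, 0's based */
    unsigned ncqr = cdw11 >> 16;        /* completion queues, 0's based */

    if (ncqr != nsqr)
        return (false);                 /* asymmetric request refused */
    *num_queuesp = nsqr + 1;            /* convert to a 1-based count */
    return (true);
}
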
799 struct nvmft_port *np = ctrlr->np; in update_cc()
804 mtx_lock(&ctrlr->lock); in update_cc()
807 if (ctrlr->shutdown) { in update_cc()
808 mtx_unlock(&ctrlr->lock); in update_cc()
812 if (!_nvmf_validate_cc(np->max_io_qsize, np->cap, ctrlr->cc, new_cc)) { in update_cc()
813 mtx_unlock(&ctrlr->lock); in update_cc()
817 changes = ctrlr->cc ^ new_cc; in update_cc()
818 ctrlr->cc = new_cc; in update_cc()
823 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_SHST); in update_cc()
824 ctrlr->csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_OCCURRING); in update_cc()
825 ctrlr->cc &= ~NVMEM(NVME_CC_REG_EN); in update_cc()
826 ctrlr->shutdown = true; in update_cc()
835 ctrlr->shutdown = true; in update_cc()
838 ctrlr->csts |= NVMEF(NVME_CSTS_REG_RDY, 1); in update_cc()
840 mtx_unlock(&ctrlr->lock); in update_cc()
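
update_cc() XORs the old and new Controller Configuration values to find which fields changed, then reacts to the shutdown and enable bits: a shutdown request moves CSTS.SHST to "occurring" and clears CC.EN, while an enable transition either sets CSTS.RDY or marks the controller for teardown. A rough sketch of those edges using the NVMe CC/CSTS bit positions (CC.EN bit 0, CC.SHN bits 15:14, CSTS.RDY bit 0, CSTS.SHST bits 3:2); validation via _nvmf_validate_cc() and all locking are omitted, and the exact ordering of the driver's branches is not fully visible in the matched lines.

#include <stdbool.h>
#include <stdint.h>

#define CC_EN                   (1u << 0)
#define CC_SHN_MASK             (3u << 14)
#define CSTS_RDY                (1u << 0)
#define CSTS_SHST_MASK          (3u << 2)
#define CSTS_SHST_OCCURRING     (1u << 2)

struct ctrlr_regs {
    uint32_t cc, csts;
    bool     shutdown;
};

void
apply_cc(struct ctrlr_regs *r, uint32_t new_cc)
{
    uint32_t changes = r->cc ^ new_cc;

    r->cc = new_cc;

    if ((new_cc & CC_SHN_MASK) != 0) {
        /* Shutdown requested: SHST -> occurring, drop EN. */
        r->csts = (r->csts & ~CSTS_SHST_MASK) | CSTS_SHST_OCCURRING;
        r->cc &= ~CC_EN;
        r->shutdown = true;
    }
    if ((changes & CC_EN) != 0) {
        if ((new_cc & CC_EN) != 0)
            r->csts |= CSTS_RDY;        /* controller now ready */
        else
            r->shutdown = true;         /* disabled: tear down queues */
    }
}
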
853 switch (le32toh(pget->ofst)) { in handle_property_get()
855 if (pget->attrib.size != NVMF_PROP_SIZE_8) in handle_property_get()
857 rsp.value.u64 = htole64(ctrlr->np->cap); in handle_property_get()
860 if (pget->attrib.size != NVMF_PROP_SIZE_4) in handle_property_get()
862 rsp.value.u32.low = ctrlr->cdata.ver; in handle_property_get()
865 if (pget->attrib.size != NVMF_PROP_SIZE_4) in handle_property_get()
867 rsp.value.u32.low = htole32(ctrlr->cc); in handle_property_get()
870 if (pget->attrib.size != NVMF_PROP_SIZE_4) in handle_property_get()
872 rsp.value.u32.low = htole32(ctrlr->csts); in handle_property_get()
878 nvmft_send_response(ctrlr->admin, &rsp); in handle_property_get()
881 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD); in handle_property_get()
891 switch (le32toh(pset->ofst)) { in handle_property_set()
893 if (pset->attrib.size != NVMF_PROP_SIZE_4) in handle_property_set()
895 if (!update_cc(ctrlr, le32toh(pset->value.u32.low), in handle_property_set()
903 nvmft_send_success(ctrlr->admin, nc); in handle_property_set()
905 callout_stop(&ctrlr->ka_timer); in handle_property_set()
906 taskqueue_enqueue(taskqueue_thread, &ctrlr->shutdown_task); in handle_property_set()
910 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD); in handle_property_set()
917 switch (fc->fctype) { in handle_admin_fabrics_command()
929 nvmft_send_generic_error(ctrlr->admin, nc, in handle_admin_fabrics_command()
934 nvmft_send_error(ctrlr->admin, nc, NVME_SCT_COMMAND_SPECIFIC, in handle_admin_fabrics_command()
939 fc->fctype); in handle_admin_fabrics_command()
940 nvmft_send_generic_error(ctrlr->admin, nc, in handle_admin_fabrics_command()
954 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0 && in nvmft_handle_admin_command()
955 cmd->opc != NVME_OPC_FABRICS_COMMANDS) { in nvmft_handle_admin_command()
957 "Unsupported admin opcode %#x while disabled\n", cmd->opc); in nvmft_handle_admin_command()
958 nvmft_send_generic_error(ctrlr->admin, nc, in nvmft_handle_admin_command()
964 atomic_store_int(&ctrlr->ka_active_traffic, 1); in nvmft_handle_admin_command()
966 switch (cmd->opc) { in nvmft_handle_admin_command()
977 mtx_lock(&ctrlr->lock); in nvmft_handle_admin_command()
978 if (ctrlr->aer_pending == NVMFT_NUM_AER) { in nvmft_handle_admin_command()
979 mtx_unlock(&ctrlr->lock); in nvmft_handle_admin_command()
980 nvmft_send_error(ctrlr->admin, nc, in nvmft_handle_admin_command()
984 /* NB: Store the CID without byte-swapping. */ in nvmft_handle_admin_command()
985 ctrlr->aer_cids[ctrlr->aer_pidx] = cmd->cid; in nvmft_handle_admin_command()
986 ctrlr->aer_pending++; in nvmft_handle_admin_command()
987 ctrlr->aer_pidx = (ctrlr->aer_pidx + 1) % NVMFT_NUM_AER; in nvmft_handle_admin_command()
988 mtx_unlock(&ctrlr->lock); in nvmft_handle_admin_command()
993 nvmft_send_success(ctrlr->admin, nc); in nvmft_handle_admin_command()
1001 nvmft_printf(ctrlr, "Unsupported admin opcode %#x\n", cmd->opc); in nvmft_handle_admin_command()
1002 nvmft_send_generic_error(ctrlr->admin, nc, in nvmft_handle_admin_command()
1016 atomic_store_int(&ctrlr->ka_active_traffic, 1); in nvmft_handle_io_command()
1018 switch (cmd->opc) { in nvmft_handle_io_command()
1020 if (cmd->nsid == htole32(0xffffffff)) { in nvmft_handle_io_command()
1037 nvmft_printf(ctrlr, "Unsupported I/O opcode %#x\n", cmd->opc); in nvmft_handle_io_command()
1054 mtx_lock(&ctrlr->lock); in nvmft_report_aer()
1055 if ((ctrlr->aer_mask & aer_mask) == 0) { in nvmft_report_aer()
1056 mtx_unlock(&ctrlr->lock); in nvmft_report_aer()
1064 if (ctrlr->aer_pending == 0) { in nvmft_report_aer()
1065 mtx_unlock(&ctrlr->lock); in nvmft_report_aer()
1073 cpl.cid = ctrlr->aer_cids[ctrlr->aer_cidx]; in nvmft_report_aer()
1074 ctrlr->aer_pending--; in nvmft_report_aer()
1075 ctrlr->aer_cidx = (ctrlr->aer_cidx + 1) % NVMFT_NUM_AER; in nvmft_report_aer()
1076 mtx_unlock(&ctrlr->lock); in nvmft_report_aer()
1082 nvmft_send_response(ctrlr->admin, &cpl); in nvmft_report_aer()
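
Taken together, the nvmft_handle_admin_command() and nvmft_report_aer() lines above implement a small FIFO of parked Asynchronous Event Request commands: queuing an AER stores its CID at aer_pidx and bumps aer_pending, and reporting an event completes the CID at aer_cidx, with both indices wrapping modulo NVMFT_NUM_AER. A standalone model of that ring; NUM_AER is an arbitrary stand-in for NVMFT_NUM_AER, and in the driver every operation shown here happens under ctrlr->lock.

#include <stdbool.h>
#include <stdint.h>

#define NUM_AER 4                       /* stand-in for NVMFT_NUM_AER */

struct aer_ring {
    uint16_t cids[NUM_AER];             /* parked CIDs, stored unswapped */
    unsigned pidx, cidx, pending;
};

/* Host submitted an AER: park its CID until an event is available. */
bool
aer_queue(struct aer_ring *r, uint16_t cid)
{
    if (r->pending == NUM_AER)
        return (false);                 /* too many outstanding AERs */
    r->cids[r->pidx] = cid;
    r->pending++;
    r->pidx = (r->pidx + 1) % NUM_AER;
    return (true);
}

/* An event fired: pop the oldest parked CID so it can be completed. */
bool
aer_pop(struct aer_ring *r, uint16_t *cidp)
{
    if (r->pending == 0)
        return (false);                 /* nothing parked; defer the event */
    *cidp = r->cids[r->cidx];
    r->pending--;
    r->cidx = (r->cidx + 1) % NUM_AER;
    return (true);
}
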
1094 mtx_lock(&ctrlr->lock); in nvmft_controller_lun_changed()
1095 nslist = ctrlr->changed_ns; in nvmft_controller_lun_changed()
1098 if (nslist->ns[0] != 0xffffffff) { in nvmft_controller_lun_changed()
1100 for (i = 0; i < nitems(nslist->ns); i++) { in nvmft_controller_lun_changed()
1101 nsid = le32toh(nslist->ns[i]); in nvmft_controller_lun_changed()
1104 mtx_unlock(&ctrlr->lock); in nvmft_controller_lun_changed()
1112 if (nslist->ns[nitems(nslist->ns) - 1] != htole32(0)) { in nvmft_controller_lun_changed()
1114 memset(ctrlr->changed_ns, 0, in nvmft_controller_lun_changed()
1115 sizeof(*ctrlr->changed_ns)); in nvmft_controller_lun_changed()
1116 ctrlr->changed_ns->ns[0] = 0xffffffff; in nvmft_controller_lun_changed()
1117 } else if (nslist->ns[i] == htole32(0)) { in nvmft_controller_lun_changed()
1122 nslist->ns[i] = htole32(new_nsid); in nvmft_controller_lun_changed()
1124 memmove(&nslist->ns[i + 1], &nslist->ns[i], in nvmft_controller_lun_changed()
1125 (nitems(nslist->ns) - i - 1) * in nvmft_controller_lun_changed()
1126 sizeof(nslist->ns[0])); in nvmft_controller_lun_changed()
1127 nslist->ns[i] = htole32(new_nsid); in nvmft_controller_lun_changed()
1131 if (ctrlr->changed_ns_reported) { in nvmft_controller_lun_changed()
1132 mtx_unlock(&ctrlr->lock); in nvmft_controller_lun_changed()
1135 ctrlr->changed_ns_reported = true; in nvmft_controller_lun_changed()
1136 mtx_unlock(&ctrlr->lock); in nvmft_controller_lun_changed()
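
The nvmft_controller_lun_changed() lines above maintain the Changed Namespace List log page: new NSIDs are kept in sorted order, a duplicate or an already-overflowed list is left alone, and when the array is full the whole list collapses to the single 0xFFFFFFFF sentinel the NVMe spec defines for "more namespaces changed than fit". Not every comparison in the insertion loop is among the matched lines, so the sketch below is a plausible reconstruction rather than a quotation: it uses host-endian values (the driver stores entries little-endian, under ctrlr->lock) and NNS as a stand-in for nitems(nslist->ns).

#include <stdint.h>
#include <string.h>

#define NNS     8                       /* stand-in for nitems(nslist->ns) */

void
changed_ns_insert(uint32_t ns[NNS], uint32_t new_nsid)
{
    unsigned i;

    if (ns[0] == 0xffffffffu)
        return;                         /* already overflowed to the sentinel */

    /* Find the first slot that is empty or sorts at/after new_nsid. */
    for (i = 0; i < NNS; i++) {
        if (ns[i] == 0 || ns[i] >= new_nsid)
            break;
    }
    if (i < NNS && ns[i] == new_nsid)
        return;                         /* already recorded */

    if (i == NNS || ns[NNS - 1] != 0) {
        /* Full: replace the list with the overflow sentinel. */
        memset(ns, 0, NNS * sizeof(ns[0]));
        ns[0] = 0xffffffffu;
        return;
    }
    if (ns[i] == 0) {
        ns[i] = new_nsid;               /* append to the end of the list */
        return;
    }
    /* Shift the tail up one slot and insert in sorted order. */
    memmove(&ns[i + 1], &ns[i], (NNS - i - 1) * sizeof(ns[0]));
    ns[i] = new_nsid;
}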