Lines Matching full:ctrl

134 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)  in nvmet_async_events_failall()  argument
138 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
139 while (ctrl->nr_async_event_cmds) { in nvmet_async_events_failall()
140 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_failall()
141 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
143 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
145 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
148 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl) in nvmet_async_events_process() argument
153 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
154 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) { in nvmet_async_events_process()
155 aen = list_first_entry(&ctrl->async_events, in nvmet_async_events_process()
157 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_process()
163 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
164 trace_nvmet_async_event(ctrl, req->cqe->result.u32); in nvmet_async_events_process()
166 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
168 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
171 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) in nvmet_async_events_free() argument
175 mutex_lock(&ctrl->lock); in nvmet_async_events_free()
176 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) { in nvmet_async_events_free()
180 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
185 struct nvmet_ctrl *ctrl = in nvmet_async_event_work() local
188 nvmet_async_events_process(ctrl); in nvmet_async_event_work()
191 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, in nvmet_add_async_event() argument
204 mutex_lock(&ctrl->lock); in nvmet_add_async_event()
205 list_add_tail(&aen->entry, &ctrl->async_events); in nvmet_add_async_event()
206 mutex_unlock(&ctrl->lock); in nvmet_add_async_event()
208 queue_work(nvmet_wq, &ctrl->async_event_work); in nvmet_add_async_event()
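
The async-event matches above (nvmet_async_events_failall/process and nvmet_add_async_event) show the AEN bookkeeping pattern: producers append an event to ctrl->async_events and queue ctrl->async_event_work, and the worker pops one outstanding AER command per queued event under ctrl->lock, dropping the lock around each completion. A minimal self-contained sketch of that drain loop, with hypothetical my_* names rather than the nvmet types:

    /* Sketch only: a mutex-protected event list drained by a work item.
     * struct my_ctrl, struct my_event and my_drain_events() are
     * illustrative stand-ins, not the nvmet data structures.
     */
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_event {
        struct list_head entry;
        u32 result;
    };

    struct my_ctrl {
        struct mutex lock;
        struct list_head events;        /* pending AEN payloads */
        unsigned int nr_cmds;           /* outstanding AER commands */
        struct work_struct event_work;
    };

    static void my_drain_events(struct work_struct *work)
    {
        struct my_ctrl *ctrl = container_of(work, struct my_ctrl, event_work);
        struct my_event *ev;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_cmds && !list_empty(&ctrl->events)) {
            ev = list_first_entry(&ctrl->events, struct my_event, entry);
            list_del(&ev->entry);
            ctrl->nr_cmds--;

            /* complete outside the lock, as the nvmet code does */
            mutex_unlock(&ctrl->lock);
            pr_info("completing AEN, result %#x\n", ev->result);
            kfree(ev);
            mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
    }
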
211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) in nvmet_add_to_changed_ns_log() argument
215 mutex_lock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
216 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES) in nvmet_add_to_changed_ns_log()
219 for (i = 0; i < ctrl->nr_changed_ns; i++) { in nvmet_add_to_changed_ns_log()
220 if (ctrl->changed_ns_list[i] == nsid) in nvmet_add_to_changed_ns_log()
224 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) { in nvmet_add_to_changed_ns_log()
225 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff); in nvmet_add_to_changed_ns_log()
226 ctrl->nr_changed_ns = U32_MAX; in nvmet_add_to_changed_ns_log()
230 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid; in nvmet_add_to_changed_ns_log()
232 mutex_unlock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
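
nvmet_add_to_changed_ns_log() keeps the Changed Namespace List bounded: duplicates are ignored, and when the list would exceed NVME_MAX_CHANGED_NAMESPACES it is collapsed to the single sentinel NSID 0xffffffff with the counter pinned at U32_MAX so later calls return early. A hedged standalone sketch of that overflow rule (MAX_CHANGED, the array and the helper are illustrative, and the ctrl->lock serialization is elided):

    /* Sketch of the "replace with 0xffffffff sentinel on overflow" rule. */
    #include <linux/limits.h>
    #include <linux/types.h>

    #define MAX_CHANGED 1024    /* illustrative cap, stands in for NVME_MAX_CHANGED_NAMESPACES */

    static u32 changed[MAX_CHANGED];
    static u32 nr_changed;

    static void note_changed_ns(u32 nsid)
    {
        u32 i;

        if (nr_changed > MAX_CHANGED)   /* already overflowed (U32_MAX) */
            return;
        for (i = 0; i < nr_changed; i++)
            if (changed[i] == nsid)
                return;                 /* already recorded */
        if (nr_changed == MAX_CHANGED) {
            changed[0] = 0xffffffff;    /* "too many to list" sentinel */
            nr_changed = U32_MAX;
            return;
        }
        changed[nr_changed++] = nsid;
    }
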
237 struct nvmet_ctrl *ctrl; in nvmet_ns_changed() local
241 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ns_changed()
242 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); in nvmet_ns_changed()
243 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) in nvmet_ns_changed()
245 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_ns_changed()
254 struct nvmet_ctrl *ctrl; in nvmet_send_ana_event() local
257 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_send_ana_event()
258 if (port && ctrl->port != port) in nvmet_send_ana_event()
260 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) in nvmet_send_ana_event()
262 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_send_ana_event()
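
nvmet_ns_changed() and nvmet_send_ana_event() walk subsys->ctrls and skip controllers whose host turned the relevant notice off via the Asynchronous Event Configuration feature; nvmet_aen_bit_disabled() is that per-controller gate. A sketch of the kind of bitmask test this implies, with an assumed field and helper name (not copied from nvmet.h):

    /* Illustrative per-controller AEN gate: bit N of an enabled-events mask.
     * Field and helper names are assumptions for this sketch.
     */
    #include <linux/compiler.h>
    #include <linux/types.h>

    struct my_ctrl {
        u32 aen_enabled;        /* updated by the host via Set Features */
    };

    static bool my_aen_bit_disabled(struct my_ctrl *ctrl, u32 bit)
    {
        return !(READ_ONCE(ctrl->aen_enabled) & (1U << bit));
    }
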
303 struct nvmet_ctrl *ctrl; in nvmet_port_del_ctrls() local
306 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_port_del_ctrls()
307 if (ctrl->port == port) in nvmet_port_del_ctrls()
308 ctrl->ops->delete_ctrl(ctrl); in nvmet_port_del_ctrls()
395 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), in nvmet_keep_alive_timer() local
397 bool reset_tbkas = ctrl->reset_tbkas; in nvmet_keep_alive_timer()
399 ctrl->reset_tbkas = false; in nvmet_keep_alive_timer()
401 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", in nvmet_keep_alive_timer()
402 ctrl->cntlid); in nvmet_keep_alive_timer()
403 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_keep_alive_timer()
407 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", in nvmet_keep_alive_timer()
408 ctrl->cntlid, ctrl->kato); in nvmet_keep_alive_timer()
410 nvmet_ctrl_fatal_error(ctrl); in nvmet_keep_alive_timer()
413 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_start_keep_alive_timer() argument
415 if (unlikely(ctrl->kato == 0)) in nvmet_start_keep_alive_timer()
418 pr_debug("ctrl %d start keep-alive timer for %d secs\n", in nvmet_start_keep_alive_timer()
419 ctrl->cntlid, ctrl->kato); in nvmet_start_keep_alive_timer()
421 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_keep_alive_timer()
424 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_stop_keep_alive_timer() argument
426 if (unlikely(ctrl->kato == 0)) in nvmet_stop_keep_alive_timer()
429 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); in nvmet_stop_keep_alive_timer()
431 cancel_delayed_work_sync(&ctrl->ka_work); in nvmet_stop_keep_alive_timer()
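
The keep-alive matches show the traffic-based keep-alive (TBKAS) behaviour: if reset_tbkas was set by command traffic since the last firing, nvmet_keep_alive_timer() clears it and re-arms the delayed work instead of failing the host; only a silent ctrl->kato interval escalates to nvmet_ctrl_fatal_error(). A self-contained sketch of that re-arm-or-expire shape, using illustrative names:

    /* Sketch: delayed work that re-arms itself while traffic is observed. */
    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    struct my_ctrl {
        struct delayed_work ka_work;
        unsigned int kato;      /* keep-alive timeout, seconds */
        bool saw_traffic;       /* set whenever a command is processed */
    };

    static void my_keep_alive_timer(struct work_struct *work)
    {
        struct my_ctrl *ctrl = container_of(to_delayed_work(work),
                                            struct my_ctrl, ka_work);

        if (ctrl->saw_traffic) {
            ctrl->saw_traffic = false;
            /* traffic counts as a keep-alive: just re-arm */
            schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
            return;
        }
        pr_err("keep-alive expired, tearing the controller down\n");
        /* the fatal-error path would be triggered here */
    }
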
516 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, in nvmet_p2pmem_ns_add_p2p() argument
523 lockdep_assert_held(&ctrl->subsys->lock); in nvmet_p2pmem_ns_add_p2p()
525 if (!ctrl->p2p_client || !ns->use_p2pmem) in nvmet_p2pmem_ns_add_p2p()
529 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true); in nvmet_p2pmem_ns_add_p2p()
535 clients[0] = ctrl->p2p_client; in nvmet_p2pmem_ns_add_p2p()
541 dev_name(ctrl->p2p_client), ns->device_path); in nvmet_p2pmem_ns_add_p2p()
546 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev); in nvmet_p2pmem_ns_add_p2p()
569 struct nvmet_ctrl *ctrl; in nvmet_ns_enable() local
593 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
594 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_ns_enable()
616 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
617 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_enable()
626 struct nvmet_ctrl *ctrl; in nvmet_ns_disable() local
635 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
636 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_disable()
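
nvmet_p2pmem_ns_add_p2p() and the ns enable/disable matches maintain a per-controller NSID-to-pci_dev map: a device reference is stored in ctrl->p2p_ns_map with radix_tree_insert(), and tear-down drops both the entry and the reference via pci_dev_put(radix_tree_delete(...)). A hedged sketch of that insert/delete pairing around an illustrative structure:

    /* Sketch: NSID -> pci_dev map where the tree owns a device reference. */
    #include <linux/pci.h>
    #include <linux/radix-tree.h>

    struct my_ctrl {
        struct radix_tree_root p2p_ns_map;
    };

    static int my_map_ns(struct my_ctrl *ctrl, u32 nsid, struct pci_dev *p2p_dev)
    {
        /* the tree holds its own reference to the device */
        return radix_tree_insert(&ctrl->p2p_ns_map, nsid, pci_dev_get(p2p_dev));
    }

    static void my_unmap_ns(struct my_ctrl *ctrl, u32 nsid)
    {
        /* radix_tree_delete() returns the stored pointer (or NULL) */
        pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, nsid));
    }
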
747 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error() local
753 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) in nvmet_set_error()
756 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_set_error()
757 ctrl->err_counter++; in nvmet_set_error()
759 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS]; in nvmet_set_error()
761 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter); in nvmet_set_error()
768 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_set_error()
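
nvmet_set_error() records errors in a fixed ring: err_counter is incremented under error_lock and indexes slots[err_counter % NVMET_ERROR_LOG_SLOTS], so only the most recent slots survive while error_count still reports the running total. A small standalone sketch of that indexing (slot layout and size are illustrative):

    /* Sketch: monotonic error counter feeding a fixed ring of log slots. */
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define MY_ERROR_SLOTS 128  /* illustrative, stands in for NVMET_ERROR_LOG_SLOTS */

    struct my_error_slot {
        u64 error_count;
        u16 status;
    };

    struct my_ctrl {
        spinlock_t error_lock;
        u64 err_counter;
        struct my_error_slot slots[MY_ERROR_SLOTS];
    };

    static void my_log_error(struct my_ctrl *ctrl, u16 status)
    {
        struct my_error_slot *slot;
        unsigned long flags;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        ctrl->err_counter++;
        slot = &ctrl->slots[ctrl->err_counter % MY_ERROR_SLOTS];
        slot->error_count = ctrl->err_counter;  /* running total, not slot index */
        slot->status = status;
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
    }
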
825 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_setup() argument
831 ctrl->cqs[qid] = cq; in nvmet_cq_setup()
836 struct nvmet_ctrl *ctrl = cq->ctrl; in nvmet_cq_destroy() local
838 if (ctrl) { in nvmet_cq_destroy()
839 ctrl->cqs[cq->qid] = NULL; in nvmet_cq_destroy()
840 nvmet_ctrl_put(cq->ctrl); in nvmet_cq_destroy()
841 cq->ctrl = NULL; in nvmet_cq_destroy()
845 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
852 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
862 u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) in nvmet_check_cqid() argument
864 if (!ctrl->cqs) in nvmet_check_cqid()
867 if (cqid > ctrl->subsys->max_qid) in nvmet_check_cqid()
870 if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid])) in nvmet_check_cqid()
876 u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) in nvmet_check_io_cqid() argument
880 return nvmet_check_cqid(ctrl, cqid, create); in nvmet_check_io_cqid()
889 u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_create() argument
894 status = nvmet_check_cqid(ctrl, qid, true); in nvmet_cq_create()
898 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_cq_create()
900 cq->ctrl = ctrl; in nvmet_cq_create()
903 nvmet_cq_setup(ctrl, cq, qid, size); in nvmet_cq_create()
909 u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, in nvmet_check_sqid() argument
912 if (!ctrl->sqs) in nvmet_check_sqid()
915 if (sqid > ctrl->subsys->max_qid) in nvmet_check_sqid()
918 if ((create && ctrl->sqs[sqid]) || in nvmet_check_sqid()
919 (!create && !ctrl->sqs[sqid])) in nvmet_check_sqid()
925 u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_create() argument
931 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_sq_create()
934 status = nvmet_check_sqid(ctrl, sqid, true); in nvmet_sq_create()
944 nvmet_sq_setup(ctrl, sq, sqid, size); in nvmet_sq_create()
945 sq->ctrl = ctrl; in nvmet_sq_create()
950 nvmet_ctrl_put(ctrl); in nvmet_sq_create()
957 struct nvmet_ctrl *ctrl = sq->ctrl; in nvmet_sq_destroy() local
963 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) in nvmet_sq_destroy()
964 nvmet_async_events_failall(ctrl); in nvmet_sq_destroy()
973 * we must reference the ctrl again after waiting for inflight IO in nvmet_sq_destroy()
975 * store sq->ctrl locally, but before we killed the percpu_ref. the in nvmet_sq_destroy()
976 * admin connect allocates and assigns sq->ctrl, which now needs a in nvmet_sq_destroy()
977 * final ref put, as this ctrl is going away. in nvmet_sq_destroy()
979 ctrl = sq->ctrl; in nvmet_sq_destroy()
981 if (ctrl) { in nvmet_sq_destroy()
988 ctrl->reset_tbkas = true; in nvmet_sq_destroy()
989 sq->ctrl->sqs[sq->qid] = NULL; in nvmet_sq_destroy()
990 nvmet_ctrl_put(ctrl); in nvmet_sq_destroy()
991 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
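
The CQ/SQ create and destroy matches follow one lifetime rule: a queue pins its controller with kref_get_unless_zero() when it is created, and the matching nvmet_ctrl_put() happens on destroy after the queue pointer in ctrl->cqs[]/ctrl->sqs[] is cleared. A minimal sketch of that attach-only-if-still-live pattern, with illustrative types and release handler:

    /* Sketch: attach to a refcounted owner only if it is still live, and
     * drop the reference on detach. Names are illustrative.
     */
    #include <linux/errno.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_ctrl {
        struct kref ref;
    };

    static void my_ctrl_release(struct kref *ref)
    {
        kfree(container_of(ref, struct my_ctrl, ref));
    }

    struct my_queue {
        struct my_ctrl *ctrl;
    };

    static int my_queue_attach(struct my_queue *q, struct my_ctrl *ctrl)
    {
        if (!kref_get_unless_zero(&ctrl->ref))
            return -ENODEV;     /* controller already going away */
        q->ctrl = ctrl;
        return 0;
    }

    static void my_queue_detach(struct my_queue *q)
    {
        if (q->ctrl) {
            kref_put(&q->ctrl->ref, my_ctrl_release);
            q->ctrl = NULL;     /* allows reusing the queue later */
        }
    }
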
1069 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) in nvmet_io_cmd_transfer_len()
1185 if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) { in nvmet_req_init()
1193 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
1211 if (sq->ctrl) in nvmet_req_init()
1212 sq->ctrl->reset_tbkas = true; in nvmet_req_init()
1236 if (unlikely(!req->sq->ctrl)) in nvmet_req_transfer_len()
1309 !req->sq->ctrl || !req->sq->qid || !req->ns) in nvmet_req_find_p2p_dev()
1311 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); in nvmet_req_find_p2p_dev()
1372 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) in nvmet_start_ctrl() argument
1374 lockdep_assert_held(&ctrl->lock); in nvmet_start_ctrl()
1382 if (!nvmet_is_disc_subsys(ctrl->subsys) && in nvmet_start_ctrl()
1383 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || in nvmet_start_ctrl()
1384 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { in nvmet_start_ctrl()
1385 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1389 if (nvmet_cc_mps(ctrl->cc) != 0 || in nvmet_start_ctrl()
1390 nvmet_cc_ams(ctrl->cc) != 0 || in nvmet_start_ctrl()
1391 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { in nvmet_start_ctrl()
1392 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1396 ctrl->csts = NVME_CSTS_RDY; in nvmet_start_ctrl()
1404 if (ctrl->kato) in nvmet_start_ctrl()
1405 mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_ctrl()
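
nvmet_start_ctrl() refuses to report CSTS.RDY unless the CC value written by the host is acceptable: for NVM subsystems IOSQES/IOCQES must be the required encodings, and MPS, AMS and CSS must be supported values. The nvmet_cc_*() helpers are plain field extractors over that 32-bit register; below is a sketch of such extractors using the CC bit positions as I read them from the NVMe base specification (an assumption to verify against the spec, not taken from the listing):

    /* Sketch: pulling individual fields out of a 32-bit CC register value.
     * Assumed bit positions: EN=0, CSS=6:4, MPS=10:7, AMS=13:11, SHN=15:14,
     * IOSQES=19:16, IOCQES=23:20.
     */
    #include <linux/types.h>

    static inline u32 my_cc_en(u32 cc)     { return (cc >> 0)  & 0x1; }
    static inline u32 my_cc_css(u32 cc)    { return (cc >> 4)  & 0x7; }
    static inline u32 my_cc_mps(u32 cc)    { return (cc >> 7)  & 0xf; }
    static inline u32 my_cc_ams(u32 cc)    { return (cc >> 11) & 0x7; }
    static inline u32 my_cc_shn(u32 cc)    { return (cc >> 14) & 0x3; }
    static inline u32 my_cc_iosqes(u32 cc) { return (cc >> 16) & 0xf; }
    static inline u32 my_cc_iocqes(u32 cc) { return (cc >> 20) & 0xf; }
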
1408 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) in nvmet_clear_ctrl() argument
1410 lockdep_assert_held(&ctrl->lock); in nvmet_clear_ctrl()
1413 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_clear_ctrl()
1414 ctrl->cc = 0; in nvmet_clear_ctrl()
1417 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) in nvmet_update_cc() argument
1421 mutex_lock(&ctrl->lock); in nvmet_update_cc()
1422 old = ctrl->cc; in nvmet_update_cc()
1423 ctrl->cc = new; in nvmet_update_cc()
1426 nvmet_start_ctrl(ctrl); in nvmet_update_cc()
1428 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1430 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1431 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1434 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1435 mutex_unlock(&ctrl->lock); in nvmet_update_cc()
1439 static void nvmet_init_cap(struct nvmet_ctrl *ctrl) in nvmet_init_cap() argument
1442 ctrl->cap = (1ULL << 37); in nvmet_init_cap()
1444 ctrl->cap |= (1ULL << 43); in nvmet_init_cap()
1446 ctrl->cap |= (15ULL << 24); in nvmet_init_cap()
1448 if (ctrl->ops->get_max_queue_size) in nvmet_init_cap()
1449 ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl), in nvmet_init_cap()
1450 ctrl->port->max_queue_size) - 1; in nvmet_init_cap()
1452 ctrl->cap |= ctrl->port->max_queue_size - 1; in nvmet_init_cap()
1454 if (nvmet_is_passthru_subsys(ctrl->subsys)) in nvmet_init_cap()
1455 nvmet_passthrough_override_cap(ctrl); in nvmet_init_cap()
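
nvmet_init_cap() assembles CAP from literal shifts. As I read these against the NVMe base specification: bit 37 is the CSS bit advertising the NVM command set, bit 43 is the CSS bit for multiple I/O command sets, 15 << 24 sets CAP.TO to 15 (in 500 ms units, i.e. 7.5 s), and the low bits carry CAP.MQES as the maximum queue size minus one. A worked sketch of composing such a value under those assumed field positions:

    /* Sketch: composing a CAP-style register from its fields.
     * Assumed layout: MQES = bits 15:0 (zero-based max queue entries),
     * TO = bits 31:24 (500 ms units), CSS = bits 44:37 (bit 37 = NVM).
     */
    #include <linux/types.h>

    static u64 my_build_cap(u16 max_queue_size)
    {
        u64 cap = 0;

        cap |= 1ULL << 37;                  /* NVM command set supported */
        cap |= 15ULL << 24;                 /* CAP.TO = 15 -> 7.5 seconds */
        cap |= (u64)(max_queue_size - 1);   /* CAP.MQES is zero-based */
        return cap;
    }
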
1462 struct nvmet_ctrl *ctrl = NULL; in nvmet_ctrl_find_get() local
1474 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
1475 if (ctrl->cntlid == cntlid) { in nvmet_ctrl_find_get()
1476 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { in nvmet_ctrl_find_get()
1480 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_ctrl_find_get()
1483 /* ctrl found */ in nvmet_ctrl_find_get()
1488 ctrl = NULL; /* ctrl not found */ in nvmet_ctrl_find_get()
1497 return ctrl; in nvmet_ctrl_find_get()
1502 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1508 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1541 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, in nvmet_setup_p2p_ns_map() argument
1547 lockdep_assert_held(&ctrl->subsys->lock); in nvmet_setup_p2p_ns_map()
1552 ctrl->p2p_client = get_device(p2p_client); in nvmet_setup_p2p_ns_map()
1554 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) in nvmet_setup_p2p_ns_map()
1555 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_setup_p2p_ns_map()
1558 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) in nvmet_release_p2p_ns_map() argument
1563 lockdep_assert_held(&ctrl->subsys->lock); in nvmet_release_p2p_ns_map()
1565 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) in nvmet_release_p2p_ns_map()
1568 put_device(ctrl->p2p_client); in nvmet_release_p2p_ns_map()
1573 struct nvmet_ctrl *ctrl = in nvmet_fatal_error_handler() local
1576 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); in nvmet_fatal_error_handler()
1577 ctrl->ops->delete_ctrl(ctrl); in nvmet_fatal_error_handler()
1583 struct nvmet_ctrl *ctrl; in nvmet_alloc_ctrl() local
1611 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvmet_alloc_ctrl()
1612 if (!ctrl) in nvmet_alloc_ctrl()
1614 mutex_init(&ctrl->lock); in nvmet_alloc_ctrl()
1616 ctrl->port = args->port; in nvmet_alloc_ctrl()
1617 ctrl->ops = args->ops; in nvmet_alloc_ctrl()
1621 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) in nvmet_alloc_ctrl()
1625 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); in nvmet_alloc_ctrl()
1626 INIT_LIST_HEAD(&ctrl->async_events); in nvmet_alloc_ctrl()
1627 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); in nvmet_alloc_ctrl()
1628 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); in nvmet_alloc_ctrl()
1629 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); in nvmet_alloc_ctrl()
1631 memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1633 kref_init(&ctrl->ref); in nvmet_alloc_ctrl()
1634 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
1635 ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; in nvmet_alloc_ctrl()
1636 nvmet_init_cap(ctrl); in nvmet_alloc_ctrl()
1637 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); in nvmet_alloc_ctrl()
1639 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES, in nvmet_alloc_ctrl()
1641 if (!ctrl->changed_ns_list) in nvmet_alloc_ctrl()
1644 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
1647 if (!ctrl->sqs) in nvmet_alloc_ctrl()
1650 ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *), in nvmet_alloc_ctrl()
1652 if (!ctrl->cqs) in nvmet_alloc_ctrl()
1662 ctrl->cntlid = ret; in nvmet_alloc_ctrl()
1668 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) in nvmet_alloc_ctrl()
1672 ctrl->kato = DIV_ROUND_UP(kato, 1000); in nvmet_alloc_ctrl()
1674 ctrl->err_counter = 0; in nvmet_alloc_ctrl()
1675 spin_lock_init(&ctrl->error_lock); in nvmet_alloc_ctrl()
1677 nvmet_start_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
1680 ret = nvmet_ctrl_init_pr(ctrl); in nvmet_alloc_ctrl()
1683 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
1684 nvmet_setup_p2p_ns_map(ctrl, args->p2p_client); in nvmet_alloc_ctrl()
1685 nvmet_debugfs_ctrl_setup(ctrl); in nvmet_alloc_ctrl()
1689 uuid_copy(&ctrl->hostid, args->hostid); in nvmet_alloc_ctrl()
1691 dhchap_status = nvmet_setup_auth(ctrl, args->sq); in nvmet_alloc_ctrl()
1695 nvmet_ctrl_put(ctrl); in nvmet_alloc_ctrl()
1707 nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", in nvmet_alloc_ctrl()
1708 ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, in nvmet_alloc_ctrl()
1709 ctrl->pi_support ? " T10-PI is enabled" : "", in nvmet_alloc_ctrl()
1710 nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "", in nvmet_alloc_ctrl()
1713 return ctrl; in nvmet_alloc_ctrl()
1717 nvmet_stop_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
1718 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_alloc_ctrl()
1720 kfree(ctrl->cqs); in nvmet_alloc_ctrl()
1722 kfree(ctrl->sqs); in nvmet_alloc_ctrl()
1724 kfree(ctrl->changed_ns_list); in nvmet_alloc_ctrl()
1726 kfree(ctrl); in nvmet_alloc_ctrl()
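
The nvmet_alloc_ctrl() matches show the usual multi-step construction: kzalloc the controller, allocate the changed-NS list and the SQ/CQ pointer arrays, take a cntlid from an IDA, and unwind through a goto ladder on any failure. A compact sketch of that allocate-and-unwind idiom with illustrative names (the real function unwinds more state, including the cntlid and keep-alive timer):

    /* Sketch: goto-ladder unwind for a multi-step allocation. */
    #include <linux/slab.h>

    struct my_ctrl {
        void **sqs;
        void **cqs;
    };

    static struct my_ctrl *my_alloc_ctrl(unsigned int nr_queues)
    {
        struct my_ctrl *ctrl;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
            return NULL;

        ctrl->sqs = kcalloc(nr_queues + 1, sizeof(*ctrl->sqs), GFP_KERNEL);
        if (!ctrl->sqs)
            goto out_free_ctrl;

        ctrl->cqs = kcalloc(nr_queues + 1, sizeof(*ctrl->cqs), GFP_KERNEL);
        if (!ctrl->cqs)
            goto out_free_sqs;

        return ctrl;

    out_free_sqs:
        kfree(ctrl->sqs);
    out_free_ctrl:
        kfree(ctrl);
        return NULL;
    }
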
1735 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); in nvmet_ctrl_free() local
1736 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free()
1739 nvmet_ctrl_destroy_pr(ctrl); in nvmet_ctrl_free()
1740 nvmet_release_p2p_ns_map(ctrl); in nvmet_ctrl_free()
1741 list_del(&ctrl->subsys_entry); in nvmet_ctrl_free()
1744 nvmet_stop_keep_alive_timer(ctrl); in nvmet_ctrl_free()
1746 flush_work(&ctrl->async_event_work); in nvmet_ctrl_free()
1747 cancel_work_sync(&ctrl->fatal_err_work); in nvmet_ctrl_free()
1749 nvmet_destroy_auth(ctrl); in nvmet_ctrl_free()
1751 nvmet_debugfs_ctrl_free(ctrl); in nvmet_ctrl_free()
1753 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_ctrl_free()
1755 nvmet_async_events_free(ctrl); in nvmet_ctrl_free()
1756 kfree(ctrl->sqs); in nvmet_ctrl_free()
1757 kfree(ctrl->cqs); in nvmet_ctrl_free()
1758 kfree(ctrl->changed_ns_list); in nvmet_ctrl_free()
1759 kfree(ctrl); in nvmet_ctrl_free()
1764 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) in nvmet_ctrl_put() argument
1766 kref_put(&ctrl->ref, nvmet_ctrl_free); in nvmet_ctrl_put()
1770 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) in nvmet_ctrl_fatal_error() argument
1772 mutex_lock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1773 if (!(ctrl->csts & NVME_CSTS_CFS)) { in nvmet_ctrl_fatal_error()
1774 ctrl->csts |= NVME_CSTS_CFS; in nvmet_ctrl_fatal_error()
1775 queue_work(nvmet_wq, &ctrl->fatal_err_work); in nvmet_ctrl_fatal_error()
1777 mutex_unlock(&ctrl->lock); in nvmet_ctrl_fatal_error()
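
nvmet_ctrl_fatal_error() latches CSTS.CFS under ctrl->lock before queueing fatal_err_work, so repeated calls schedule the teardown at most once. A tiny sketch of that latch-once idiom with illustrative names:

    /* Sketch: queue teardown work only the first time a fatal flag is latched. */
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct my_ctrl {
        struct mutex lock;
        bool fatal;
        struct work_struct fatal_work;
    };

    static void my_fatal_error(struct my_ctrl *ctrl)
    {
        mutex_lock(&ctrl->lock);
        if (!ctrl->fatal) {
            ctrl->fatal = true;
            schedule_work(&ctrl->fatal_work);
        }
        mutex_unlock(&ctrl->lock);
    }
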
1781 ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, in nvmet_ctrl_host_traddr() argument
1784 if (!ctrl->ops->host_traddr) in nvmet_ctrl_host_traddr()
1786 return ctrl->ops->host_traddr(ctrl, traddr, traddr_len); in nvmet_ctrl_host_traddr()
1922 struct nvmet_ctrl *ctrl; in nvmet_subsys_del_ctrls() local
1925 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_subsys_del_ctrls()
1926 ctrl->ops->delete_ctrl(ctrl); in nvmet_subsys_del_ctrls()