Lines Matching +full:csi +full:- +full:b

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
9 #include <linux/blk-mq.h>
10 #include <linux/blk-integrity.h>
17 #include <linux/backing-dev.h>
29 #include <linux/nvme-auth.h>
110 * nvme_wq - hosts nvme related works that are not reset or delete
111 * nvme_reset_wq - hosts nvme reset works
112 * nvme_delete_wq - hosts nvme delete works
115 * keep-alive, periodic reconnects etc. nvme_reset_wq
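
A minimal sketch (not part of core.c; the helper name is hypothetical) of how work is split across the three queues, using the queue_work() calls that appear later in this listing:

static void nvme_demo_dispatch(struct nvme_ctrl *ctrl)
{
	queue_work(nvme_wq, &ctrl->scan_work);		/* scans, keep-alive, AENs */
	queue_work(nvme_reset_wq, &ctrl->reset_work);	/* reset path only */
	queue_work(nvme_delete_wq, &ctrl->delete_work);	/* teardown only */
}

Keeping resets and deletes on their own queues means a stalled reset cannot block a controller delete, and vice versa.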
141 .name = "nvme-subsystem",
147 .name = "nvme-generic",
156 u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi);
163 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
164 queue_work(nvme_wq, &ctrl->scan_work);
176 return -EBUSY;
177 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
178 return -EBUSY;
191 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
192 dev_info(ctrl->device, "failfast expired\n");
198 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
201 schedule_delayed_work(&ctrl->failfast_work,
202 ctrl->opts->fast_io_fail_tmo * HZ);
207 if (!ctrl->opts)
210 cancel_delayed_work_sync(&ctrl->failfast_work);
211 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
218 return -EBUSY;
219 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
220 return -EBUSY;
231 flush_work(&ctrl->reset_work);
233 ret = -ENETRESET;
241 dev_info(ctrl->device,
244 flush_work(&ctrl->reset_work);
247 ctrl->ops->delete_ctrl(ctrl);
262 return -EBUSY;
263 if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
264 return -EBUSY;
273 * since ->delete_ctrl can free the controller.
328 crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
330 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
332 nvme_req(req)->retries++;
334 blk_mq_delay_kick_requeue_list(req->q, delay);
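
A hypothetical helper mirroring the retry-delay math above; CRD selects one of the three Command Retry Delay Times the controller reported, each expressed in 100 ms units:

static unsigned long nvme_crd_delay_ms(struct nvme_ctrl *ctrl, u16 status)
{
	u16 crd = (status & NVME_STATUS_CRD) >> 11;

	/* e.g. crd = 2 with crdt[1] = 5 gives 5 * 100 = 500 ms */
	return crd ? ctrl->crdt[crd - 1] * 100 : 0;
}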
339 struct nvme_ns *ns = req->q->queuedata;
344 ns->disk ? ns->disk->disk_name : "?",
345 nvme_get_opcode_str(nr->cmd->common.opcode),
346 nr->cmd->common.opcode,
347 nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
348 blk_rq_bytes(req) >> ns->head->lba_shift,
349 nvme_get_error_status_str(nr->status),
350 NVME_SCT(nr->status), /* Status Code Type */
351 nr->status & NVME_SC_MASK, /* Status Code */
352 nr->status & NVME_STATUS_MORE ? "MORE " : "",
353 nr->status & NVME_STATUS_DNR ? "DNR " : "");
358 dev_name(nr->ctrl->device),
359 nvme_get_admin_opcode_str(nr->cmd->common.opcode),
360 nr->cmd->common.opcode,
361 nvme_get_error_status_str(nr->status),
362 NVME_SCT(nr->status), /* Status Code Type */
363 nr->status & NVME_SC_MASK, /* Status Code */
364 nr->status & NVME_STATUS_MORE ? "MORE " : "",
365 nr->status & NVME_STATUS_DNR ? "DNR " : "");
370 struct nvme_ns *ns = req->q->queuedata;
375 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
376 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
377 nvme_get_admin_opcode_str(nr->cmd->common.opcode),
378 nr->cmd->common.opcode,
379 nvme_get_error_status_str(nr->status),
380 NVME_SCT(nr->status), /* Status Code Type */
381 nr->status & NVME_SC_MASK, /* Status Code */
382 nr->status & NVME_STATUS_MORE ? "MORE " : "",
383 nr->status & NVME_STATUS_DNR ? "DNR " : "",
384 le32_to_cpu(nr->cmd->common.cdw10),
385 le32_to_cpu(nr->cmd->common.cdw11),
386 le32_to_cpu(nr->cmd->common.cdw12),
387 le32_to_cpu(nr->cmd->common.cdw13),
388 le32_to_cpu(nr->cmd->common.cdw14),
389 le32_to_cpu(nr->cmd->common.cdw15));
401 if (likely(nvme_req(req)->status == 0))
405 (nvme_req(req)->status & NVME_STATUS_DNR) ||
406 nvme_req(req)->retries >= nvme_max_retries)
409 if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
412 if (req->cmd_flags & REQ_NVME_MPATH) {
413 if (nvme_is_path_error(nvme_req(req)->status) ||
414 blk_queue_dying(req->q))
417 if (blk_queue_dying(req->q))
428 struct nvme_ns *ns = req->q->queuedata;
430 req->__sector = nvme_lba_to_sect(ns->head,
431 le64_to_cpu(nvme_req(req)->result.u64));
437 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
445 if (req->cmd_flags & REQ_NVME_MPATH)
451 blk_status_t status = nvme_error_status(nvme_req(req)->status);
459 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
465 * Completions of long-running commands should not be able to
469 * req->deadline - req->timeout is the command submission time
472 if (ctrl->kas &&
473 req->deadline - req->timeout >= ctrl->ka_last_check_time)
474 ctrl->comp_seen = true;
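
Worked example of the submission-time check above (all numbers assumed):

/*
 * With kato = 10 s the keep-alive interval is 5 s.  A command
 * submitted at t = 7 s (deadline - timeout) that completes at t = 9 s
 * satisfies 7 s >= ka_last_check_time (5 s), so it counts as traffic
 * and TBKAS can skip sending the next keep-alive command.
 */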
488 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
507 * Called to unwind from ->queue_rq on a failed command submission so that the
514 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
523 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
524 "Cancelling I/O %d", req->tag);
530 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
531 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
539 if (ctrl->tagset) {
540 blk_mq_tagset_busy_iter(ctrl->tagset,
542 blk_mq_tagset_wait_completed_request(ctrl->tagset);
549 if (ctrl->admin_tagset) {
550 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
552 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
564 spin_lock_irqsave(&ctrl->lock, flags);
632 WRITE_ONCE(ctrl->state, new_state);
633 wake_up_all(&ctrl->state_wq);
636 spin_unlock_irqrestore(&ctrl->lock, flags);
658 wait_event(ctrl->state_wq,
671 ida_free(&head->subsys->ns_ida, head->instance);
672 cleanup_srcu_struct(&head->srcu);
673 nvme_put_subsystem(head->subsys);
674 kfree(head->plids);
680 return kref_get_unless_zero(&head->ref);
685 kref_put(&head->ref, nvme_free_ns_head);
692 put_disk(ns->disk);
693 nvme_put_ns_head(ns->head);
694 nvme_put_ctrl(ns->ctrl);
700 return kref_get_unless_zero(&ns->kref);
705 kref_put(&ns->kref, nvme_free_ns);
711 nvme_req(req)->status = 0;
712 nvme_req(req)->retries = 0;
713 nvme_req(req)->flags = 0;
714 req->rq_flags |= RQF_DONTPREP;
723 if (req->q->queuedata) {
724 struct nvme_ns *ns = req->q->disk->private_data;
726 logging_enabled = ns->head->passthru_err_log_enabled;
727 req->timeout = NVME_IO_TIMEOUT;
729 logging_enabled = nr->ctrl->passthru_err_log_enabled;
730 req->timeout = NVME_ADMIN_TIMEOUT;
734 req->rq_flags |= RQF_QUIET;
737 cmd->common.flags &= ~NVME_CMD_SGL_ALL;
739 req->cmd_flags |= REQ_FAILFAST_DRIVER;
740 if (req->mq_hctx->type == HCTX_TYPE_POLL)
741 req->cmd_flags |= REQ_POLLED;
743 memcpy(nr->cmd, cmd, sizeof(*cmd));
764 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
765 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
768 if (!(rq->rq_flags & RQF_DONTPREP))
788 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
791 if (ctrl->ops->flags & NVME_F_FABRICS) {
799 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
800 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
801 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
802 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
820 cmnd->common.opcode = nvme_cmd_flush;
821 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
845 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
848 range = page_address(ns->ctrl->discard_page);
851 if (queue_max_discard_segments(req->q) == 1) {
852 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
853 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
861 u64 slba = nvme_sect_to_lba(ns->head,
862 bio->bi_iter.bi_sector);
863 u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
875 if (virt_to_page(range) == ns->ctrl->discard_page)
876 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
883 cmnd->dsm.opcode = nvme_cmd_dsm;
884 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
885 cmnd->dsm.nr = cpu_to_le32(segments - 1);
886 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
888 bvec_set_virt(&req->special_vec, range, alloc_size);
889 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
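
Worked example for the single-range fast path above (4 KiB LBAs assumed, so lba_shift = 12):

/*
 * Discarding 1 MiB at 512-byte sector 2048:
 *   slba = 2048 >> (12 - 9) = 256
 *   nlb  = 2048 sectors >> (12 - 9) = 256 logical blocks
 * packed into one struct nvme_dsm_range, with dsm.nr = 0 (zero-based).
 */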
896 cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
897 cmnd->rw.lbatm = cpu_to_le16(0xffff);
907 switch (ns->head->pi_type) {
916 switch (ns->head->guard_type) {
918 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
925 cmnd->rw.reftag = cpu_to_le32(lower);
926 cmnd->rw.cdw3 = cpu_to_le32(upper);
938 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
941 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
942 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
943 cmnd->write_zeroes.slba =
944 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
945 cmnd->write_zeroes.length =
946 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
948 if (!(req->cmd_flags & REQ_NOUNMAP) &&
949 (ns->head->features & NVME_NS_DEAC))
950 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
952 if (nvme_ns_has_pi(ns->head)) {
953 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
963 * non-atomically. The request issuer should ensure that the write is within
968 struct request_queue *q = req->q;
975 u64 mask = boundary_bytes - 1, imask = ~mask;
977 u64 end = start + blk_rq_bytes(req) - 1;
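
Worked example for the boundary test built from mask/imask above (16 KiB boundary assumed):

/*
 * boundary_bytes = 16 KiB, mask = 0x3fff, imask = ~0x3fffULL.
 * An 8 KiB write at byte offset 12 KiB has start = 0x3000 and
 * end = 0x4fff; start & imask (0x0000) != end & imask (0x4000),
 * so the request crosses the atomic boundary and is not valid.
 */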
997 if (req->cmd_flags & REQ_FUA)
999 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
1002 if (req->cmd_flags & REQ_RAHEAD)
1005 if (op == nvme_cmd_write && ns->head->nr_plids) {
1006 u16 write_stream = req->bio->bi_write_stream;
1008 if (WARN_ON_ONCE(write_stream > ns->head->nr_plids))
1012 dsmgmt |= ns->head->plids[write_stream - 1] << 16;
1017 if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
1020 cmnd->rw.opcode = op;
1021 cmnd->rw.flags = 0;
1022 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
1023 cmnd->rw.cdw2 = 0;
1024 cmnd->rw.cdw3 = 0;
1025 cmnd->rw.metadata = 0;
1026 cmnd->rw.slba =
1027 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
1028 cmnd->rw.length =
1029 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
1030 cmnd->rw.reftag = 0;
1031 cmnd->rw.lbat = 0;
1032 cmnd->rw.lbatm = 0;
1034 if (ns->head->ms) {
1042 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
1048 if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
1050 if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
1056 if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
1062 cmnd->rw.control = cpu_to_le16(control);
1063 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
1069 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
1070 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
1072 if (req->special_vec.bv_page == ctrl->discard_page)
1073 clear_bit_unlock(0, &ctrl->discard_page_busy);
1075 kfree(bvec_virt(&req->special_vec));
1076 req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
1083 struct nvme_command *cmd = nvme_req(req)->cmd;
1086 if (!(req->rq_flags & RQF_DONTPREP))
1130 cmd->common.command_id = nvme_cid(req);
1147 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1148 return -EINTR;
1149 if (nvme_req(rq)->status)
1150 return nvme_req(rq)->status;
1175 qid - 1);
1181 req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
1191 *result = nvme_req(req)->result;
1211 effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1213 dev_warn_once(ctrl->device,
1224 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1244 mutex_lock(&ctrl->scan_lock);
1245 mutex_lock(&ctrl->subsys->lock);
1246 nvme_mpath_start_freeze(ctrl->subsys);
1247 nvme_mpath_wait_freeze(ctrl->subsys);
1260 nvme_mpath_unfreeze(ctrl->subsys);
1261 mutex_unlock(&ctrl->subsys->lock);
1262 mutex_unlock(&ctrl->scan_lock);
1266 &ctrl->flags)) {
1267 dev_info(ctrl->device,
1273 flush_work(&ctrl->scan_work);
1278 switch (cmd->common.opcode) {
1280 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1308 unsigned long delay = ctrl->kato * HZ / 2;
1316 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1325 unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
1330 delay = ka_next_check_tm - now;
1332 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1338 struct nvme_ctrl *ctrl = rq->end_io_data;
1339 unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
1348 delay -= rtt;
1350 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1358 dev_err(ctrl->device,
1364 ctrl->ka_last_check_time = jiffies;
1365 ctrl->comp_seen = false;
1367 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
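
Worked example for the RTT compensation above (timings assumed):

/*
 * kato = 10 s gives delay = 5 s.  If the keep-alive command itself
 * took rtt = 1.2 s to complete, the next one is queued after
 * 5 - 1.2 = 3.8 s; an rtt larger than the delay triggers the
 * "long keepalive RTT" warning instead.
 */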
1375 bool comp_seen = ctrl->comp_seen;
1378 ctrl->ka_last_check_time = jiffies;
1380 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1381 dev_dbg(ctrl->device,
1382 "reschedule traffic based keep-alive timer\n");
1383 ctrl->comp_seen = false;
1388 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1392 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1396 nvme_init_request(rq, &ctrl->ka_cmd);
1398 rq->timeout = ctrl->kato * HZ;
1399 rq->end_io = nvme_keep_alive_end_io;
1400 rq->end_io_data = ctrl;
1406 if (unlikely(ctrl->kato == 0))
1414 if (unlikely(ctrl->kato == 0))
1417 cancel_delayed_work_sync(&ctrl->ka_work);
1425 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1427 dev_info(ctrl->device,
1429 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1432 ctrl->kato = new_kato;
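
Worked example for the Set Features (Keep Alive Timer) handling above:

/*
 * cdw11 = 15000 (milliseconds) yields
 * new_kato = DIV_ROUND_UP(15000, 1000) = 15 s; the dev_info() logs
 * the half-interval moving from kato * 1000 / 2 to 7500 ms before
 * ctrl->kato is updated.
 */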
1441 if (ctrl->vs >= NVME_VS(1, 2, 0))
1452 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1453 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS))
1467 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1473 return -ENOMEM;
1475 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1490 switch (cur->nidt) {
1492 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1493 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1494 warn_str, cur->nidl);
1495 return -1;
1497 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1499 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1502 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1503 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1504 warn_str, cur->nidl);
1505 return -1;
1507 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1509 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1512 if (cur->nidl != NVME_NIDT_UUID_LEN) {
1513 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1514 warn_str, cur->nidl);
1515 return -1;
1517 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1519 uuid_copy(&ids->uuid, data + sizeof(*cur));
1522 if (cur->nidl != NVME_NIDT_CSI_LEN) {
1523 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1524 warn_str, cur->nidl);
1525 return -1;
1527 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1532 return cur->nidl;
1544 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1546 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1550 c.identify.nsid = cpu_to_le32(info->nsid);
1555 return -ENOMEM;
1557 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1560 dev_warn(ctrl->device,
1562 info->nsid, status);
1569 if (cur->nidl == 0)
1572 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1580 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1581 info->nsid);
1582 status = -EINVAL;
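
For reference, a sketch of the descriptor entry being parsed above (Identify CNS 03h list; field names follow the NVMe spec, layout shown for illustration):

struct nvme_ns_id_desc_sketch {
	__u8	nidt;		/* 1 = EUI64, 2 = NGUID, 3 = UUID, 4 = CSI */
	__u8	nidl;		/* payload length; 0 terminates the list */
	__le16	reserved;
	__u8	nid[];		/* nidl bytes of identifier */
};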
1596 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1603 return -ENOMEM;
1605 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1607 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1617 struct nvme_ns_ids *ids = &info->ids;
1621 ret = nvme_identify_ns(ctrl, info->nsid, &id);
1625 if (id->ncap == 0) {
1627 info->is_removed = true;
1628 ret = -ENODEV;
1632 info->anagrpid = id->anagrpid;
1633 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1634 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1635 info->is_ready = true;
1636 info->endgid = le16_to_cpu(id->endgid);
1637 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1638 dev_info(ctrl->device,
1641 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1642 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1643 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
1644 if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1645 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1646 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
1660 .identify.nsid = cpu_to_le32(info->nsid),
1667 return -ENOMEM;
1669 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1671 info->anagrpid = id->anagrpid;
1672 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1673 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1674 info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1675 info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
1676 info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
1677 info->endgid = le16_to_cpu(id->endgid);
1694 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1721 u32 q_count = (*count - 1) | ((*count - 1) << 16);
1742 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
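
Worked example for the packed queue-count feature value above:

/*
 * Requesting *count = 8 I/O queues sends
 * q_count = 7 | (7 << 16) = 0x00070007 (NSQR and NCQR are
 * zero-based); the controller's result is unpacked the same way,
 * and the smaller of the two granted counts is what can be used.
 */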
1759 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1768 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1771 queue_work(nvme_wq, &ctrl->async_event_work);
1778 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1782 if (!try_module_get(ns->ctrl->ops->module))
1790 return -ENXIO;
1796 module_put(ns->ctrl->ops->module);
1802 return nvme_ns_open(disk->private_data);
1807 nvme_ns_release(disk->private_data);
1813 geo->heads = 1 << 6;
1814 geo->sectors = 1 << 5;
1815 geo->cylinders = get_capacity(disk) >> 11;
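
The fake geometry above packs 64 heads * 32 sectors = 2048 sectors per cylinder, hence the shift by 11; a worked example:

/*
 * A hypothetical 1 TiB namespace (2^31 512-byte sectors) reports
 * geo->cylinders = 2^31 >> 11 = 2^20 cylinders.
 */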
1822 struct blk_integrity *bi = &lim->integrity;
1826 if (!head->ms)
1834 !(head->features & NVME_NS_METADATA_SUPPORTED))
1837 switch (head->pi_type) {
1839 switch (head->guard_type) {
1841 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1842 bi->tag_size = sizeof(u16) + sizeof(u32);
1843 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1846 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1847 bi->tag_size = sizeof(u16) + 6;
1848 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1856 switch (head->guard_type) {
1858 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1859 bi->tag_size = sizeof(u16);
1860 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1864 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1865 bi->tag_size = sizeof(u16);
1866 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1877 bi->metadata_size = head->ms;
1878 if (bi->csum_type) {
1879 bi->pi_tuple_size = head->pi_size;
1880 bi->pi_offset = info->pi_offset;
1887 struct nvme_ctrl *ctrl = ns->ctrl;
1889 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
1890 lim->max_hw_discard_sectors =
1891 nvme_lba_to_sect(ns->head, ctrl->dmrsl);
1892 else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
1893 lim->max_hw_discard_sectors = UINT_MAX;
1895 lim->max_hw_discard_sectors = 0;
1897 lim->discard_granularity = lim->logical_block_size;
1899 if (ctrl->dmrl)
1900 lim->max_discard_segments = ctrl->dmrl;
1902 lim->max_discard_segments = NVME_DSM_MAX_RANGES;
1905 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1907 return uuid_equal(&a->uuid, &b->uuid) &&
1908 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1909 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1910 a->csi == b->csi;
1920 .identify.csi = NVME_CSI_NVM,
1927 return -ENOMEM;
1929 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
1940 u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
1948 if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) &&
1952 head->guard_type = guard_type;
1953 switch (head->guard_type) {
1955 head->pi_size = sizeof(struct crc64_pi_tuple);
1958 head->pi_size = sizeof(struct t10_pi_tuple);
1969 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1970 head->pi_type = 0;
1971 head->pi_size = 0;
1972 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
1973 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1976 if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1979 head->pi_size = sizeof(struct t10_pi_tuple);
1980 head->guard_type = NVME_NVM_NS_16B_GUARD;
1983 if (head->pi_size && head->ms >= head->pi_size)
1984 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1985 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) {
1987 head->pi_type = 0;
1989 info->pi_offset = head->ms - head->pi_size;
1992 if (ctrl->ops->flags & NVME_F_FABRICS) {
1998 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
2001 head->features |= NVME_NS_EXT_LBAS;
2012 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
2013 head->features |= NVME_NS_METADATA_SUPPORTED;
2021 if (id->flbas & NVME_NS_FLBAS_META_EXT)
2022 head->features |= NVME_NS_EXT_LBAS;
2024 head->features |= NVME_NS_METADATA_SUPPORTED;
2037 if (id->nabo)
2040 if ((id->nsfeat & NVME_NS_FEAT_ATOMICS) && id->nawupf) {
2042 * Use the per-namespace atomic write unit when available.
2044 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
2045 if (id->nabspf)
2046 boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
2055 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
2058 lim->atomic_write_hw_max = atomic_bs;
2059 lim->atomic_write_hw_boundary = boundary;
2060 lim->atomic_write_hw_unit_min = bs;
2061 lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
2062 lim->features |= BLK_FEAT_ATOMIC_WRITES;
2068 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
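
Worked example for the driver segment bound above:

/*
 * With NVME_CTRL_PAGE_SIZE = 4096 (8 sectors per PRP entry) and
 * max_hw_sectors = 1024 (512 KiB), the result is 1024 / 8 + 1 = 129
 * segments: one PRP entry per page plus one for a leading unaligned
 * chunk.
 */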
2074 lim->max_hw_sectors = ctrl->max_hw_sectors;
2075 lim->max_segments = min_t(u32, USHRT_MAX,
2076 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
2077 lim->max_integrity_segments = ctrl->max_integrity_segments;
2078 lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin);
2079 lim->max_segment_size = UINT_MAX;
2080 lim->dma_alignment = 3;
2086 struct nvme_ns_head *head = ns->head;
2087 u32 bs = 1U << head->lba_shift;
2104 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
2106 phys_bs = bs * (1 + le16_to_cpu(id->npwg));
2108 if (id->nows)
2109 io_opt = bs * (1 + le16_to_cpu(id->nows));
2117 lim->logical_block_size = bs;
2118 lim->physical_block_size = min(phys_bs, atomic_bs);
2119 lim->io_min = phys_bs;
2120 lim->io_opt = io_opt;
2121 if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
2122 (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
2123 lim->max_write_zeroes_sectors = UINT_MAX;
2125 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
2131 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
2143 struct nvme_ctrl *ctrl = ns->ctrl;
2146 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2147 is_power_of_2(ctrl->max_hw_sectors))
2148 iob = ctrl->max_hw_sectors;
2150 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
2156 if (nvme_first_scan(ns->disk))
2158 ns->disk->disk_name, iob);
2162 if (blk_queue_is_zoned(ns->disk->queue)) {
2163 if (nvme_first_scan(ns->disk))
2165 ns->disk->disk_name);
2169 lim->chunk_sectors = iob;
2179 lim = queue_limits_start_update(ns->disk->queue);
2180 nvme_set_ctrl_limits(ns->ctrl, &lim, false);
2182 memflags = blk_mq_freeze_queue(ns->disk->queue);
2183 ret = queue_limits_commit_update(ns->disk->queue, &lim);
2184 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2185 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2187 /* Hide the block-interface for these devices */
2189 ret = -ENODEV;
2203 NVME_CSI_NVM, &hdr, size, 0, info->endgid);
2205 dev_warn(ctrl->device,
2207 info->endgid);
2213 dev_warn(ctrl->device, "FDP config size too large:%zu\n",
2220 return -ENOMEM;
2223 NVME_CSI_NVM, h, size, 0, info->endgid);
2225 dev_warn(ctrl->device,
2227 info->endgid);
2231 n = le16_to_cpu(h->numfdpc) + 1;
2233 dev_warn(ctrl->device, "FDP index:%d out of range:%d\n",
2242 end = log + size - sizeof(*h);
2244 log += le16_to_cpu(desc->dsze);
2247 dev_warn(ctrl->device,
2254 if (le32_to_cpu(desc->nrg) > 1) {
2255 dev_warn(ctrl->device, "FDP NRG > 1 not supported\n");
2260 info->runs = le64_to_cpu(desc->runs);
2268 struct nvme_ns_head *head = ns->head;
2269 struct nvme_ctrl *ctrl = ns->ctrl;
2281 if (head->nr_plids)
2284 ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
2287 dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret);
2295 if (!info->runs)
2298 size = struct_size(ruhs, ruhsd, S8_MAX - 1);
2301 return -ENOMEM;
2304 c.imr.nsid = cpu_to_le32(head->ns_id);
2307 ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size);
2309 dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret);
2313 head->nr_plids = le16_to_cpu(ruhs->nruhsd);
2314 if (!head->nr_plids)
2317 head->plids = kcalloc(head->nr_plids, sizeof(*head->plids),
2319 if (!head->plids) {
2320 dev_warn(ctrl->device,
2322 head->nr_plids);
2323 head->nr_plids = 0;
2324 ret = -ENOMEM;
2328 for (i = 0; i < head->nr_plids; i++)
2329 head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid);
2347 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2351 if (id->ncap == 0) {
2353 info->is_removed = true;
2354 ret = -ENXIO;
2357 lbaf = nvme_lbaf_index(id->flbas);
2359 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
2360 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
2366 ns->head->ids.csi == NVME_CSI_ZNS) {
2372 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) {
2378 lim = queue_limits_start_update(ns->disk->queue);
2380 memflags = blk_mq_freeze_queue(ns->disk->queue);
2381 ns->head->lba_shift = id->lbaf[lbaf].ds;
2382 ns->head->nuse = le64_to_cpu(id->nuse);
2383 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
2384 nvme_set_ctrl_limits(ns->ctrl, &lim, false);
2385 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
2392 ns->head->ids.csi == NVME_CSI_ZNS)
2395 if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
2400 if (info->is_rotational)
2404 * Register a metadata profile for PI, or the plain non-integrity NVMe
2409 if (!nvme_init_integrity(ns->head, &lim, info))
2412 lim.max_write_streams = ns->head->nr_plids;
2414 lim.write_stream_granularity = min(info->runs, U32_MAX);
2421 * require that, it must be a no-op if reads from deallocated data
2424 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) {
2425 ns->head->features |= NVME_NS_DEAC;
2429 ret = queue_limits_commit_update(ns->disk->queue, &lim);
2431 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2435 set_capacity_and_notify(ns->disk, capacity);
2436 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2437 set_bit(NVME_NS_READY, &ns->flags);
2438 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2440 if (blk_queue_is_zoned(ns->queue)) {
2441 ret = blk_revalidate_disk_zones(ns->disk);
2442 if (ret && !nvme_first_scan(ns->disk))
2458 switch (info->ids.csi) {
2461 dev_info(ns->ctrl->device,
2463 info->nsid);
2473 dev_info(ns->ctrl->device,
2474 "block device for nsid %u not supported (csi %u)\n",
2475 info->nsid, info->ids.csi);
2484 if (ret == -ENODEV) {
2485 ns->disk->flags |= GENHD_FL_HIDDEN;
2486 set_bit(NVME_NS_READY, &ns->flags);
2491 if (!ret && nvme_ns_head_multipath(ns->head)) {
2492 struct queue_limits *ns_lim = &ns->disk->queue->limits;
2496 lim = queue_limits_start_update(ns->head->disk->queue);
2497 memflags = blk_mq_freeze_queue(ns->head->disk->queue);
2513 lim.logical_block_size = ns_lim->logical_block_size;
2514 lim.physical_block_size = ns_lim->physical_block_size;
2515 lim.io_min = ns_lim->io_min;
2516 lim.io_opt = ns_lim->io_opt;
2517 queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
2518 ns->head->disk->disk_name);
2520 ns->head->disk->flags |= GENHD_FL_HIDDEN;
2522 nvme_init_integrity(ns->head, &lim, info);
2523 lim.max_write_streams = ns_lim->max_write_streams;
2524 lim.write_stream_granularity = ns_lim->write_stream_granularity;
2525 ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
2527 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
2528 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2531 blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
2540 struct nvme_ns_ids *ids = &ns->head->ids;
2543 return -EINVAL;
2545 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
2546 memcpy(id, &ids->nguid, sizeof(ids->nguid));
2547 return sizeof(ids->nguid);
2549 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
2550 memcpy(id, &ids->eui64, sizeof(ids->eui64));
2551 return sizeof(ids->eui64);
2554 return -EINVAL;
2560 return nvme_ns_get_unique_id(disk->private_data, id, type);
2578 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2584 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2585 if (!ctrl->opal_dev)
2586 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2588 opal_unlock_from_suspend(ctrl->opal_dev);
2590 free_opal_dev(ctrl->opal_dev);
2591 ctrl->opal_dev = NULL;
2604 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, args);
2629 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2631 return -ENODEV;
2637 return -EINTR;
2639 dev_err(ctrl->device,
2642 return -ENODEV;
2653 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2655 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2657 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2659 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2666 ctrl->shutdown_timeout, "shutdown");
2668 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2671 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2681 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2683 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2686 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2689 dev_err(ctrl->device,
2692 return -ENODEV;
2695 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2696 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2698 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2706 ctrl->ctrl_config &= ~NVME_CC_CRIME;
2708 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2709 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2710 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2711 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2716 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2720 timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2721 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2724 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2726 dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2739 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2740 crto, ctrl->cap);
2745 ctrl->ctrl_config |= NVME_CC_ENABLE;
2746 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
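
Worked example for the CC.MPS encoding above:

/*
 * NVME_CTRL_PAGE_SHIFT = 12 (4 KiB) encodes CC.MPS = 12 - 12 = 0,
 * which is only legal when CAP.MPSMIN is 0 (dev_page_min = 12); a
 * controller with MPSMIN = 1 requires at least 8 KiB pages, so the
 * check above fails with -ENODEV.
 */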
2759 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2766 dev_warn_once(ctrl->device,
2778 if (ctrl->crdt[0])
2780 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2790 host->acre = acre;
2791 host->lbafee = lbafee;
2833 * - If the parameters provide explicit timeouts and tolerances, they will be
2834 * used to build a table with up to 2 non-operational states to transition to.
2840 * - If not, we'll configure the table with a simple heuristic: we are willing
2843 * lower-power non-operational state after waiting 50 * (enlat + exlat)
2847 * We will not autonomously enter any non-operational state for which the total
2858 int max_ps = -1;
2867 if (!ctrl->apsta)
2870 if (ctrl->npss > 31) {
2871 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2879 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2881 dev_dbg(ctrl->device, "APST disabled\n");
2886 * Walk through all states from lowest- to highest-power.
2887 * According to the spec, lower-numbered states use more power. NPSS,
2888 * despite the name, is the index of the lowest-power state, not the
2891 for (state = (int)ctrl->npss; state >= 0; state--) {
2895 table->entries[state] = target;
2901 if (state == ctrl->npss &&
2902 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2906 * Is this state a useful non-operational state for higher-power
2909 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2912 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2913 if (exit_latency_us > ctrl->ps_max_latency_us)
2917 le32_to_cpu(ctrl->psd[state].entry_lat);
2930 if (transition_ms > (1 << 24) - 1)
2931 transition_ms = (1 << 24) - 1;
2935 if (max_ps == -1)
2941 if (max_ps == -1)
2942 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2944 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2952 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
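
Worked example for the heuristic described in the comment block above (PSD values assumed):

/*
 * A non-operational state with enlat = 5 ms and exlat = 10 ms gets an
 * idle timer of 50 * (5 + 10) = 750 ms, and is eligible only while
 * exlat (10 ms) <= ctrl->ps_max_latency_us; the (1 << 24) - 1 clamp
 * caps any computed transition time at roughly 4.66 hours.
 */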
2972 if (ctrl->ps_max_latency_us != latency) {
2973 ctrl->ps_max_latency_us = latency;
3003 * This LiteON CL1-3D*-Q11 firmware version has a race
3013 * This Kioxia CD6-V Series / HPE PE8030 device times out and
3018 * to use "nvme set-feature" to disable APST, but booting with
3041 /* match is null-terminated but idstr is space-padded. */
3065 return q->vid == le16_to_cpu(id->vid) &&
3066 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
3067 string_matches(id->fr, q->fr, sizeof(id->fr));
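
A sketch of the space-padded comparison the quirk match above relies on, per the "null-terminated vs space-padded" comment; treat as illustrative rather than a verbatim copy:

static bool string_matches_sketch(const char *idstr, const char *match,
				  size_t len)
{
	size_t matchlen;

	if (!match)
		return true;		/* no constraint on this field */
	matchlen = strlen(match);
	if (memcmp(idstr, match, matchlen))
		return false;
	while (matchlen < len)		/* the rest must be space padding */
		if (idstr[matchlen++] != ' ')
			return false;
	return true;
}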
3076 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
3077 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
3079 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
3083 if (ctrl->vs >= NVME_VS(1, 2, 1))
3084 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
3092 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
3094 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
3095 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
3096 off += sizeof(id->sn);
3097 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
3098 off += sizeof(id->mn);
3099 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
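
Shape of the generated fallback NQN above (all values hypothetical):

/*
 * vid = 0x8086, ssvid = 0x8086, sn = "S1234567", mn = "Some Model":
 * "nqn.2014.08.org.nvmexpress:80868086S1234567Some Model", with the
 * remainder of the NVMF_NQN_SIZE buffer zero-filled by the memset.
 */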
3107 if (subsys->instance >= 0)
3108 ida_free(&nvme_instance_ida, subsys->instance);
3118 list_del(&subsys->entry);
3121 ida_destroy(&subsys->ns_ida);
3122 device_del(&subsys->dev);
3123 put_device(&subsys->dev);
3128 kref_put(&subsys->ref, nvme_destroy_subsystem);
3149 if (strcmp(subsys->subnqn, subsysnqn))
3151 if (!kref_get_unless_zero(&subsys->ref))
3161 return ctrl->opts && ctrl->opts->discovery_nqn;
3166 return ctrl->cntrltype == NVME_CTRL_ADMIN;
3181 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
3185 if (tmp->cntlid == ctrl->cntlid) {
3186 dev_err(ctrl->device,
3188 ctrl->cntlid, dev_name(tmp->device),
3189 subsys->subnqn);
3193 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
3197 dev_err(ctrl->device,
3212 return -ENOMEM;
3214 subsys->instance = -1;
3215 mutex_init(&subsys->lock);
3216 kref_init(&subsys->ref);
3217 INIT_LIST_HEAD(&subsys->ctrls);
3218 INIT_LIST_HEAD(&subsys->nsheads);
3220 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
3221 memcpy(subsys->model, id->mn, sizeof(subsys->model));
3222 subsys->vendor_id = le16_to_cpu(id->vid);
3223 subsys->cmic = id->cmic;
3224 subsys->awupf = le16_to_cpu(id->awupf);
3227 if (id->cntrltype == NVME_CTRL_DISC ||
3228 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
3229 subsys->subtype = NVME_NQN_DISC;
3231 subsys->subtype = NVME_NQN_NVME;
3233 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
3234 dev_err(ctrl->device,
3236 subsys->subnqn);
3238 return -EINVAL;
3242 subsys->dev.class = &nvme_subsys_class;
3243 subsys->dev.release = nvme_release_subsystem;
3244 subsys->dev.groups = nvme_subsys_attrs_groups;
3245 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
3246 device_initialize(&subsys->dev);
3249 found = __nvme_find_get_subsystem(subsys->subnqn);
3251 put_device(&subsys->dev);
3255 ret = -EINVAL;
3259 ret = device_add(&subsys->dev);
3261 dev_err(ctrl->device,
3263 put_device(&subsys->dev);
3266 ida_init(&subsys->ns_ida);
3267 list_add_tail(&subsys->entry, &nvme_subsystems);
3270 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
3271 dev_name(ctrl->device));
3273 dev_err(ctrl->device,
3279 subsys->instance = ctrl->instance;
3280 ctrl->subsys = subsys;
3281 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
3293 u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi)
3302 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
3306 c.get_log_page.csi = csi;
3309 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
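
Worked example for the dword-count split above:

/*
 * A 4 KiB log page is 1024 dwords, so dwlen = 1024 - 1 = 1023:
 * numdl = 1023 & 0xffff = 0x03ff and numdu = 0.  Only pages larger
 * than 256 KiB spill into the upper 16 bits via numdu.
 */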
3312 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
3315 return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size,
3319 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
3322 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
3330 return -ENOMEM;
3332 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
3339 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
3351 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
3353 if (check_shl_overflow(1U, units + page_shift - 9, &val))
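
Worked example for the unit conversion above:

/*
 * With CAP.MPSMIN = 0 the controller page shift is 12; an MDTS-style
 * value of units = 5 gives 1 << (5 + 12 - 9) = 256 sectors (128 KiB).
 * check_shl_overflow() rejects absurd exponents instead of wrapping.
 */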
3366 * to the write-zeroes, we are cautious and limit the size to the
3370 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
3371 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
3372 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
3374 ctrl->max_zeroes_sectors = 0;
3378 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
3383 return -ENOMEM;
3387 c.identify.csi = NVME_CSI_NVM;
3389 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
3393 ctrl->dmrl = id->dmrl;
3394 ctrl->dmrsl = le32_to_cpu(id->dmrsl);
3395 if (id->wzsl)
3396 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
3400 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
3406 u8 csi, struct nvme_effects_log **log)
3412 return -ENOMEM;
3414 old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
3426 struct nvme_effects_log *log = ctrl->effects;
3428 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3431 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3449 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
3451 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3452 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3453 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3460 if (ctrl->effects)
3463 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3464 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3469 if (!ctrl->effects) {
3470 ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3485 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3486 dev_err(ctrl->device,
3488 ctrl->cntlid, le16_to_cpu(id->cntlid));
3489 return -EINVAL;
3492 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3493 dev_err(ctrl->device,
3494 "keep-alive support is mandatory for fabrics\n");
3495 return -EINVAL;
3498 if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
3499 dev_err(ctrl->device,
3501 ctrl->ioccsz);
3502 return -EINVAL;
3505 if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
3506 dev_err(ctrl->device,
3508 ctrl->iorcsz);
3509 return -EINVAL;
3512 if (!ctrl->maxcmd) {
3513 dev_warn(ctrl->device,
3515 ctrl->maxcmd = ctrl->sqsize + 1;
3531 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3532 return -EIO;
3535 if (!(ctrl->ops->flags & NVME_F_FABRICS))
3536 ctrl->cntlid = le16_to_cpu(id->cntlid);
3538 if (!ctrl->identified) {
3545 * could re-scan for quirks every time we reinitialize
3551 ctrl->quirks |= core_quirks[i].quirks;
3562 memcpy(ctrl->subsys->firmware_rev, id->fr,
3563 sizeof(ctrl->subsys->firmware_rev));
3565 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3566 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3567 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3570 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3571 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3572 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3574 ctrl->oacs = le16_to_cpu(id->oacs);
3575 ctrl->oncs = le16_to_cpu(id->oncs);
3576 ctrl->mtfa = le16_to_cpu(id->mtfa);
3577 ctrl->oaes = le32_to_cpu(id->oaes);
3578 ctrl->wctemp = le16_to_cpu(id->wctemp);
3579 ctrl->cctemp = le16_to_cpu(id->cctemp);
3581 atomic_set(&ctrl->abort_limit, id->acl + 1);
3582 ctrl->vwc = id->vwc;
3583 if (id->mdts)
3584 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3587 ctrl->max_hw_sectors =
3588 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3590 lim = queue_limits_start_update(ctrl->admin_q);
3592 ret = queue_limits_commit_update(ctrl->admin_q, &lim);
3596 ctrl->sgls = le32_to_cpu(id->sgls);
3597 ctrl->kas = le16_to_cpu(id->kas);
3598 ctrl->max_namespaces = le32_to_cpu(id->mnan);
3599 ctrl->ctratt = le32_to_cpu(id->ctratt);
3601 ctrl->cntrltype = id->cntrltype;
3602 ctrl->dctype = id->dctype;
3604 if (id->rtd3e) {
3605 /* us -> s */
3606 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3608 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3611 if (ctrl->shutdown_timeout != shutdown_timeout)
3612 dev_info(ctrl->device,
3614 ctrl->shutdown_timeout);
3616 ctrl->shutdown_timeout = shutdown_timeout;
3618 ctrl->npss = id->npss;
3619 ctrl->apsta = id->apsta;
3620 prev_apst_enabled = ctrl->apst_enabled;
3621 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3622 if (force_apst && id->apsta) {
3623 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3624 ctrl->apst_enabled = true;
3626 ctrl->apst_enabled = false;
3629 ctrl->apst_enabled = id->apsta;
3631 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3633 if (ctrl->ops->flags & NVME_F_FABRICS) {
3634 ctrl->icdoff = le16_to_cpu(id->icdoff);
3635 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3636 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3637 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3643 ctrl->hmpre = le32_to_cpu(id->hmpre);
3644 ctrl->hmmin = le32_to_cpu(id->hmmin);
3645 ctrl->hmminds = le32_to_cpu(id->hmminds);
3646 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3653 if (ctrl->apst_enabled && !prev_apst_enabled)
3654 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3655 else if (!ctrl->apst_enabled && prev_apst_enabled)
3656 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3671 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3673 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3677 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3679 if (ctrl->vs >= NVME_VS(1, 1, 0))
3680 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3691 dev_dbg(ctrl->device,
3693 ctrl->subsys->subnqn);
3694 ctrl->queue_count = 1;
3711 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3717 if (ret == -EINTR)
3721 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3722 ctrl->identified = true;
3733 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3739 return -EWOULDBLOCK;
3743 if (!try_module_get(ctrl->ops->module)) {
3745 return -EINVAL;
3748 file->private_data = ctrl;
3755 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3757 module_put(ctrl->ops->module);
3776 lockdep_assert_held(&ctrl->subsys->lock);
3778 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3784 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3796 bool has_uuid = !uuid_is_null(&ids->uuid);
3797 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
3798 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3801 lockdep_assert_held(&subsys->lock);
3803 list_for_each_entry(h, &subsys->nsheads, entry) {
3804 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
3805 return -EINVAL;
3807 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
3808 return -EINVAL;
3810 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3811 return -EINVAL;
3819 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3836 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3837 cdev_device->class = &nvme_ns_chr_class;
3838 cdev_device->release = nvme_cdev_rel;
3841 cdev->owner = owner;
3851 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3856 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3874 ns->cdev_device.parent = ns->ctrl->device;
3875 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3876 ns->ctrl->instance, ns->head->instance);
3880 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3881 ns->ctrl->ops->module);
3889 int ret = -ENOMEM;
3898 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3901 head->instance = ret;
3902 INIT_LIST_HEAD(&head->list);
3903 ret = init_srcu_struct(&head->srcu);
3906 head->subsys = ctrl->subsys;
3907 head->ns_id = info->nsid;
3908 head->ids = info->ids;
3909 head->shared = info->is_shared;
3910 head->rotational = info->is_rotational;
3911 ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
3912 ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
3913 kref_init(&head->ref);
3915 if (head->ids.csi) {
3916 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3920 head->effects = ctrl->effects;
3926 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3928 kref_get(&ctrl->subsys->ref);
3932 cleanup_srcu_struct(&head->srcu);
3934 ida_free(&ctrl->subsys->ns_ida, head->instance);
3958 mutex_lock(&s->lock);
3960 mutex_unlock(&s->lock);
3971 struct nvme_ctrl *ctrl = ns->ctrl;
3975 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3982 * and in user space the /dev/disk/by-id/ links rely on it.
3984 * If the device also claims to be multi-path capable back off
3994 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3995 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3996 info->is_shared)) {
3997 dev_err(ctrl->device,
3999 info->nsid);
4003 dev_err(ctrl->device,
4004 "clearing duplicate IDs for nsid %d\n", info->nsid);
4005 dev_err(ctrl->device,
4006 "use of /dev/disk/by-id/ may cause data corruption\n");
4007 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
4008 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
4009 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
4010 ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
4013 mutex_lock(&ctrl->subsys->lock);
4014 head = nvme_find_ns_head(ctrl, info->nsid);
4016 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
4018 dev_err(ctrl->device,
4020 info->nsid);
4029 ret = -EINVAL;
4030 if ((!info->is_shared || !head->shared) &&
4031 !list_empty(&head->list)) {
4032 dev_err(ctrl->device,
4034 info->nsid);
4037 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
4038 dev_err(ctrl->device,
4040 info->nsid);
4045 dev_warn(ctrl->device,
4047 info->nsid);
4048 dev_warn_once(ctrl->device,
4053 list_add_tail_rcu(&ns->siblings, &head->list);
4054 ns->head = head;
4055 mutex_unlock(&ctrl->subsys->lock);
4058 cancel_delayed_work(&head->remove_work);
4065 mutex_unlock(&ctrl->subsys->lock);
4074 srcu_idx = srcu_read_lock(&ctrl->srcu);
4075 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4076 srcu_read_lock_held(&ctrl->srcu)) {
4077 if (ns->head->ns_id == nsid) {
4083 if (ns->head->ns_id > nsid)
4086 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4098 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
4099 if (tmp->head->ns_id < ns->head->ns_id) {
4100 list_add_rcu(&ns->list, &tmp->list);
4104 list_add_rcu(&ns->list, &ns->ctrl->namespaces);
4112 int node = ctrl->numa_node;
4119 if (ctrl->opts && ctrl->opts->data_digest)
4121 if (ctrl->ops->supports_pci_p2pdma &&
4122 ctrl->ops->supports_pci_p2pdma(ctrl))
4125 disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
4128 disk->fops = &nvme_bdev_ops;
4129 disk->private_data = ns;
4131 ns->disk = disk;
4132 ns->queue = disk->queue;
4133 ns->ctrl = ctrl;
4134 kref_init(&ns->kref);
4150 if (nvme_ns_head_multipath(ns->head)) {
4151 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
4152 ctrl->instance, ns->head->instance);
4153 disk->flags |= GENHD_FL_HIDDEN;
4155 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
4156 ns->head->instance);
4158 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
4159 ns->head->instance);
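
Naming example for the three branches above (instance numbers assumed):

/*
 * Multipath path device: subsys 1, ctrl 0, head 2 -> "nvme1c0n2"
 * (hidden; I/O goes through the shared "nvme1n2" node).
 * Non-multipath with subsystem numbering:          -> "nvme1n2".
 * Fallback using the controller instance:  ctrl 0  -> "nvme0n2".
 */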
4165 mutex_lock(&ctrl->namespaces_lock);
4170 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
4171 mutex_unlock(&ctrl->namespaces_lock);
4175 mutex_unlock(&ctrl->namespaces_lock);
4176 synchronize_srcu(&ctrl->srcu);
4179 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
4182 if (!nvme_ns_head_multipath(ns->head))
4185 nvme_mpath_add_disk(ns, info->anagrpid);
4186 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
4189 * Set ns->disk->device->driver_data to ns so we can access
4190 * ns->head->passthru_err_log_enabled in
4193 dev_set_drvdata(disk_to_dev(ns->disk), ns);
4199 mutex_lock(&ctrl->namespaces_lock);
4200 list_del_rcu(&ns->list);
4201 mutex_unlock(&ctrl->namespaces_lock);
4202 synchronize_srcu(&ctrl->srcu);
4204 mutex_lock(&ctrl->subsys->lock);
4205 list_del_rcu(&ns->siblings);
4206 if (list_empty(&ns->head->list)) {
4207 list_del_init(&ns->head->entry);
4210 * head (nshead), but head->disk is not initialized in that
4213 * we do not release the reference to nshead twice if head->disk
4216 if (ns->head->disk)
4219 mutex_unlock(&ctrl->subsys->lock);
4221 nvme_put_ns_head(ns->head);
4222 nvme_put_ns_head(ns->head);
4233 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
4236 clear_bit(NVME_NS_READY, &ns->flags);
4237 set_capacity(ns->disk, 0);
4238 nvme_fault_inject_fini(&ns->fault_inject);
4244 synchronize_srcu(&ns->head->srcu);
4248 synchronize_srcu(&ns->head->srcu);
4250 mutex_lock(&ns->ctrl->subsys->lock);
4251 list_del_rcu(&ns->siblings);
4252 if (list_empty(&ns->head->list)) {
4253 if (!nvme_mpath_queue_if_no_path(ns->head))
4254 list_del_init(&ns->head->entry);
4257 mutex_unlock(&ns->ctrl->subsys->lock);
4259 /* guarantee not available in head->list */
4260 synchronize_srcu(&ns->head->srcu);
4262 if (!nvme_ns_head_multipath(ns->head))
4263 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
4267 del_gendisk(ns->disk);
4269 mutex_lock(&ns->ctrl->namespaces_lock);
4270 list_del_rcu(&ns->list);
4271 mutex_unlock(&ns->ctrl->namespaces_lock);
4272 synchronize_srcu(&ns->ctrl->srcu);
4275 nvme_mpath_remove_disk(ns->head);
4293 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
4294 dev_err(ns->ctrl->device,
4295 "identifiers changed for nsid %d\n", ns->head->ns_id);
4320 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
4321 dev_warn(ctrl->device,
4331 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
4332 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
4333 ctrl->vs >= NVME_VS(2, 0, 0))
4358 * struct async_scan_info - keeps track of controller & NSIDs to scan
4380 idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
4381 nsid = le32_to_cpu(scan_info->ns_list[idx]);
4383 nvme_scan_ns(scan_info->ctrl, nsid);
4392 mutex_lock(&ctrl->namespaces_lock);
4393 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4394 if (ns->head->ns_id > nsid) {
4395 list_del_rcu(&ns->list);
4396 synchronize_srcu(&ctrl->srcu);
4397 list_add_tail_rcu(&ns->list, &rm_list);
4400 mutex_unlock(&ctrl->namespaces_lock);
4417 return -ENOMEM;
4428 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
4431 dev_warn(ctrl->device,
4464 nn = le32_to_cpu(id->nn);
4492 dev_warn(ctrl->device,
4505 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
4511 * such scenario. Controller's non-mdts limits are reported in the unit
4513 * namespace. Hence re-read the limits at the time of ns allocation.
4517 dev_warn(ctrl->device,
4518 "reading non-mdts-limits failed: %d\n", ret);
4522 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4523 dev_info(ctrl->device, "rescanning namespaces.\n");
4527 mutex_lock(&ctrl->scan_lock);
4540 mutex_unlock(&ctrl->scan_lock);
4543 if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
4546 else if (ctrl->ana_log_buf)
4547 /* Re-read the ANA log page to not miss updates */
4548 queue_work(nvme_wq, &ctrl->ana_work);
4576 flush_work(&ctrl->scan_work);
4587 /* this is a no-op when called from the controller reset handler */
4590 mutex_lock(&ctrl->namespaces_lock);
4591 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
4592 mutex_unlock(&ctrl->namespaces_lock);
4593 synchronize_srcu(&ctrl->srcu);
4604 struct nvmf_ctrl_options *opts = ctrl->opts;
4607 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4612 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4617 opts->trsvcid ?: "none");
4622 opts->host_traddr ?: "none");
4627 opts->host_iface ?: "none");
4636 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4642 u32 aen_result = ctrl->aen_result;
4644 ctrl->aen_result = 0;
4651 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4668 ctrl->ops->submit_async_event(ctrl);
4676 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4682 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4696 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4700 cur_fw_slot = log->afi & 0x7;
4701 next_fw_slot = (log->afi & 0x70) >> 4;
4703 dev_info(ctrl->device,
4708 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
4709 sizeof(ctrl->subsys->firmware_rev));
4723 if (ctrl->mtfa)
4724 fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100);
4731 dev_warn(ctrl->device,
4747 queue_work(nvme_wq, &ctrl->async_event_work);
4767 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4778 queue_work(nvme_wq, &ctrl->fw_act_work);
4783 if (!ctrl->ana_log_buf)
4785 queue_work(nvme_wq, &ctrl->ana_work);
4789 ctrl->aen_result = result;
4792 dev_warn(ctrl->device, "async event result %08x\n", result);
4799 dev_warn(ctrl->device,
4807 u32 result = le32_to_cpu(res->u32);
4833 ctrl->aen_result = result;
4840 queue_work(nvme_wq, &ctrl->async_event_work);
4851 set->ops = ops;
4852 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
4853 if (ctrl->ops->flags & NVME_F_FABRICS)
4855 set->reserved_tags = 2;
4856 set->numa_node = ctrl->numa_node;
4857 if (ctrl->ops->flags & NVME_F_BLOCKING)
4858 set->flags |= BLK_MQ_F_BLOCKING;
4859 set->cmd_size = cmd_size;
4860 set->driver_data = ctrl;
4861 set->nr_hw_queues = 1;
4862 set->timeout = NVME_ADMIN_TIMEOUT;
4867 ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
4868 if (IS_ERR(ctrl->admin_q)) {
4869 ret = PTR_ERR(ctrl->admin_q);
4873 if (ctrl->ops->flags & NVME_F_FABRICS) {
4874 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
4875 if (IS_ERR(ctrl->fabrics_q)) {
4876 ret = PTR_ERR(ctrl->fabrics_q);
4881 ctrl->admin_tagset = set;
4885 blk_mq_destroy_queue(ctrl->admin_q);
4886 blk_put_queue(ctrl->admin_q);
4889 ctrl->admin_q = NULL;
4890 ctrl->fabrics_q = NULL;
4899 * we can not have keep-alive work running.
4902 blk_mq_destroy_queue(ctrl->admin_q);
4903 if (ctrl->ops->flags & NVME_F_FABRICS) {
4904 blk_mq_destroy_queue(ctrl->fabrics_q);
4905 blk_put_queue(ctrl->fabrics_q);
4907 blk_mq_free_tag_set(ctrl->admin_tagset);
4918 set->ops = ops;
4919 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4924 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4925 set->reserved_tags = NVME_AQ_DEPTH;
4926 else if (ctrl->ops->flags & NVME_F_FABRICS)
4928 set->reserved_tags = 1;
4929 set->numa_node = ctrl->numa_node;
4930 if (ctrl->ops->flags & NVME_F_BLOCKING)
4931 set->flags |= BLK_MQ_F_BLOCKING;
4932 set->cmd_size = cmd_size;
4933 set->driver_data = ctrl;
4934 set->nr_hw_queues = ctrl->queue_count - 1;
4935 set->timeout = NVME_IO_TIMEOUT;
4936 set->nr_maps = nr_maps;
4941 if (ctrl->ops->flags & NVME_F_FABRICS) {
4946 ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
4947 if (IS_ERR(ctrl->connect_q)) {
4948 ret = PTR_ERR(ctrl->connect_q);
4953 ctrl->tagset = set;
4958 ctrl->connect_q = NULL;
4965 if (ctrl->ops->flags & NVME_F_FABRICS) {
4966 blk_mq_destroy_queue(ctrl->connect_q);
4967 blk_put_queue(ctrl->connect_q);
4969 blk_mq_free_tag_set(ctrl->tagset);
4978 flush_work(&ctrl->async_event_work);
4979 cancel_work_sync(&ctrl->fw_act_work);
4980 if (ctrl->ops->stop_ctrl)
4981 ctrl->ops->stop_ctrl(ctrl);
4991 * to re-read the discovery log page to learn about possible changes
4995 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
4997 if (!ctrl->kato) {
4999 ctrl->kato = NVME_DEFAULT_KATO;
5005 if (ctrl->queue_count > 1) {
5012 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
5020 nvme_fault_inject_fini(&ctrl->fault_inject);
5021 dev_pm_qos_hide_latency_tolerance(ctrl->device);
5022 cdev_device_del(&ctrl->cdev, ctrl->device);
5032 xa_for_each(&ctrl->cels, i, cel) {
5033 xa_erase(&ctrl->cels, i);
5037 xa_destroy(&ctrl->cels);
5044 struct nvme_subsystem *subsys = ctrl->subsys;
5046 if (ctrl->admin_q)
5047 blk_put_queue(ctrl->admin_q);
5048 if (!subsys || ctrl->instance != subsys->instance)
5049 ida_free(&nvme_instance_ida, ctrl->instance);
5052 cleanup_srcu_struct(&ctrl->srcu);
5055 __free_page(ctrl->discard_page);
5056 free_opal_dev(ctrl->opal_dev);
5060 list_del(&ctrl->subsys_entry);
5061 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
5065 ctrl->ops->free_ctrl(ctrl);
5077 * needed, which also invokes the ops->free_ctrl() callback.
5084 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
5085 ctrl->passthru_err_log_enabled = false;
5086 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
5087 spin_lock_init(&ctrl->lock);
5088 mutex_init(&ctrl->namespaces_lock);
5090 ret = init_srcu_struct(&ctrl->srcu);
5094 mutex_init(&ctrl->scan_lock);
5095 INIT_LIST_HEAD(&ctrl->namespaces);
5096 xa_init(&ctrl->cels);
5097 ctrl->dev = dev;
5098 ctrl->ops = ops;
5099 ctrl->quirks = quirks;
5100 ctrl->numa_node = NUMA_NO_NODE;
5101 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
5102 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
5103 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
5104 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
5105 init_waitqueue_head(&ctrl->state_wq);
5107 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
5108 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
5109 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
5110 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
5111 ctrl->ka_last_check_time = jiffies;
5115 ctrl->discard_page = alloc_page(GFP_KERNEL);
5116 if (!ctrl->discard_page) {
5117 ret = -ENOMEM;
5124 ctrl->instance = ret;
5132 device_initialize(&ctrl->ctrl_device);
5133 ctrl->device = &ctrl->ctrl_device;
5134 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
5135 ctrl->instance);
5136 ctrl->device->class = &nvme_class;
5137 ctrl->device->parent = ctrl->dev;
5138 if (ops->dev_attr_groups)
5139 ctrl->device->groups = ops->dev_attr_groups;
5141 ctrl->device->groups = nvme_dev_attr_groups;
5142 ctrl->device->release = nvme_free_ctrl;
5143 dev_set_drvdata(ctrl->device, ctrl);
5148 ida_free(&nvme_instance_ida, ctrl->instance);
5150 if (ctrl->discard_page)
5151 __free_page(ctrl->discard_page);
5152 cleanup_srcu_struct(&ctrl->srcu);
5165 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
5169 cdev_init(&ctrl->cdev, &nvme_dev_fops);
5170 ctrl->cdev.owner = ctrl->ops->module;
5171 ret = cdev_device_add(&ctrl->cdev, ctrl->device);
5179 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
5180 dev_pm_qos_update_user_latency_tolerance(ctrl->device,
5183 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
5196 srcu_idx = srcu_read_lock(&ctrl->srcu);
5197 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5198 srcu_read_lock_held(&ctrl->srcu))
5199 blk_mark_disk_dead(ns->disk);
5200 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5209 srcu_idx = srcu_read_lock(&ctrl->srcu);
5210 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5211 srcu_read_lock_held(&ctrl->srcu))
5212 blk_mq_unfreeze_queue_non_owner(ns->queue);
5213 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5214 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
5223 srcu_idx = srcu_read_lock(&ctrl->srcu);
5224 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5225 srcu_read_lock_held(&ctrl->srcu)) {
5226 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
5230 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5240 srcu_idx = srcu_read_lock(&ctrl->srcu);
5241 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5242 srcu_read_lock_held(&ctrl->srcu))
5243 blk_mq_freeze_queue_wait(ns->queue);
5244 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5253 set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
5254 srcu_idx = srcu_read_lock(&ctrl->srcu);
5255 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5256 srcu_read_lock_held(&ctrl->srcu))
5262 blk_freeze_queue_start_non_owner(ns->queue);
5263 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5269 if (!ctrl->tagset)
5271 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
5272 blk_mq_quiesce_tagset(ctrl->tagset);
5274 blk_mq_wait_quiesce_done(ctrl->tagset);
5280 if (!ctrl->tagset)
5282 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
5283 blk_mq_unquiesce_tagset(ctrl->tagset);
5289 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
5290 blk_mq_quiesce_queue(ctrl->admin_q);
5292 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
5298 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
5299 blk_mq_unquiesce_queue(ctrl->admin_q);
5308 srcu_idx = srcu_read_lock(&ctrl->srcu);
5309 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5310 srcu_read_lock_held(&ctrl->srcu))
5311 blk_sync_queue(ns->queue);
5312 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5319 if (ctrl->admin_q)
5320 blk_sync_queue(ctrl->admin_q);
5326 if (file->f_op != &nvme_dev_fops)
5328 return file->private_data;
5369 int result = -ENOMEM;
5373 nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
5377 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
5381 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
5399 "nvme-generic");