--- core.c (be93e87e780253780df9bb6ecc9bc1199b0d94c3)
+++ core.c (240e6ee272c07a2636dfc7d65f5bbb18377c49e5)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

--- 75 unchanged lines hidden ---

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

-static int nvme_revalidate_disk(struct gendisk *disk);
+static int _nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
                                           unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
        /*
         * Revalidating a dead namespace sets capacity to 0. This will end

--- 181 unchanged lines hidden ---

        if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
                if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
                        return;

                if (!blk_queue_dying(req->q)) {
                        nvme_retry_req(req);
                        return;
                }
+        } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+                   req_op(req) == REQ_OP_ZONE_APPEND) {
+                req->__sector = nvme_lba_to_sect(req->q->queuedata,
+                        le64_to_cpu(nvme_req(req)->result.u64));
        }

        nvme_trace_bio_complete(req, status);
        blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)

--- 370 unchanged lines hidden ---

                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
        cmnd->write_zeroes.control = 0;
        return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
-                struct request *req, struct nvme_command *cmnd)
+                struct request *req, struct nvme_command *cmnd,
+                enum nvme_opcode op)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        u16 control = 0;
        u32 dsmgmt = 0;

        if (req->cmd_flags & REQ_FUA)
                control |= NVME_RW_FUA;
        if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
                control |= NVME_RW_LR;

        if (req->cmd_flags & REQ_RAHEAD)
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

-        cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+        cmnd->rw.opcode = op;
        cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
        cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

        if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
                nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

        if (ns->ms) {

--- 12 unchanged lines hidden ---

                switch (ns->pi_type) {
                case NVME_NS_DPS_PI_TYPE3:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;
                        break;
                case NVME_NS_DPS_PI_TYPE1:
                case NVME_NS_DPS_PI_TYPE2:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD |
                                        NVME_RW_PRINFO_PRCHK_REF;
+                        if (op == nvme_cmd_zone_append)
+                                control |= NVME_RW_APPEND_PIREMAP;
                        cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
                        break;
                }
        }

        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
        return 0;

--- 24 unchanged lines hidden ---

        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
                memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
                break;
        case REQ_OP_FLUSH:
                nvme_setup_flush(ns, cmd);
                break;
+        case REQ_OP_ZONE_RESET_ALL:
+        case REQ_OP_ZONE_RESET:
+                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+                break;
+        case REQ_OP_ZONE_OPEN:
+                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+                break;
+        case REQ_OP_ZONE_CLOSE:
+                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+                break;
+        case REQ_OP_ZONE_FINISH:
+                ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+                break;
        case REQ_OP_WRITE_ZEROES:
                ret = nvme_setup_write_zeroes(ns, req, cmd);
                break;
        case REQ_OP_DISCARD:
                ret = nvme_setup_discard(ns, req, cmd);
                break;
        case REQ_OP_READ:
+                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+                break;
        case REQ_OP_WRITE:
-                ret = nvme_setup_rw(ns, req, cmd);
+                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
                break;
+        case REQ_OP_ZONE_APPEND:
+                ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
+                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        cmd->common.command_id = req->tag;
        trace_nvme_setup_cmd(req, cmd);
        return ret;

--- 616 unchanged lines hidden ---

                nvme_mpath_start_freeze(ctrl->subsys);
                nvme_mpath_wait_freeze(ctrl->subsys);
                nvme_start_freeze(ctrl);
                nvme_wait_freeze(ctrl);
        }
        return effects;
}

-static void nvme_update_formats(struct nvme_ctrl *ctrl)
+static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
{
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list)
-                if (ns->disk && nvme_revalidate_disk(ns->disk))
+                if (ns->disk && _nvme_revalidate_disk(ns->disk))
                        nvme_set_queue_dying(ns);
+                else if (blk_queue_is_zoned(ns->disk->queue)) {
+                        /*
+                         * IO commands are required to fully revalidate a zoned
+                         * device. Force the command effects to trigger rescan
+                         * work so report zones can run in a context with
+                         * unfrozen IO queues.
+                         */
+                        *effects |= NVME_CMD_EFFECTS_NCC;
+                }
        up_read(&ctrl->namespaces_rwsem);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
        /*
         * Revalidate LBA changes prior to unfreezing. This is necessary to
         * prevent memory corruption if a logical block size was changed by
         * this command.
         */
        if (effects & NVME_CMD_EFFECTS_LBCC)
-                nvme_update_formats(ctrl);
+                nvme_update_formats(ctrl, &effects);
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                nvme_unfreeze(ctrl);
                nvme_mpath_unfreeze(ctrl->subsys);
                mutex_unlock(&ctrl->subsys->lock);
                nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
                mutex_unlock(&ctrl->scan_lock);
        }
        if (effects & NVME_CMD_EFFECTS_CCC)

--- 98 unchanged lines hidden ---

        return status;
}

/*
 * Issue ioctl requests on the first available path. Note that unlike normal
 * block layer requests we will not retry failed request on another controller.
 */
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
                struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
        if (disk->fops == &nvme_ns_head_ops) {
                struct nvme_ns *ns;

                *head = disk->private_data;
                *srcu_idx = srcu_read_lock(&(*head)->srcu);
                ns = nvme_find_path(*head);
                if (!ns)
                        srcu_read_unlock(&(*head)->srcu, *srcu_idx);
                return ns;
        }
#endif
        *head = NULL;
        *srcu_idx = -1;
        return disk->private_data;
}

-static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
        if (head)
                srcu_read_unlock(&head->srcu, idx);
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)

--- 376 unchanged lines hidden ---

        else
                set_disk_ro(disk, false);

        blk_mq_unfreeze_queue(disk->queue);
}

static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
+        unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        struct nvme_ns *ns = disk->private_data;
        struct nvme_ctrl *ctrl = ns->ctrl;
+        int ret;
        u32 iob;

        /*
         * If identify namespace failed, use default 512 byte block size so
         * block layer can use before failing read/write for 0 capacity.
         */
-        ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
+        ns->lba_shift = id->lbaf[lbaf].ds;
        if (ns->lba_shift == 0)
                ns->lba_shift = 9;

        switch (ns->head->ids.csi) {
        case NVME_CSI_NVM:
                break;
+        case NVME_CSI_ZNS:
+                ret = nvme_update_zone_info(disk, ns, lbaf);
+                if (ret) {
+                        dev_warn(ctrl->device,
+                                 "failed to add zoned namespace:%u ret:%d\n",
+                                 ns->head->ns_id, ret);
+                        return ret;
+                }
+                break;
        default:
-                dev_warn(ctrl->device, "unknown csi:%d ns:%d\n",
+                dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
                         ns->head->ids.csi, ns->head->ns_id);
                return -ENODEV;
        }

        if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
            is_power_of_2(ctrl->max_hw_sectors))
                iob = ctrl->max_hw_sectors;
        else
                iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

        ns->features = 0;
-        ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+        ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
        /* the PI implementation requires metadata equal t10 pi tuple size */
        if (ns->ms == sizeof(struct t10_pi_tuple))
                ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
        else
                ns->pi_type = 0;

        if (ns->ms) {
                /*

--- 25 unchanged lines hidden ---

        if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
        }
#endif
        return 0;
}

-static int nvme_revalidate_disk(struct gendisk *disk)
+static int _nvme_revalidate_disk(struct gendisk *disk)
{
        struct nvme_ns *ns = disk->private_data;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ns *id;
        struct nvme_ns_ids ids;
        int ret = 0;

        if (test_bit(NVME_NS_DEAD, &ns->flags)) {

--- 31 unchanged lines hidden ---

         */
        if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
                ret = 0;
        else if (ret > 0)
                ret = blk_status_to_errno(nvme_error_status(ret));
        return ret;
}

+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+        int ret;
+
+        ret = _nvme_revalidate_disk(disk);
+        if (ret)
+                return ret;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+        if (blk_queue_is_zoned(disk->queue)) {
+                struct nvme_ns *ns = disk->private_data;
+                struct nvme_ctrl *ctrl = ns->ctrl;
+
+                ret = blk_revalidate_disk_zones(disk, NULL);
+                if (!ret)
+                        blk_queue_max_zone_append_sectors(disk->queue,
+                                                          ctrl->max_zone_append);
+        }
+#endif
+        return ret;
+}
+
static char nvme_pr_type(enum pr_type type)
{
        switch (type) {
        case PR_WRITE_EXCLUSIVE:
                return 1;
        case PR_EXCLUSIVE_ACCESS:
                return 2;
        case PR_WRITE_EXCLUSIVE_REG_ONLY:

--- 114 unchanged lines hidden ---

static const struct block_device_operations nvme_fops = {
        .owner = THIS_MODULE,
        .ioctl = nvme_ioctl,
        .compat_ioctl = nvme_compat_ioctl,
        .open = nvme_open,
        .release = nvme_release,
        .getgeo = nvme_getgeo,
        .revalidate_disk = nvme_revalidate_disk,
+        .report_zones = nvme_report_zones,
        .pr_ops = &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;


--- 10 unchanged lines hidden ---

const struct block_device_operations nvme_ns_head_ops = {
        .owner = THIS_MODULE,
        .submit_bio = nvme_ns_head_submit_bio,
        .open = nvme_ns_head_open,
        .release = nvme_ns_head_release,
        .ioctl = nvme_ioctl,
        .compat_ioctl = nvme_compat_ioctl,
        .getgeo = nvme_getgeo,
+        .report_zones = nvme_report_zones,
        .pr_ops = &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
        unsigned long timeout =
                ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

--- 2211 unchanged lines hidden ---

        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+        BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
+        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
        BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}


static int __init nvme_core_init(void)

--- 67 unchanged lines hidden ---