// SPDX-License-Identifier: GPL-2.0
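/*
 * NVMe Zoned Namespace (ZNS) command set support from the Linux host
 * driver (this excerpt appears to come from drivers/nvme/host/zns.c;
 * lines elided between the captured fragments are marked with "..."
 * comments).
 */

/*
 * nvme_set_max_append() - cache the controller-wide Zone Append limit.
 * ZASL in the ZNS identify-controller data is a power-of-two exponent in
 * units of the minimum memory page size, so "1 << (zasl + 3)" converts it
 * to 512-byte sectors assuming a 4KiB minimum page. A ZASL of zero means
 * Zone Append is only bounded by MDTS, i.e. ctrl->max_hw_sectors.
 */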
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
	/* ... */
		return -ENOMEM;
	/* ... */
	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	/* ... */
	if (id->zasl)
		ctrl->max_zone_append = 1 << (id->zasl + 3);
	else
		ctrl->max_zone_append = ctrl->max_hw_sectors;
	/* ... */
}
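/*
 * nvme_query_zone_info() - validate a zoned namespace and fill in its
 * zone geometry. The driver requires native Zone Append support: if the
 * command-effects log reports the command unsupported, the namespace is
 * forced read-only (NVME_NS_FORCE_RO) rather than rejected outright.
 */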
int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
		struct nvme_zone_info *zi)
{
	struct nvme_effects_log *log = ns->head->effects;
	/* ... */

	if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
			NVME_CMD_EFFECTS_CSUPP)) {
		if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
			dev_warn(ns->ctrl->device,
				"Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
				ns->head->ns_id);
	} else {
		set_bit(NVME_NS_FORCE_RO, &ns->flags);
		dev_warn(ns->ctrl->device,
			"Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
			ns->head->ns_id);
	}

	if (!ns->ctrl->max_zone_append) {
		status = nvme_set_max_append(ns->ctrl);
		/* ... */
	}

	/* ... */
		return -ENOMEM;
	/* ... */
	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
	/* ... */

	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
	/* ... */

	if (id->zoc) {
		dev_warn(ns->ctrl->device,
			/* ... */
			le16_to_cpu(id->zoc), ns->head->ns_id);
		status = -ENODEV;
		/* ... */
	}

	zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
	if (!is_power_of_2(zi->zone_size)) {
		dev_warn(ns->ctrl->device,
			/* ... */
			zi->zone_size, ns->head->ns_id);
		status = -ENODEV;
		/* ... */
	}
	zi->max_open_zones = le32_to_cpu(id->mor) + 1;
	zi->max_active_zones = le32_to_cpu(id->mar) + 1;
	/* ... */
}
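/*
 * nvme_update_zone_info() - propagate the queried zone geometry into the
 * block layer queue limits: mark the queue zoned, export the open/active
 * zone limits and the controller's zone-append cap, and set chunk_sectors
 * (and the cached ns->head->zsze) to the power-of-two zone size so zone
 * boundaries can be found by masking.
 */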
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
		struct nvme_zone_info *zi)
{
	lim->features |= BLK_FEAT_ZONED;
	lim->max_open_zones = zi->max_open_zones;
	lim->max_active_zones = zi->max_active_zones;
	lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
	lim->chunk_sectors = ns->head->zsze =
		nvme_lba_to_sect(ns->head, zi->zone_size);
}
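/*
 * nvme_zns_alloc_report_buffer() - size and allocate the buffer for a
 * zone report. The requested zone count is first clamped to the number
 * of zones actually on the disk: capacity >> ilog2(zone size), which is
 * one reason the zone size must be a power of two.
 */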
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
					  unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = ns->disk->queue;
	/* ... */
	nr_zones = min_t(unsigned int, nr_zones,
			get_capacity(ns->disk) >> ilog2(ns->head->zsze));
	/* ... */
}
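/*
 * nvme_zone_parse_entry() - translate one NVMe zone descriptor into a
 * struct blk_zone and hand it to the report_zones callback. Only
 * sequential-write-required zones are valid for ZNS; the zone state is
 * kept in the upper nibble of entry->zs, and the LBA fields are
 * converted to 512-byte sectors.
 */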
static int nvme_zone_parse_entry(struct nvme_ns *ns,
				 struct nvme_zone_descriptor *entry,
				 unsigned int idx, report_zones_cb cb,
				 void *data)
{
	struct nvme_ns_head *head = ns->head;
	/* ... */

	if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
		dev_err(ns->ctrl->device, "invalid zone type %#x\n", entry->zt);
		return -EINVAL;
	}

	/* ... */
	zone.cond = entry->zs >> 4;
	zone.len = head->zsze;
	zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
	zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
	/* ... */
		zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
	/* ... */
}
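/*
 * nvme_ns_report_zones() - implement the block layer report_zones
 * operation. The start sector is aligned down to a zone boundary, then
 * Zone Management Receive commands (c.zmr) are issued in a loop, parsing
 * descriptors until the callback has seen nr_zones zones or the disk
 * capacity is reached. Returns the number of zones reported or a
 * negative errno; a positive NVMe status is folded to -EIO.
 */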
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	/* ... */

	if (ns->head->ids.csi != NVME_CSI_ZNS)
		return -EINVAL;

	report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
	if (!report)
		return -ENOMEM;

	/* ... */
	c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
	/* ... */

	sector &= ~(ns->head->zsze - 1);
	while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
		/* ... */
		c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
		ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			/* ... */
		}

		nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
		/* ... */
		for (i = 0; i < nz && zone_idx < nr_zones; i++) {
			ret = nvme_zone_parse_entry(ns, &report->entries[i],
						    zone_idx, cb, data);
			/* ... */
		}

		sector += ns->head->zsze * nz;
	}

	if (zone_idx > 0)
		ret = zone_idx;
	else
		ret = -EINVAL;
	/* ... */
	return ret;
}
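/*
 * nvme_setup_zone_mgmt_send() - build a Zone Management Send command for
 * a REQ_OP_ZONE_* request (open/close/finish/reset). The target zone's
 * start LBA comes from the request position; select_all turns a reset
 * into a whole-device reset for REQ_OP_ZONE_RESET_ALL.
 */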
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
		struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
	/* ... */
	c->zms.opcode = nvme_cmd_zone_mgmt_send;
	c->zms.nsid = cpu_to_le32(ns->head->ns_id);
	c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	c->zms.zsa = action;

	if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
		c->zms.select_all = 1;

	return BLK_STS_OK;
}
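/*
 * Callers are not part of this excerpt; in the host core, nvme_setup_cmd()
 * presumably dispatches REQ_OP_ZONE_OPEN/CLOSE/FINISH/RESET(_ALL) here
 * with the matching enum nvme_zone_mgmt_action value.
 */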