// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"

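/*
 * Derive the namespace atomic write and optimal I/O size hints reported to
 * the host from the backing block device's queue limits.
 */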
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
        /* Logical blocks per physical block, 0's based. */
        const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
                                      bdev_logical_block_size(bdev));

        /*
         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
         * NAWUPF, and NACWU are defined for this namespace and should be
         * used by the host for this namespace instead of the AWUN, AWUPF,
         * and ACWU fields in the Identify Controller data structure. If
         * any of these fields is zero, the corresponding field from the
         * Identify Controller data structure should be used instead.
         */
        id->nsfeat |= 1 << 1;
        id->nawun = lpp0b;
        id->nawupf = lpp0b;
        id->nacwu = lpp0b;

        /*
         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
         * NOWS are defined for this namespace and should be used by
         * the host for I/O optimization.
         */
        id->nsfeat |= 1 << 4;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
        id->npwg = lpp0b;
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
        id->npwa = id->npwg;
        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
        id->npdg = to0based(bdev_discard_granularity(bdev) /
                            bdev_logical_block_size(bdev));
        /* NPDA = Namespace Preferred Deallocate Alignment */
        id->npda = id->npdg;
        /* NOWS = Namespace Optimal Write Size */
        id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}

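/* Release the backing block device file and clear the cached pointers. */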
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
        if (ns->bdev_file) {
                fput(ns->bdev_file);
                ns->bdev = NULL;
                ns->bdev_file = NULL;
        }
}

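/*
 * Map the backing device's block layer integrity profile to NVMe protection
 * information: CRC checksums with a reference tag become Type 1 PI, without
 * one Type 3 PI; any other profile leaves metadata disabled.
 */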
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
        struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

        if (!bi)
                return;

        if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
                ns->metadata_size = bi->tuple_size;
                if (bi->flags & BLK_INTEGRITY_REF_TAG)
                        ns->pi_type = NVME_NS_DPS_PI_TYPE1;
                else
                        ns->pi_type = NVME_NS_DPS_PI_TYPE3;
        } else {
                ns->metadata_size = 0;
        }
}

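/*
 * Open the backing block device for a namespace and derive its size, block
 * size, protection information and zoned properties. Returns -ENOTBLK when
 * the namespace should be handled by the file-backed I/O path instead.
 */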
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
        int ret;

        /*
         * When the buffered_io namespace attribute is enabled, the user wants
         * this block device to be accessed as a file so that it can take
         * advantage of the page cache. Fall back to the file backend by
         * returning -ENOTBLK.
         */
        if (ns->buffered_io)
                return -ENOTBLK;

        ns->bdev_file = bdev_file_open_by_path(ns->device_path,
                                BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
        if (IS_ERR(ns->bdev_file)) {
                ret = PTR_ERR(ns->bdev_file);
                if (ret != -ENOTBLK) {
                        pr_err("failed to open block device %s: (%d)\n",
                               ns->device_path, ret);
                }
                ns->bdev_file = NULL;
                return ret;
        }
        ns->bdev = file_bdev(ns->bdev_file);
        ns->size = bdev_nr_bytes(ns->bdev);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

        ns->pi_type = 0;
        ns->metadata_size = 0;
        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                nvmet_bdev_ns_enable_integrity(ns);

        if (bdev_is_zoned(ns->bdev)) {
                if (!nvmet_bdev_zns_enable(ns)) {
                        nvmet_bdev_ns_disable(ns);
                        return -EINVAL;
                }
                ns->csi = NVME_CSI_ZNS;
        }

        return 0;
}

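/* Refresh the cached namespace capacity from the backing device. */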
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
        ns->size = bdev_nr_bytes(ns->bdev);
}

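/*
 * Translate a block layer status code into an NVMe status code and record
 * the error location and affected LBA in the request.
 */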
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
        u16 status = NVME_SC_SUCCESS;

        if (likely(blk_sts == BLK_STS_OK))
                return status;
        /*
         * Right now there exists an M : 1 mapping between block layer error
         * codes and NVMe status codes (see nvme_error_status()). For
         * consistency, when we reverse map we use the most appropriate NVMe
         * status code from the group of NVMe status codes used in
         * nvme_error_status().
         */
        switch (blk_sts) {
        case BLK_STS_NOSPC:
                status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, length);
                break;
        case BLK_STS_TARGET:
                status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                break;
        case BLK_STS_NOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
                }
                break;
        case BLK_STS_MEDIUM:
                status = NVME_SC_ACCESS_DENIED;
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                break;
        case BLK_STS_IOERR:
        default:
                status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
                req->error_loc = offsetof(struct nvme_common_command, opcode);
        }

        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->error_slba = le64_to_cpu(req->cmd->rw.slba);
                break;
        case nvme_cmd_write_zeroes:
                req->error_slba =
                        le64_to_cpu(req->cmd->write_zeroes.slba);
                break;
        default:
                req->error_slba = 0;
        }
        return status;
}

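/* Completion handler shared by all bios submitted by this backend. */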
static void nvmet_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
        nvmet_req_bio_put(req, bio);
}

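/*
 * Attach an integrity payload to a bio and fill it with metadata pages from
 * the request's metadata scatterlist.
 */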
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
                                struct sg_mapping_iter *miter)
{
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(req->ns->bdev);
        if (unlikely(!bi)) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO,
                                  bio_max_segs(req->metadata_sg_cnt));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                     (bi->interval_exp - SECTOR_SHIFT));

        resid = bio_integrity_bytes(bi, bio_sectors(bio));
        while (resid > 0 && sg_miter_next(miter)) {
                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (unlikely(rc != len)) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
                                struct sg_mapping_iter *miter)
{
        return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

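/*
 * Build and submit one or more bios for a Read or Write command, chaining a
 * new bio whenever the current one cannot take another scatterlist segment
 * and attaching integrity metadata when the namespace uses protection
 * information.
 */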
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
        unsigned int sg_cnt = req->sg_cnt;
        struct bio *bio;
        struct scatterlist *sg;
        struct blk_plug plug;
        sector_t sector;
        blk_opf_t opf;
        int i, rc;
        struct sg_mapping_iter prot_miter;
        unsigned int iter_flags;
        unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

        if (!nvmet_check_transfer_len(req, total_len))
                return;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        opf |= REQ_FUA;
                iter_flags = SG_MITER_TO_SG;
        } else {
                opf = REQ_OP_READ;
                iter_flags = SG_MITER_FROM_SG;
        }

        if (is_pci_p2pdma_page(sg_page(req->sg)))
                opf |= REQ_NOMERGE;

        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

        if (nvmet_use_inline_bvec(req)) {
                bio = &req->b.inline_bio;
                bio_init(bio, req->ns->bdev, req->inline_bvec,
                         ARRAY_SIZE(req->inline_bvec), opf);
        } else {
                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
                                GFP_KERNEL);
        }
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;

        blk_start_plug(&plug);
        if (req->metadata_len)
                sg_miter_start(&prot_miter, req->metadata_sg,
                               req->metadata_sg_cnt, iter_flags);

        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                       != sg->length) {
                        struct bio *prev = bio;

                        if (req->metadata_len) {
                                rc = nvmet_bdev_alloc_bip(req, bio,
                                                          &prot_miter);
                                if (unlikely(rc)) {
                                        bio_io_error(bio);
                                        return;
                                }
                        }

                        bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
                                        opf, GFP_KERNEL);
                        bio->bi_iter.bi_sector = sector;

                        bio_chain(bio, prev);
                        submit_bio(prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

        if (req->metadata_len) {
                rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
                if (unlikely(rc)) {
                        bio_io_error(bio);
                        return;
                }
        }

        submit_bio(bio);
        blk_finish_plug(&plug);
}

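/*
 * Handle a Flush command: complete immediately if the device has no write
 * cache, otherwise submit an empty preflush bio.
 */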
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
        struct bio *bio = &req->b.inline_bio;

        if (!bdev_write_cache(req->ns->bdev)) {
                nvmet_req_complete(req, NVME_SC_SUCCESS);
                return;
        }

        if (!nvmet_check_transfer_len(req, 0))
                return;

        bio_init(bio, req->ns->bdev, req->inline_bvec,
                 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;

        submit_bio(bio);
}

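/* Synchronously flush the backing device's write cache, if it has one. */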
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
        if (!bdev_write_cache(req->ns->bdev))
                return 0;

        if (blkdev_issue_flush(req->ns->bdev))
                return NVME_SC_INTERNAL | NVME_STATUS_DNR;
        return 0;
}

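/* Queue a discard for a single DSM range, accumulating bios in *bio. */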
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
                struct nvme_dsm_range *range, struct bio **bio)
{
        struct nvmet_ns *ns = req->ns;
        int ret;

        ret = __blkdev_issue_discard(ns->bdev,
                        nvmet_lba_to_sect(ns, range->slba),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, bio);
        if (ret && ret != -EOPNOTSUPP) {
                req->error_slba = le64_to_cpu(range->slba);
                return errno_to_nvme_status(req, ret);
        }
        return NVME_SC_SUCCESS;
}

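/*
 * Walk the DSM range list in the command payload and queue a discard for
 * each entry, then submit the accumulated bio chain.
 */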
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i;
        u16 status;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_bdev_discard_range(req, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status)
                        bio_io_error(bio);
                else
                        submit_bio(bio);
        } else {
                nvmet_req_complete(req, status);
        }
}

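/*
 * Dispatch a Dataset Management command; only the Deallocate attribute is
 * implemented, other attributes are completed successfully but ignored.
 */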
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_bdev_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

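/* Handle a Write Zeroes command via __blkdev_issue_zeroout(). */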
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        struct bio *bio = NULL;
        sector_t sector;
        sector_t nr_sector;
        int ret;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                     (req->ns->blksize_shift - 9));

        ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                                     GFP_KERNEL, &bio, 0);
        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, errno_to_nvme_status(req, ret));
        }
}

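/*
 * Set up the execute handler for an I/O command targeting a block-device
 * backed namespace.
 */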
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_bdev_execute_rw;
                if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
                        req->metadata_len = nvmet_rw_metadata_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_bdev_execute_flush;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_bdev_execute_dsm;
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_bdev_execute_write_zeroes;
                return 0;
        default:
                return nvmet_report_invalid_opcode(req);
        }
}