xref: /linux/drivers/nvme/target/io-cmd-file.c (revision bc537a9cc47eec7f4e32b8164c494ddc35dca8ac)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

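/*
 * Each mempool element holds NVMET_MAX_MPOOL_BVEC bio_vecs, and at least
 * NVMET_MIN_MPOOL_OBJ elements are kept in reserve so the read/write path
 * can make forward progress when kmalloc_array() fails under memory
 * pressure.
 */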
#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

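/*
 * Tear down a file-backed namespace: drain any buffered-io work still in
 * flight, release the bvec mempool and its backing slab cache, then drop
 * the reference on the backing file.
 */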
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

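/*
 * Set up a file-backed namespace: open the backing file (O_DIRECT unless
 * buffered I/O was requested), record its size and block size, and create
 * the bio_vec slab cache plus the mempool used as a fallback allocator in
 * the I/O path.
 */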
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	ns->blksize_shift = file_inode(ns->file)->i_blkbits;

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

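/* Translate one scatterlist page into a bio_vec entry. */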
static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
	bv->bv_page = sg_page_iter_page(iter);
	bv->bv_offset = iter->sg->offset;
	bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

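/*
 * Build an iov_iter over the request's bio_vec array and feed it to the
 * backing file's ->read_iter/->write_iter.  A FUA write is mapped to
 * IOCB_DSYNC so the data is on stable storage before the command
 * completes.  If the call finishes synchronously (anything other than
 * -EIOCBQUEUED), the completion handler, if one is set, is invoked
 * directly.
 */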
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int ki_flags = 0, rw;
	ssize_t ret;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags = IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	ret = call_iter(iocb, &iter);

	if (ret != -EIOCBQUEUED && iocb->ki_complete)
		iocb->ki_complete(iocb, ret, 0);

	return ret;
}

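/*
 * I/O completion: free the bio_vec array unless the inline one was used,
 * then complete the request, reporting an internal error if the
 * transferred length does not match the expected data length.
 */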
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	nvmet_req_complete(req, ret != req->data_len ?
			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

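/*
 * Read/write path.  The scatterlist is mapped into a bio_vec array taken
 * from the inline array, from kmalloc_array(), or, as a last resort, from
 * the namespace mempool.  In the mempool case a request that needs more
 * than NVMET_MAX_MPOOL_BVEC entries is split up and submitted
 * synchronously in mempool-sized chunks.
 */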
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
	struct sg_page_iter sg_pg_iter;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->data_len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	req->f.mpool_alloc = false;
	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
		if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
			is_sync = true;
	}

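	/*
	 * Map each scatterlist page into the bvec array.  In the synchronous
	 * fallback case, submit a chunk every NVMET_MAX_MPOOL_BVEC entries
	 * or when the final, possibly partial, chunk is reached.
	 */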
	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
			if (ret < 0)
				goto out;
			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len))
		ret = -EIO;
out:
	if (unlikely(is_sync || ret)) {
		nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
		return;
	}
	req->f.iocb.ki_complete = nvmet_file_io_done;
	nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}

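/*
 * Buffered (page cache) I/O can block on writeback and reclaim, so it is
 * deferred to the dedicated buffered_io_wq workqueue instead of running
 * in the caller's context.
 */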
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_rw(req);
}

static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

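/*
 * Flush the backing file to stable storage.  vfs_fsync() may sleep, so the
 * NVMe flush command is dispatched from workqueue context (see
 * nvmet_file_execute_flush() below).
 */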
u16 nvmet_file_flush(struct nvmet_req *req)
{
	if (vfs_fsync(req->ns->file, 1) < 0)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

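/*
 * Deallocate: punch a hole for each DSM range in the command's data
 * buffer.  The first failure terminates the loop and its status is
 * reported for the whole command.
 */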
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (ret)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
			break;
		}

		if (vfs_fallocate(req->ns->file, mode, offset, len)) {
			ret = NVME_SC_INTERNAL | NVME_SC_DNR;
			break;
		}
	}

	nvmet_req_complete(req, ret);
}

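/*
 * Deallocate (NVME_DSMGMT_AD) is handled by punching holes; the Integral
 * Dataset for Read/Write hints are not supported and complete without
 * doing any work.
 */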
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

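/*
 * Write Zeroes is implemented with fallocate(FALLOC_FL_ZERO_RANGE).  The
 * length field in the command is 0's based, hence the + 1 when computing
 * the byte length.
 */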
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

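/*
 * Set the execute handler and the expected transfer length for each
 * supported opcode on a file-backed namespace.
 */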
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		if (req->ns->buffered_io)
			req->execute = nvmet_file_execute_rw_buffered_io;
		else
			req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}