// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct kstat stat;

	ns->file = filp_open(ns->device_path,
			O_RDWR | O_LARGEFILE | O_DIRECT, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	ns->blksize_shift = file_inode(ns->file)->i_blkbits;

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);
	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
	bv->bv_page = sg_page_iter_page(iter);
	bv->bv_offset = iter->sg->offset;
	bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int ki_flags = 0, rw;
	ssize_t ret;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		/* honor the FUA bit by syncing the write through to media */
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags = IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = IOCB_DIRECT | ki_flags;

	ret = call_iter(iocb, &iter);

	/* complete here unless the I/O was queued for async completion */
	if (ret != -EIOCBQUEUED && iocb->ki_complete)
		iocb->ki_complete(iocb, ret, 0);

	return ret;
}

static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	nvmet_req_complete(req, ret != req->data_len ?
			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
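/*
 * Read/write path. The request's scatterlist is mapped into a bio_vec
 * array and handed to the backing file's ->read_iter/->write_iter. The
 * array comes from inline storage for small transfers, kmalloc_array()
 * otherwise, and the per-namespace mempool as a fallback under memory
 * pressure. A mempool object only holds NVMET_MAX_MPOOL_BVEC entries,
 * so in the mempool case larger requests are submitted synchronously,
 * one chunk at a time.
 */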
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
	struct sg_page_iter sg_pg_iter;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	req->f.mpool_alloc = false;
	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
		if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
			is_sync = true;
	}

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		/* in sync mode, flush a full chunk or the final partial one */
		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
			if (ret < 0)
				goto out;
			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len))
		ret = -EIO;
out:
	if (unlikely(is_sync || ret)) {
		nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
		return;
	}
	req->f.iocb.ki_complete = nvmet_file_io_done;
	nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	int ret;

	ret = vfs_fsync(req->ns->file, 1);

	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset;
	loff_t len;
	int i, ret = 0;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		if (nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range))) {
			/* don't leave ret uninitialized on an SGL copy error */
			ret = -EIO;
			break;
		}
		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret)
			break;
	}

	nvmet_req_complete(req, ret < 0 ?
			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
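/*
 * DSM dispatch, run from a workqueue: vfs_fallocate() (and vfs_fsync()
 * in the flush path above) can block, so these handlers execute in
 * process context via schedule_work() rather than in the transport's
 * submission path. Only the deallocate (AD) attribute is implemented,
 * mapped to FALLOC_FL_PUNCH_HOLE on the backing file above.
 */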
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	/* NLB is a 0's based value, so the range covers length + 1 blocks */
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}