/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return &fd_dev->dev;
}

static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);

		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
			pr_debug("FILEIO: BLOCK Discard support available,"
				 " disabled by default\n");
		/*
		 * Enable write same emulation on the block-device backend and
		 * use 0xFFFF, as the smaller WRITE_SAME(10) only has a
		 * two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (blk_queue_nonrot(q))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
		 * based upon struct iovec limit for vfs_writev()
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}
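
/*
 * Illustrative configfs flow exercising the checks above (a sketch only;
 * the fdev0 name and /tmp/fileio.img backing file are assumptions,
 * following the usual target configfs layout):
 *
 *   mkdir /sys/kernel/config/target/core/fileio_0
 *   mkdir /sys/kernel/config/target/core/fileio_0/fdev0
 *   echo "fd_dev_name=/tmp/fileio.img,fd_dev_size=4194304" \
 *       > /sys/kernel/config/target/core/fileio_0/fdev0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/fdev0/enable
 *
 * With a regular file, fd_dev_size= is mandatory (the !FBDF_HAS_SIZE
 * branch above); with fd_dev_name= pointing at a block device, the size
 * is taken from the underlying inode instead.
 */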

static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_rw bvec[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos);
	else
		ret = vfs_iter_read(fd, &iter, &pos);

	kfree(bvec);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				return (ret < 0 ? ret : -EINVAL);
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
				return ret;
			}
		}
	}
	return 1;
}
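
/*
 * Worked example for fd_do_rw() above (illustrative numbers only): a READ
 * of 8 sectors at t_task_lba=16 with a 512-byte block_size and two 2048-byte
 * scatterlist entries becomes pos = 16 * 512 = 8192 and a two-element
 * bio_vec array covering len = 4096 bytes, which vfs_iter_read() consumes
 * as a single positioned, vectored I/O against fd_dev->fd_file.
 */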

static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}
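
/*
 * Example of the range math above (illustrative values): SYNCHRONIZE_CACHE
 * with t_task_lba=100 and data_length=4096 on a 512-byte block_size device
 * flushes start = 100 * 512 = 51200 through end = 51200 + 4096 - 1 = 55295;
 * an LBA and length of zero flushes the whole file via [0, LLONG_MAX].
 */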

static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
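
/*
 * Example of the replication above (illustrative values): WRITE_SAME for
 * nolb=8 blocks of 512 bytes builds eight bio_vec entries that all point
 * at the same single-block payload page, so one vfs_iter_write() of
 * len = 8 * 512 = 4096 bytes writes the pattern without copying it.
 */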

static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);

		if (ret != len) {
			pr_err("kernel_write() to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}
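
/*
 * Example of the offset math in fd_do_prot_fill() (illustrative values):
 * with the usual 8-byte DIF tuple per block (se_dev->prot_length = 8),
 * lba=1024 and nolb=16 give pos = 1024 * 8 = 8192 and
 * prot_length = 16 * 8 = 128 bytes, written to the .protection file in
 * bufsize-limited chunks.
 */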

static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}
	memset(buf, 0xff, PAGE_SIZE);

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = inode->i_bdev;
		struct se_device *dev = cmd->se_dev;

		ret = blkdev_issue_discard(bdev,
					   target_to_linux_sector(dev, lba),
					   target_to_linux_sector(dev, nolb),
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a regular file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}
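
/*
 * Example of the UNMAP conversion above (illustrative values): on a
 * regular file with a 4096-byte block_size, lba=256 and nolb=32 punch a
 * hole at pos = 256 * 4096 = 1048576 covering len = 32 * 4096 = 131072
 * bytes; on a block-device backend the same range is issued as a discard
 * in Linux 512-byte sectors instead.
 */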

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_iter_[write,read] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform an implicit vfs_fsync_range() after fd_do_rw()
		 * write ops for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};

static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
				FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
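
/*
 * Example control string parsed above (a sketch; the backing path is an
 * assumption): writing
 *
 *   fd_dev_name=/var/targets/disk0.img,fd_dev_size=1073741824,fd_buffered_io=1
 *
 * to the device's configfs control file sets FBDF_HAS_PATH and
 * FBDF_HAS_SIZE, and enables FDBD_HAS_BUFFERED_IO_WCE, i.e. buffered I/O
 * with write cache emulation instead of O_DSYNC.
 */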

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
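
/*
 * Example of the fd_get_blocks() math (illustrative values): a 1 GiB
 * backing store with a 512-byte block_size reports
 * div_u64(1073741824 - 512, 512) = 2097151, i.e. the last addressable
 * LBA rather than the total block count, matching READ_CAPACITY
 * semantics.
 */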

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	memset(buf, 0xff, unit_size);
	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}
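
/*
 * Example of the sizing above (illustrative values): formatting a device
 * of 2097152 blocks (last LBA 2097151) with 8-byte DIF tuples fills
 * (2097151 + 1) * 8 = 16777216 bytes of 0xff in the .protection file,
 * i.e. 16 MiB of "unwritten" protection data.
 */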

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);