/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

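/*
 * struct se_device is embedded inside struct fd_dev, so container_of()
 * recovers the FILEIO backend-private structure from the generic device.
 */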
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return &fd_dev->dev;
}

static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the filesystem buffer cache as a write-cache
	 * mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the fd_dev_size= value passed in from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		/*
		 * Report the size as i_size_read() minus one logical
		 * sector of the underlying struct block_device.
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
		/*
		 * Check if the underlying struct block_device request_queue
		 * supports discard (QUEUE_FLAG_DISCARD) for SCSI UNMAP /
		 * WRITE_SAME and ATA TRIM; if so, TPE=1 must be reported.
		 */
		if (blk_queue_discard(q)) {
			dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
			/*
			 * Currently hardcoded to 1 in Linux/SCSI code..
			 */
			dev->dev_attrib.max_unmap_block_desc_count = 1;
			dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
			dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;
			pr_debug("FILEIO: BLOCK Discard support available,"
					" disabled by default\n");
		}
		/*
		 * Enable WRITE_SAME emulation and use 0xFFFF, because the
		 * smaller WRITE_SAME(10) only has a two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (blk_queue_nonrot(q))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 4k Number of LBAs
		 * (NoLB), based on the struct iovec limit for vfs_writev().
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
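	/*
	 * Worked example, assuming FD_MAX_BYTES is 8 MiB as defined in
	 * target_core_file.h at this revision: with a 512-byte block size,
	 * hw_max_sectors = 8388608 / 512 = 16384 sectors per command.
	 */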

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}

static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_rw bio_vec[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

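	/*
	 * Wrap the bio_vec array in an iov_iter so a single
	 * vfs_iter_read()/vfs_iter_write() call covers the whole
	 * scatterlist at byte offset pos.
	 */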
	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos);
	else
		ret = vfs_iter_read(fd, &iter, &pos);

	kfree(bvec);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not
		 * return the full expected length for a struct file without
		 * a backing struct block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				return (ret < 0 ? ret : -EINVAL);
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
				return ret;
			}
		}
	}
	return 1;
}

static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

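	/*
	 * Worked example: LBA 8 with data_length 4096 on a 512-byte block
	 * device syncs the byte range 4096..8191.
	 */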
	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}

static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

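	/*
	 * Every bio_vec entry points at the same single-block payload page,
	 * so one vfs_iter_write() repeats that block nolb times on disk.
	 */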
	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);

		if (ret != len) {
			pr_err("kernel_write() to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}

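/*
 * Unmapped blocks have their protection information filled with 0xff.
 * An all-0xff application tag is the standard T10 PI escape that tells
 * verification code to skip checking those sectors.
 */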
static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}
	memset(buf, 0xff, PAGE_SIZE);

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = inode->i_bdev;

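		/*
		 * Note: blkdev_issue_discard() takes its start/count in
		 * 512-byte sectors, while lba/nolb here are in units of
		 * dev_attrib.block_size; the two only coincide for a
		 * 512-byte logical block size.
		 */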
		ret = blkdev_issue_discard(bdev, lba,
				nolb, GFP_KERNEL, 0);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a normal file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}

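/*
 * DIF ordering in fd_execute_rw(): for reads, protection information is
 * read from the .protection file first and verified after the data read;
 * for writes, incoming protection is verified before the data is written
 * and only then persisted to the .protection file.
 */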
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_rw() write
		 * ops for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independently of the WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
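
/*
 * Example configfs usage (illustrative paths, assuming the standard
 * target configfs layout under /sys/kernel/config/target/core):
 *
 *   mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/tmp/disk0.img,fd_dev_size=4194304000" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 */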

static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
				FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
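
/*
 * Worked example: a 1 GiB backing file (1073741824 bytes) with 512-byte
 * blocks yields (1073741824 - 512) / 512 = 2097151, the last addressable
 * LBA, matching the READ_CAPACITY convention of returning the highest LBA
 * rather than the block count.
 */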

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
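	/*
	 * Assuming FDBD_FORMAT_UNIT_SIZE is 2048 (per target_core_file.h at
	 * this revision), a 512-byte block size gives 1 MiB fill chunks.
	 */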
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	memset(buf, 0xff, unit_size);
	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);