xref: /linux/drivers/target/target_core_file.c (revision e9e8bcb8178e197d889ec31e79fa1ddc1732c8f9)
1 /*******************************************************************************
2  * Filename:  target_core_file.c
3  *
4  * This file contains the Storage Engine <-> FILEIO transport specific functions
5  *
6  * Copyright (c) 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28 
29 #include <linux/version.h>
30 #include <linux/string.h>
31 #include <linux/parser.h>
32 #include <linux/timer.h>
33 #include <linux/blkdev.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 
39 #include <target/target_core_base.h>
40 #include <target/target_core_device.h>
41 #include <target/target_core_transport.h>
42 
43 #include "target_core_file.h"
44 
45 #if 0
46 #define DEBUG_FD_CACHE(x...) printk(x)
47 #else
48 #define DEBUG_FD_CACHE(x...)
49 #endif
50 
51 #if 0
52 #define DEBUG_FD_FUA(x...) printk(x)
53 #else
54 #define DEBUG_FD_FUA(x...)
55 #endif
56 
57 static struct se_subsystem_api fileio_template;
58 
59 /*	fd_attach_hba(): (Part of se_subsystem_api_t template)
60  *
61  *	Allocate a struct fd_host and attach it to the passed struct se_hba.
62  */
63 static int fd_attach_hba(struct se_hba *hba, u32 host_id)
64 {
65 	struct fd_host *fd_host;
66 
67 	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
68 	if (!(fd_host)) {
69 		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
70 		return -1;
71 	}
72 
73 	fd_host->fd_host_id = host_id;
74 
75 	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
76 	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
77 	hba->hba_ptr = (void *) fd_host;
78 
79 	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
80 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
81 		TARGET_CORE_MOD_VERSION);
82 	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
83 		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
84 		hba->hba_id, fd_host->fd_host_id,
85 		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
86 
87 	return 0;
88 }
89 
90 static void fd_detach_hba(struct se_hba *hba)
91 {
92 	struct fd_host *fd_host = hba->hba_ptr;
93 
94 	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
95 		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
96 
97 	kfree(fd_host);
98 	hba->hba_ptr = NULL;
99 }
100 
101 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
102 {
103 	struct fd_dev *fd_dev;
104 	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
105 
106 	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
107 	if (!(fd_dev)) {
108 		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
109 		return NULL;
110 	}
111 
112 	fd_dev->fd_host = fd_host;
113 
114 	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);
115 
116 	return fd_dev;
117 }
118 
119 /*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
120  *
121  *	Open the backing file and register the new struct se_device with TCM core.
122  */
123 static struct se_device *fd_create_virtdevice(
124 	struct se_hba *hba,
125 	struct se_subsystem_dev *se_dev,
126 	void *p)
127 {
128 	char *dev_p = NULL;
129 	struct se_device *dev;
130 	struct se_dev_limits dev_limits;
131 	struct queue_limits *limits;
132 	struct fd_dev *fd_dev = (struct fd_dev *) p;
133 	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
134 	mm_segment_t old_fs;
135 	struct file *file;
136 	struct inode *inode = NULL;
137 	int dev_flags = 0, flags, ret = -EINVAL;
138 
139 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
140 
141 	old_fs = get_fs();
142 	set_fs(get_ds());
143 	dev_p = getname(fd_dev->fd_dev_name);
144 	set_fs(old_fs);
145 
146 	if (IS_ERR(dev_p)) {
147 		printk(KERN_ERR "getname(%s) failed: %ld\n",
148 			fd_dev->fd_dev_name, PTR_ERR(dev_p));
149 		ret = PTR_ERR(dev_p);
150 		goto fail;
151 	}
152 #if 0
153 	if (di->no_create_file)
154 		flags = O_RDWR | O_LARGEFILE;
155 	else
156 		flags = O_RDWR | O_CREAT | O_LARGEFILE;
157 #else
158 	flags = O_RDWR | O_CREAT | O_LARGEFILE;
159 #endif
160 /*	flags |= O_DIRECT; */
161 	/*
162 	 * If fd_buffered_io=1 has not been set explicitly (the default),
163 	 * use O_SYNC to force FILEIO writes to disk.
164 	 */
165 	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
166 		flags |= O_SYNC;
167 
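	/*
	 * Note: with fd_buffered_io=1, completed WRITEs may still sit in
	 * the page cache; durability then depends on the SYNCHRONIZE_CACHE
	 * and FUA WRITE emulation below calling vfs_fsync_range().
	 */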
168 	file = filp_open(dev_p, flags, 0600);
169 	if (IS_ERR(file)) {
170 		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
171 		ret = PTR_ERR(file);
172 		goto fail;
173 	}
174 	if (!file || !file->f_dentry) {
175 		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
176 		goto fail;
177 	}
178 	fd_dev->fd_file = file;
179 	/*
180 	 * If using a block backend with this struct file, we extract
181 	 * fd_dev->fd_[block,dev]_size from struct block_device.
182 	 *
183 	 * Otherwise, we use the passed fd_size= from configfs
184 	 */
185 	inode = file->f_mapping->host;
186 	if (S_ISBLK(inode->i_mode)) {
187 		struct request_queue *q;
188 		/*
189 		 * Setup the local scope queue_limits from struct request_queue->limits
190 		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
191 		 */
192 		q = bdev_get_queue(inode->i_bdev);
193 		limits = &dev_limits.limits;
194 		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
195 		limits->max_hw_sectors = queue_max_hw_sectors(q);
196 		limits->max_sectors = queue_max_sectors(q);
197 		/*
198 		 * Report the usable size as i_size_read() of the backing
199 		 * struct block_device minus one logical block
200 		 */
201 		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
202 		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
203 				       fd_dev->fd_block_size);
204 
205 		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
206 			" block_device blocks: %llu logical_block_size: %d\n",
207 			fd_dev->fd_dev_size,
208 			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
209 			fd_dev->fd_block_size);
210 	} else {
211 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
212 			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
213 				" parameter, and no backing struct"
214 				" block_device\n");
215 			goto fail;
216 		}
217 
218 		limits = &dev_limits.limits;
219 		limits->logical_block_size = FD_BLOCKSIZE;
220 		limits->max_hw_sectors = FD_MAX_SECTORS;
221 		limits->max_sectors = FD_MAX_SECTORS;
222 		fd_dev->fd_block_size = FD_BLOCKSIZE;
223 	}
224 
225 	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
226 	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
227 
228 	dev = transport_add_device_to_core_hba(hba, &fileio_template,
229 				se_dev, dev_flags, (void *)fd_dev,
230 				&dev_limits, "FILEIO", FD_VERSION);
231 	if (!(dev))
232 		goto fail;
233 
234 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
235 	fd_dev->fd_queue_depth = dev->queue_depth;
236 
237 	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
238 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
239 			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
240 
241 	putname(dev_p);
242 	return dev;
243 fail:
244 	if (fd_dev->fd_file) {
245 		filp_close(fd_dev->fd_file, NULL);
246 		fd_dev->fd_file = NULL;
247 	}
248 	putname(dev_p);
249 	return ERR_PTR(ret);
250 }
251 
252 /*	fd_free_device(): (Part of se_subsystem_api_t template)
253  *
254  *	Close the backing struct file and free the struct fd_dev.
255  */
256 static void fd_free_device(void *p)
257 {
258 	struct fd_dev *fd_dev = (struct fd_dev *) p;
259 
260 	if (fd_dev->fd_file) {
261 		filp_close(fd_dev->fd_file, NULL);
262 		fd_dev->fd_file = NULL;
263 	}
264 
265 	kfree(fd_dev);
266 }
267 
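/*
 * struct fd_request embeds its struct se_task as 'fd_task', so the
 * containing request can be recovered from a bare task pointer via
 * container_of().
 */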
268 static inline struct fd_request *FILE_REQ(struct se_task *task)
269 {
270 	return container_of(task, struct fd_request, fd_task);
271 }
272 
273 
274 static struct se_task *
275 fd_alloc_task(struct se_cmd *cmd)
276 {
277 	struct fd_request *fd_req;
278 
279 	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
280 	if (!(fd_req)) {
281 		printk(KERN_ERR "Unable to allocate struct fd_request\n");
282 		return NULL;
283 	}
284 
285 	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
286 
287 	return &fd_req->fd_task;
288 }
289 
290 static int fd_do_readv(struct se_task *task)
291 {
292 	struct fd_request *req = FILE_REQ(task);
293 	struct file *fd = req->fd_dev->fd_file;
294 	struct scatterlist *sg = task->task_sg;
295 	struct iovec *iov;
296 	mm_segment_t old_fs;
297 	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
298 	int ret = 0, i;
299 
300 	iov = kcalloc(task->task_sg_num, sizeof(struct iovec), GFP_KERNEL);
301 	if (!(iov)) {
302 		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
303 		return -1;
304 	}
305 
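	/*
	 * Build one iovec per scatterlist entry; sg_virt() returns the
	 * kernel virtual address of each segment so vfs_readv() can fill
	 * the SGL pages directly.
	 */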
306 	for (i = 0; i < task->task_sg_num; i++) {
307 		iov[i].iov_len = sg[i].length;
308 		iov[i].iov_base = sg_virt(&sg[i]);
309 	}
310 
311 	old_fs = get_fs();
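	/*
	 * vfs_readv() expects iovec pointers that pass user-space access
	 * checks; switching to KERNEL_DS around the call allows it to
	 * operate on the kernel iovec array built above.
	 */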
312 	set_fs(get_ds());
313 	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
314 	set_fs(old_fs);
315 
316 	kfree(iov);
317 	/*
318 	 * For a struct file without a backing struct block_device, a
319 	 * short read (e.g. past end-of-file) is not treated as an error:
320 	 * zeros and GOOD status are returned instead.
321 	 */
322 	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
323 		if (ret < 0 || ret != task->task_size) {
324 			printk(KERN_ERR "vfs_readv() returned %d,"
325 				" expecting %d for S_ISBLK\n", ret,
326 				(int)task->task_size);
327 			return -1;
328 		}
329 	} else {
330 		if (ret < 0) {
331 			printk(KERN_ERR "vfs_readv() returned %d for non"
332 				" S_ISBLK\n", ret);
333 			return -1;
334 		}
335 	}
336 
337 	return 1;
338 }
339 
340 static int fd_do_writev(struct se_task *task)
341 {
342 	struct fd_request *req = FILE_REQ(task);
343 	struct file *fd = req->fd_dev->fd_file;
344 	struct scatterlist *sg = task->task_sg;
345 	struct iovec *iov;
346 	mm_segment_t old_fs;
347 	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
348 	int ret, i = 0;
349 
350 	iov = kcalloc(task->task_sg_num, sizeof(struct iovec), GFP_KERNEL);
351 	if (!(iov)) {
352 		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
353 		return -1;
354 	}
355 
356 	for (i = 0; i < task->task_sg_num; i++) {
357 		iov[i].iov_len = sg[i].length;
358 		iov[i].iov_base = sg_virt(&sg[i]);
359 	}
360 
361 	old_fs = get_fs();
362 	set_fs(get_ds());
363 	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
364 	set_fs(old_fs);
365 
366 	kfree(iov);
367 
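	/*
	 * Unlike the READ path, any short write is treated as an error:
	 * the full task_size must reach the backing file.
	 */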
368 	if (ret < 0 || ret != task->task_size) {
369 		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
370 		return -1;
371 	}
372 
373 	return 1;
374 }
375 
376 static void fd_emulate_sync_cache(struct se_task *task)
377 {
378 	struct se_cmd *cmd = TASK_CMD(task);
379 	struct se_device *dev = cmd->se_dev;
380 	struct fd_dev *fd_dev = dev->dev_ptr;
381 	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
382 	loff_t start, end;
383 	int ret;
384 
385 	/*
386 	 * If the Immediate bit is set, queue up the GOOD response
387 	 * for this SYNCHRONIZE_CACHE op
388 	 */
389 	if (immed)
390 		transport_complete_sync_cache(cmd, 1);
391 
392 	/*
393 	 * Determine if we will be flushing the entire device.
394 	 */
395 	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
396 		start = 0;
397 		end = LLONG_MAX;
398 	} else {
399 		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
400 		if (cmd->data_length)
401 			end = start + cmd->data_length;
402 		else
403 			end = LLONG_MAX;
404 	}
405 
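	/*
	 * vfs_fsync_range() takes a byte range; datasync=1 skips metadata
	 * that is not required to retrieve the flushed data.
	 */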
406 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
407 	if (ret != 0)
408 		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
409 
410 	if (!immed)
411 		transport_complete_sync_cache(cmd, ret == 0);
412 }
413 
414 /*
415  * Tell TCM Core that we are capable of WriteCache emulation for
416  * an underlying struct se_device.
417  */
418 static int fd_emulated_write_cache(struct se_device *dev)
419 {
420 	return 1;
421 }
422 
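/*
 * Disable Page Out (DPO) is not emulated for FILEIO backends.
 */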
423 static int fd_emulated_dpo(struct se_device *dev)
424 {
425 	return 0;
426 }
427 /*
428  * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
429  * for TYPE_DISK.
430  */
431 static int fd_emulated_fua_write(struct se_device *dev)
432 {
433 	return 1;
434 }
435 
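/*
 * FUA READ emulation is likewise not supported.
 */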
436 static int fd_emulated_fua_read(struct se_device *dev)
437 {
438 	return 0;
439 }
440 
441 /*
442  * WRITE Force Unit Access (FUA) emulation on a per struct se_task
443  * LBA range basis.
444  */
445 static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
446 {
447 	struct se_device *dev = cmd->se_dev;
448 	struct fd_dev *fd_dev = dev->dev_ptr;
449 	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
450 	loff_t end = start + task->task_size;
451 	int ret;
452 
453 	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
454 			task->task_lba, task->task_size);
455 
456 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
457 	if (ret != 0)
458 		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
459 }
460 
461 static int fd_do_task(struct se_task *task)
462 {
463 	struct se_cmd *cmd = task->task_se_cmd;
464 	struct se_device *dev = cmd->se_dev;
465 	int ret = 0;
466 
467 	/*
468 	 * Call vectorized fileio functions to map struct scatterlist
469 	 * physical memory addresses to struct iovec virtual memory.
470 	 */
471 	if (task->task_data_direction == DMA_FROM_DEVICE) {
472 		ret = fd_do_readv(task);
473 	} else {
474 		ret = fd_do_writev(task);
475 
476 		if (ret > 0 &&
477 		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
478 		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
479 		    T_TASK(cmd)->t_tasks_fua) {
480 			/*
481 			 * We might need to be a bit smarter here
482 			 * and return some sense data to let the initiator
483 			 * know the FUA WRITE cache sync failed..?
484 			 */
485 			fd_emulate_write_fua(cmd, task);
486 		}
487 
488 	}
489 
490 	if (ret < 0)
491 		return ret;
492 	if (ret) {
493 		task->task_scsi_status = GOOD;
494 		transport_complete_task(task, 1);
495 	}
496 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
497 }
498 
499 /*	fd_free_task(): (Part of se_subsystem_api_t template)
500  *
501  *	Release the struct fd_request allocated by fd_alloc_task().
502  */
503 static void fd_free_task(struct se_task *task)
504 {
505 	struct fd_request *req = FILE_REQ(task);
506 
507 	kfree(req);
508 }
509 
510 enum {
511 	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
512 };
513 
514 static match_table_t tokens = {
515 	{Opt_fd_dev_name, "fd_dev_name=%s"},
516 	{Opt_fd_dev_size, "fd_dev_size=%s"},
517 	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
518 	{Opt_err, NULL}
519 };
520 
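/*
 * Example of a control string parsed by fd_set_configfs_dev_params()
 * (the path and size below are illustrative only):
 *
 *   fd_dev_name=/tmp/fileio0.img,fd_dev_size=4294967296,fd_buffered_io=1
 */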
521 static ssize_t fd_set_configfs_dev_params(
522 	struct se_hba *hba,
523 	struct se_subsystem_dev *se_dev,
524 	const char *page, ssize_t count)
525 {
526 	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
527 	char *orig, *ptr, *arg_p, *opts;
528 	substring_t args[MAX_OPT_ARGS];
529 	int ret = 0, arg, token;
530 
531 	opts = kstrdup(page, GFP_KERNEL);
532 	if (!opts)
533 		return -ENOMEM;
534 
535 	orig = opts;
536 
537 	while ((ptr = strsep(&opts, ",")) != NULL) {
538 		if (!*ptr)
539 			continue;
540 
541 		token = match_token(ptr, tokens, args);
542 		switch (token) {
543 		case Opt_fd_dev_name:
544 			arg_p = match_strdup(&args[0]);
545 			if (!arg_p) {
546 				ret = -ENOMEM;
547 				break;
548 			}
549 			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
550 					"%s", arg_p);
551 			kfree(arg_p);
552 			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
553 					fd_dev->fd_dev_name);
554 			fd_dev->fbd_flags |= FBDF_HAS_PATH;
555 			break;
556 		case Opt_fd_dev_size:
557 			arg_p = match_strdup(&args[0]);
558 			if (!arg_p) {
559 				ret = -ENOMEM;
560 				break;
561 			}
562 			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
563 			kfree(arg_p);
564 			if (ret < 0) {
565 				printk(KERN_ERR "strict_strtoull() failed for"
566 						" fd_dev_size=\n");
567 				goto out;
568 			}
569 			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
570 					" bytes\n", fd_dev->fd_dev_size);
571 			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
572 			break;
573 		case Opt_fd_buffered_io:
574 			match_int(args, &arg);
575 			if (arg != 1) {
576 				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
577 				ret = -EINVAL;
578 				goto out;
579 			}
580 
581 			printk(KERN_INFO "FILEIO: Using buffered I/O"
582 				" operations for struct fd_dev\n");
583 
584 			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
585 			break;
586 		default:
587 			break;
588 		}
589 	}
590 
591 out:
592 	kfree(orig);
593 	return (!ret) ? count : ret;
594 }
595 
596 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
597 {
598 	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
599 
600 	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
601 		printk(KERN_ERR "Missing fd_dev_name=\n");
602 		return -1;
603 	}
604 
605 	return 0;
606 }
607 
608 static ssize_t fd_show_configfs_dev_params(
609 	struct se_hba *hba,
610 	struct se_subsystem_dev *se_dev,
611 	char *b)
612 {
613 	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
614 	ssize_t bl = 0;
615 
616 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
617 	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
618 		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
619 		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
620 		"Buffered" : "Synchronous");
621 	return bl;
622 }
623 
624 /*	fd_get_cdb(): (Part of se_subsystem_api_t template)
625  *
626  *	Return the CDB buffer embedded in the struct fd_request.
627  */
628 static unsigned char *fd_get_cdb(struct se_task *task)
629 {
630 	struct fd_request *req = FILE_REQ(task);
631 
632 	return req->fd_scsi_cdb;
633 }
634 
635 /*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
636  *
637  *	Report the SCSI specification revision emulated for FILEIO devices.
638  */
639 static u32 fd_get_device_rev(struct se_device *dev)
640 {
641 	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
642 }
643 
644 /*	fd_get_device_type(): (Part of se_subsystem_api_t template)
645  *
646  *	FILEIO devices are always reported as TYPE_DISK.
647  */
648 static u32 fd_get_device_type(struct se_device *dev)
649 {
650 	return TYPE_DISK;
651 }
652 
653 static sector_t fd_get_blocks(struct se_device *dev)
654 {
655 	struct fd_dev *fd_dev = dev->dev_ptr;
656 	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
657 			DEV_ATTRIB(dev)->block_size);
658 
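	/*
	 * For block-device backed files fd_dev_size was set up in
	 * fd_create_virtdevice() as i_size_read() minus one logical block,
	 * so this division yields (total blocks - 1) for those backends
	 * and the configured fd_dev_size= in blocks otherwise.
	 */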
659 	return blocks_long;
660 }
661 
662 static struct se_subsystem_api fileio_template = {
663 	.name			= "fileio",
664 	.owner			= THIS_MODULE,
665 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
666 	.attach_hba		= fd_attach_hba,
667 	.detach_hba		= fd_detach_hba,
668 	.allocate_virtdevice	= fd_allocate_virtdevice,
669 	.create_virtdevice	= fd_create_virtdevice,
670 	.free_device		= fd_free_device,
671 	.dpo_emulated		= fd_emulated_dpo,
672 	.fua_write_emulated	= fd_emulated_fua_write,
673 	.fua_read_emulated	= fd_emulated_fua_read,
674 	.write_cache_emulated	= fd_emulated_write_cache,
675 	.alloc_task		= fd_alloc_task,
676 	.do_task		= fd_do_task,
677 	.do_sync_cache		= fd_emulate_sync_cache,
678 	.free_task		= fd_free_task,
679 	.check_configfs_dev_params = fd_check_configfs_dev_params,
680 	.set_configfs_dev_params = fd_set_configfs_dev_params,
681 	.show_configfs_dev_params = fd_show_configfs_dev_params,
682 	.get_cdb		= fd_get_cdb,
683 	.get_device_rev		= fd_get_device_rev,
684 	.get_device_type	= fd_get_device_type,
685 	.get_blocks		= fd_get_blocks,
686 };
687 
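/*
 * Once this subsystem plugin is registered, a FILEIO device is typically
 * created from userspace through the generic target configfs layout,
 * e.g. (paths and values illustrative):
 *
 *   mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/tmp/fileio0.img,fd_dev_size=4294967296" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 */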
688 static int __init fileio_module_init(void)
689 {
690 	return transport_subsystem_register(&fileio_template);
691 }
692 
693 static void __exit fileio_module_exit(void)
694 {
695 	transport_subsystem_release(&fileio_template);
696 }
697 
698 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
699 MODULE_AUTHOR("nab@Linux-iSCSI.org");
700 MODULE_LICENSE("GPL");
701 
702 module_init(fileio_module_init);
703 module_exit(fileio_module_exit);
704