1 /******************************************************************************* 2 * Filename: target_core_file.c 3 * 4 * This file contains the Storage Engine <-> FILEIO transport specific functions 5 * 6 * Copyright (c) 2005 PyX Technologies, Inc. 7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. 8 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * 11 * Nicholas A. Bellinger <nab@kernel.org> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * 27 ******************************************************************************/ 28 29 #include <linux/string.h> 30 #include <linux/parser.h> 31 #include <linux/timer.h> 32 #include <linux/blkdev.h> 33 #include <linux/slab.h> 34 #include <linux/spinlock.h> 35 #include <linux/module.h> 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_host.h> 38 39 #include <target/target_core_base.h> 40 #include <target/target_core_backend.h> 41 42 #include "target_core_file.h" 43 44 static struct se_subsystem_api fileio_template; 45 46 /* fd_attach_hba(): (Part of se_subsystem_api_t template) 47 * 48 * 49 */ 50 static int fd_attach_hba(struct se_hba *hba, u32 host_id) 51 { 52 struct fd_host *fd_host; 53 54 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 55 if (!fd_host) { 56 pr_err("Unable to allocate memory for struct fd_host\n"); 57 return -ENOMEM; 58 } 59 60 fd_host->fd_host_id = host_id; 61 62 hba->hba_ptr = fd_host; 63 64 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 65 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 66 TARGET_CORE_MOD_VERSION); 67 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 68 " MaxSectors: %u\n", 69 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); 70 71 return 0; 72 } 73 74 static void fd_detach_hba(struct se_hba *hba) 75 { 76 struct fd_host *fd_host = hba->hba_ptr; 77 78 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 79 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 80 81 kfree(fd_host); 82 hba->hba_ptr = NULL; 83 } 84 85 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 86 { 87 struct fd_dev *fd_dev; 88 struct fd_host *fd_host = hba->hba_ptr; 89 90 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 91 if (!fd_dev) { 92 pr_err("Unable to allocate memory for struct fd_dev\n"); 93 return NULL; 94 } 95 96 fd_dev->fd_host = fd_host; 97 98 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 99 100 return fd_dev; 101 } 102 103 /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) 104 * 105 * 106 */ 107 static struct se_device *fd_create_virtdevice( 108 struct se_hba *hba, 109 struct se_subsystem_dev *se_dev, 110 void *p) 111 { 112 char *dev_p = NULL; 113 struct se_device *dev; 114 struct se_dev_limits dev_limits; 115 struct queue_limits *limits; 116 struct fd_dev *fd_dev = p; 117 struct fd_host *fd_host = hba->hba_ptr; 118 mm_segment_t old_fs; 119 struct file *file; 120 struct inode *inode = NULL; 121 int dev_flags = 0, flags, ret = -EINVAL; 122 123 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 124 125 old_fs = get_fs(); 126 set_fs(get_ds()); 127 dev_p = getname(fd_dev->fd_dev_name); 128 set_fs(old_fs); 129 130 if (IS_ERR(dev_p)) { 131 pr_err("getname(%s) failed: %lu\n", 132 fd_dev->fd_dev_name, IS_ERR(dev_p)); 133 ret = PTR_ERR(dev_p); 134 goto fail; 135 } 136 137 /* O_DIRECT too? */ 138 flags = O_RDWR | O_CREAT | O_LARGEFILE; 139 140 /* 141 * If fd_buffered_io=1 has not been set explicitly (the default), 142 * use O_SYNC to force FILEIO writes to disk. 143 */ 144 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 145 flags |= O_SYNC; 146 147 file = filp_open(dev_p, flags, 0600); 148 if (IS_ERR(file)) { 149 pr_err("filp_open(%s) failed\n", dev_p); 150 ret = PTR_ERR(file); 151 goto fail; 152 } 153 if (!file || !file->f_dentry) { 154 pr_err("filp_open(%s) failed\n", dev_p); 155 goto fail; 156 } 157 fd_dev->fd_file = file; 158 /* 159 * If using a block backend with this struct file, we extract 160 * fd_dev->fd_[block,dev]_size from struct block_device. 161 * 162 * Otherwise, we use the passed fd_size= from configfs 163 */ 164 inode = file->f_mapping->host; 165 if (S_ISBLK(inode->i_mode)) { 166 struct request_queue *q; 167 unsigned long long dev_size; 168 /* 169 * Setup the local scope queue_limits from struct request_queue->limits 170 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 171 */ 172 q = bdev_get_queue(inode->i_bdev); 173 limits = &dev_limits.limits; 174 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); 175 limits->max_hw_sectors = queue_max_hw_sectors(q); 176 limits->max_sectors = queue_max_sectors(q); 177 /* 178 * Determine the number of bytes from i_size_read() minus 179 * one (1) logical sector from underlying struct block_device 180 */ 181 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 182 dev_size = (i_size_read(file->f_mapping->host) - 183 fd_dev->fd_block_size); 184 185 pr_debug("FILEIO: Using size: %llu bytes from struct" 186 " block_device blocks: %llu logical_block_size: %d\n", 187 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 188 fd_dev->fd_block_size); 189 } else { 190 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 191 pr_err("FILEIO: Missing fd_dev_size=" 192 " parameter, and no backing struct" 193 " block_device\n"); 194 goto fail; 195 } 196 197 limits = &dev_limits.limits; 198 limits->logical_block_size = FD_BLOCKSIZE; 199 limits->max_hw_sectors = FD_MAX_SECTORS; 200 limits->max_sectors = FD_MAX_SECTORS; 201 fd_dev->fd_block_size = FD_BLOCKSIZE; 202 } 203 204 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 205 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; 206 207 dev = transport_add_device_to_core_hba(hba, &fileio_template, 208 se_dev, dev_flags, fd_dev, 209 &dev_limits, "FILEIO", FD_VERSION); 210 if (!dev) 211 goto fail; 212 213 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 214 fd_dev->fd_queue_depth = dev->queue_depth; 215 216 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 217 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 218 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 219 220 putname(dev_p); 221 return dev; 222 fail: 223 if (fd_dev->fd_file) { 224 filp_close(fd_dev->fd_file, NULL); 225 fd_dev->fd_file = NULL; 226 } 227 putname(dev_p); 228 return ERR_PTR(ret); 229 } 230 231 /* fd_free_device(): (Part of se_subsystem_api_t template) 232 * 233 * 234 */ 235 static void fd_free_device(void *p) 236 { 237 struct fd_dev *fd_dev = p; 238 239 if (fd_dev->fd_file) { 240 filp_close(fd_dev->fd_file, NULL); 241 fd_dev->fd_file = NULL; 242 } 243 244 kfree(fd_dev); 245 } 246 247 static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl, 248 u32 sgl_nents) 249 { 250 struct se_device *se_dev = cmd->se_dev; 251 struct fd_dev *dev = se_dev->dev_ptr; 252 struct file *fd = dev->fd_file; 253 struct scatterlist *sg; 254 struct iovec *iov; 255 mm_segment_t old_fs; 256 loff_t pos = (cmd->t_task_lba * 257 se_dev->se_sub_dev->se_dev_attrib.block_size); 258 int ret = 0, i; 259 260 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 261 if (!iov) { 262 pr_err("Unable to allocate fd_do_readv iov[]\n"); 263 return -ENOMEM; 264 } 265 266 for_each_sg(sgl, sg, sgl_nents, i) { 267 iov[i].iov_len = sg->length; 268 iov[i].iov_base = sg_virt(sg); 269 } 270 271 old_fs = get_fs(); 272 set_fs(get_ds()); 273 ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); 274 set_fs(old_fs); 275 276 kfree(iov); 277 /* 278 * Return zeros and GOOD status even if the READ did not return 279 * the expected virt_size for struct file w/o a backing struct 280 * block_device. 281 */ 282 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { 283 if (ret < 0 || ret != cmd->data_length) { 284 pr_err("vfs_readv() returned %d," 285 " expecting %d for S_ISBLK\n", ret, 286 (int)cmd->data_length); 287 return (ret < 0 ? ret : -EINVAL); 288 } 289 } else { 290 if (ret < 0) { 291 pr_err("vfs_readv() returned %d for non" 292 " S_ISBLK\n", ret); 293 return ret; 294 } 295 } 296 297 return 1; 298 } 299 300 static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, 301 u32 sgl_nents) 302 { 303 struct se_device *se_dev = cmd->se_dev; 304 struct fd_dev *dev = se_dev->dev_ptr; 305 struct file *fd = dev->fd_file; 306 struct scatterlist *sg; 307 struct iovec *iov; 308 mm_segment_t old_fs; 309 loff_t pos = (cmd->t_task_lba * 310 se_dev->se_sub_dev->se_dev_attrib.block_size); 311 int ret, i = 0; 312 313 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 314 if (!iov) { 315 pr_err("Unable to allocate fd_do_writev iov[]\n"); 316 return -ENOMEM; 317 } 318 319 for_each_sg(sgl, sg, sgl_nents, i) { 320 iov[i].iov_len = sg->length; 321 iov[i].iov_base = sg_virt(sg); 322 } 323 324 old_fs = get_fs(); 325 set_fs(get_ds()); 326 ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); 327 set_fs(old_fs); 328 329 kfree(iov); 330 331 if (ret < 0 || ret != cmd->data_length) { 332 pr_err("vfs_writev() returned %d\n", ret); 333 return (ret < 0 ? ret : -EINVAL); 334 } 335 336 return 1; 337 } 338 339 static void fd_emulate_sync_cache(struct se_cmd *cmd) 340 { 341 struct se_device *dev = cmd->se_dev; 342 struct fd_dev *fd_dev = dev->dev_ptr; 343 int immed = (cmd->t_task_cdb[1] & 0x2); 344 loff_t start, end; 345 int ret; 346 347 /* 348 * If the Immediate bit is set, queue up the GOOD response 349 * for this SYNCHRONIZE_CACHE op 350 */ 351 if (immed) 352 target_complete_cmd(cmd, SAM_STAT_GOOD); 353 354 /* 355 * Determine if we will be flushing the entire device. 356 */ 357 if (cmd->t_task_lba == 0 && cmd->data_length == 0) { 358 start = 0; 359 end = LLONG_MAX; 360 } else { 361 start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 362 if (cmd->data_length) 363 end = start + cmd->data_length; 364 else 365 end = LLONG_MAX; 366 } 367 368 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 369 if (ret != 0) 370 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 371 372 if (immed) 373 return; 374 375 if (ret) { 376 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 377 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 378 } else { 379 target_complete_cmd(cmd, SAM_STAT_GOOD); 380 } 381 } 382 383 static void fd_emulate_write_fua(struct se_cmd *cmd) 384 { 385 struct se_device *dev = cmd->se_dev; 386 struct fd_dev *fd_dev = dev->dev_ptr; 387 loff_t start = cmd->t_task_lba * 388 dev->se_sub_dev->se_dev_attrib.block_size; 389 loff_t end = start + cmd->data_length; 390 int ret; 391 392 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 393 cmd->t_task_lba, cmd->data_length); 394 395 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 396 if (ret != 0) 397 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 398 } 399 400 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 401 u32 sgl_nents, enum dma_data_direction data_direction) 402 { 403 struct se_device *dev = cmd->se_dev; 404 int ret = 0; 405 406 /* 407 * Call vectorized fileio functions to map struct scatterlist 408 * physical memory addresses to struct iovec virtual memory. 409 */ 410 if (data_direction == DMA_FROM_DEVICE) { 411 ret = fd_do_readv(cmd, sgl, sgl_nents); 412 } else { 413 ret = fd_do_writev(cmd, sgl, sgl_nents); 414 415 if (ret > 0 && 416 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 417 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 418 (cmd->se_cmd_flags & SCF_FUA)) { 419 /* 420 * We might need to be a bit smarter here 421 * and return some sense data to let the initiator 422 * know the FUA WRITE cache sync failed..? 423 */ 424 fd_emulate_write_fua(cmd); 425 } 426 427 } 428 429 if (ret < 0) { 430 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 431 return ret; 432 } 433 if (ret) 434 target_complete_cmd(cmd, SAM_STAT_GOOD); 435 return 0; 436 } 437 438 enum { 439 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err 440 }; 441 442 static match_table_t tokens = { 443 {Opt_fd_dev_name, "fd_dev_name=%s"}, 444 {Opt_fd_dev_size, "fd_dev_size=%s"}, 445 {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 446 {Opt_err, NULL} 447 }; 448 449 static ssize_t fd_set_configfs_dev_params( 450 struct se_hba *hba, 451 struct se_subsystem_dev *se_dev, 452 const char *page, ssize_t count) 453 { 454 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 455 char *orig, *ptr, *arg_p, *opts; 456 substring_t args[MAX_OPT_ARGS]; 457 int ret = 0, arg, token; 458 459 opts = kstrdup(page, GFP_KERNEL); 460 if (!opts) 461 return -ENOMEM; 462 463 orig = opts; 464 465 while ((ptr = strsep(&opts, ",\n")) != NULL) { 466 if (!*ptr) 467 continue; 468 469 token = match_token(ptr, tokens, args); 470 switch (token) { 471 case Opt_fd_dev_name: 472 arg_p = match_strdup(&args[0]); 473 if (!arg_p) { 474 ret = -ENOMEM; 475 break; 476 } 477 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 478 "%s", arg_p); 479 kfree(arg_p); 480 pr_debug("FILEIO: Referencing Path: %s\n", 481 fd_dev->fd_dev_name); 482 fd_dev->fbd_flags |= FBDF_HAS_PATH; 483 break; 484 case Opt_fd_dev_size: 485 arg_p = match_strdup(&args[0]); 486 if (!arg_p) { 487 ret = -ENOMEM; 488 break; 489 } 490 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 491 kfree(arg_p); 492 if (ret < 0) { 493 pr_err("strict_strtoull() failed for" 494 " fd_dev_size=\n"); 495 goto out; 496 } 497 pr_debug("FILEIO: Referencing Size: %llu" 498 " bytes\n", fd_dev->fd_dev_size); 499 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 500 break; 501 case Opt_fd_buffered_io: 502 match_int(args, &arg); 503 if (arg != 1) { 504 pr_err("bogus fd_buffered_io=%d value\n", arg); 505 ret = -EINVAL; 506 goto out; 507 } 508 509 pr_debug("FILEIO: Using buffered I/O" 510 " operations for struct fd_dev\n"); 511 512 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; 513 break; 514 default: 515 break; 516 } 517 } 518 519 out: 520 kfree(orig); 521 return (!ret) ? count : ret; 522 } 523 524 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 525 { 526 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 527 528 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 529 pr_err("Missing fd_dev_name=\n"); 530 return -EINVAL; 531 } 532 533 return 0; 534 } 535 536 static ssize_t fd_show_configfs_dev_params( 537 struct se_hba *hba, 538 struct se_subsystem_dev *se_dev, 539 char *b) 540 { 541 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 542 ssize_t bl = 0; 543 544 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 545 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 546 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 547 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? 548 "Buffered" : "Synchronous"); 549 return bl; 550 } 551 552 /* fd_get_device_rev(): (Part of se_subsystem_api_t template) 553 * 554 * 555 */ 556 static u32 fd_get_device_rev(struct se_device *dev) 557 { 558 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 559 } 560 561 /* fd_get_device_type(): (Part of se_subsystem_api_t template) 562 * 563 * 564 */ 565 static u32 fd_get_device_type(struct se_device *dev) 566 { 567 return TYPE_DISK; 568 } 569 570 static sector_t fd_get_blocks(struct se_device *dev) 571 { 572 struct fd_dev *fd_dev = dev->dev_ptr; 573 struct file *f = fd_dev->fd_file; 574 struct inode *i = f->f_mapping->host; 575 unsigned long long dev_size; 576 /* 577 * When using a file that references an underlying struct block_device, 578 * ensure dev_size is always based on the current inode size in order 579 * to handle underlying block_device resize operations. 580 */ 581 if (S_ISBLK(i->i_mode)) 582 dev_size = (i_size_read(i) - fd_dev->fd_block_size); 583 else 584 dev_size = fd_dev->fd_dev_size; 585 586 return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); 587 } 588 589 static struct se_subsystem_api fileio_template = { 590 .name = "fileio", 591 .owner = THIS_MODULE, 592 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 593 .write_cache_emulated = 1, 594 .fua_write_emulated = 1, 595 .attach_hba = fd_attach_hba, 596 .detach_hba = fd_detach_hba, 597 .allocate_virtdevice = fd_allocate_virtdevice, 598 .create_virtdevice = fd_create_virtdevice, 599 .free_device = fd_free_device, 600 .execute_cmd = fd_execute_cmd, 601 .do_sync_cache = fd_emulate_sync_cache, 602 .check_configfs_dev_params = fd_check_configfs_dev_params, 603 .set_configfs_dev_params = fd_set_configfs_dev_params, 604 .show_configfs_dev_params = fd_show_configfs_dev_params, 605 .get_device_rev = fd_get_device_rev, 606 .get_device_type = fd_get_device_type, 607 .get_blocks = fd_get_blocks, 608 }; 609 610 static int __init fileio_module_init(void) 611 { 612 return transport_subsystem_register(&fileio_template); 613 } 614 615 static void fileio_module_exit(void) 616 { 617 transport_subsystem_release(&fileio_template); 618 } 619 620 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); 621 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 622 MODULE_LICENSE("GPL"); 623 624 module_init(fileio_module_init); 625 module_exit(fileio_module_exit); 626