1 /******************************************************************************* 2 * Filename: target_core_file.c 3 * 4 * This file contains the Storage Engine <-> FILEIO transport specific functions 5 * 6 * Copyright (c) 2005 PyX Technologies, Inc. 7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. 8 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * 11 * Nicholas A. Bellinger <nab@kernel.org> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * 27 ******************************************************************************/ 28 29 #include <linux/string.h> 30 #include <linux/parser.h> 31 #include <linux/timer.h> 32 #include <linux/blkdev.h> 33 #include <linux/slab.h> 34 #include <linux/spinlock.h> 35 #include <linux/module.h> 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_host.h> 38 39 #include <target/target_core_base.h> 40 #include <target/target_core_backend.h> 41 42 #include "target_core_file.h" 43 44 static struct se_subsystem_api fileio_template; 45 46 /* fd_attach_hba(): (Part of se_subsystem_api_t template) 47 * 48 * 49 */ 50 static int fd_attach_hba(struct se_hba *hba, u32 host_id) 51 { 52 struct fd_host *fd_host; 53 54 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 55 if (!fd_host) { 56 pr_err("Unable to allocate memory for struct fd_host\n"); 57 return -ENOMEM; 58 } 59 60 fd_host->fd_host_id = host_id; 61 62 hba->hba_ptr = fd_host; 63 64 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 65 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 66 TARGET_CORE_MOD_VERSION); 67 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 68 " MaxSectors: %u\n", 69 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); 70 71 return 0; 72 } 73 74 static void fd_detach_hba(struct se_hba *hba) 75 { 76 struct fd_host *fd_host = hba->hba_ptr; 77 78 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 79 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 80 81 kfree(fd_host); 82 hba->hba_ptr = NULL; 83 } 84 85 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 86 { 87 struct fd_dev *fd_dev; 88 struct fd_host *fd_host = hba->hba_ptr; 89 90 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 91 if (!fd_dev) { 92 pr_err("Unable to allocate memory for struct fd_dev\n"); 93 return NULL; 94 } 95 96 fd_dev->fd_host = fd_host; 97 98 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 99 100 return fd_dev; 101 } 102 103 /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) 104 * 105 * 106 */ 107 static struct se_device *fd_create_virtdevice( 108 struct se_hba *hba, 109 struct se_subsystem_dev *se_dev, 110 void *p) 111 { 112 char *dev_p = NULL; 113 struct se_device *dev; 114 struct se_dev_limits dev_limits; 115 struct queue_limits *limits; 116 struct fd_dev *fd_dev = p; 117 struct fd_host *fd_host = hba->hba_ptr; 118 mm_segment_t old_fs; 119 struct file *file; 120 struct inode *inode = NULL; 121 int dev_flags = 0, flags, ret = -EINVAL; 122 123 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 124 125 old_fs = get_fs(); 126 set_fs(get_ds()); 127 dev_p = getname(fd_dev->fd_dev_name); 128 set_fs(old_fs); 129 130 if (IS_ERR(dev_p)) { 131 pr_err("getname(%s) failed: %lu\n", 132 fd_dev->fd_dev_name, IS_ERR(dev_p)); 133 ret = PTR_ERR(dev_p); 134 goto fail; 135 } 136 #if 0 137 if (di->no_create_file) 138 flags = O_RDWR | O_LARGEFILE; 139 else 140 flags = O_RDWR | O_CREAT | O_LARGEFILE; 141 #else 142 flags = O_RDWR | O_CREAT | O_LARGEFILE; 143 #endif 144 /* flags |= O_DIRECT; */ 145 /* 146 * If fd_buffered_io=1 has not been set explicitly (the default), 147 * use O_SYNC to force FILEIO writes to disk. 148 */ 149 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 150 flags |= O_SYNC; 151 152 file = filp_open(dev_p, flags, 0600); 153 if (IS_ERR(file)) { 154 pr_err("filp_open(%s) failed\n", dev_p); 155 ret = PTR_ERR(file); 156 goto fail; 157 } 158 if (!file || !file->f_dentry) { 159 pr_err("filp_open(%s) failed\n", dev_p); 160 goto fail; 161 } 162 fd_dev->fd_file = file; 163 /* 164 * If using a block backend with this struct file, we extract 165 * fd_dev->fd_[block,dev]_size from struct block_device. 166 * 167 * Otherwise, we use the passed fd_size= from configfs 168 */ 169 inode = file->f_mapping->host; 170 if (S_ISBLK(inode->i_mode)) { 171 struct request_queue *q; 172 /* 173 * Setup the local scope queue_limits from struct request_queue->limits 174 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 175 */ 176 q = bdev_get_queue(inode->i_bdev); 177 limits = &dev_limits.limits; 178 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); 179 limits->max_hw_sectors = queue_max_hw_sectors(q); 180 limits->max_sectors = queue_max_sectors(q); 181 /* 182 * Determine the number of bytes from i_size_read() minus 183 * one (1) logical sector from underlying struct block_device 184 */ 185 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 186 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 187 fd_dev->fd_block_size); 188 189 pr_debug("FILEIO: Using size: %llu bytes from struct" 190 " block_device blocks: %llu logical_block_size: %d\n", 191 fd_dev->fd_dev_size, 192 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), 193 fd_dev->fd_block_size); 194 } else { 195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 196 pr_err("FILEIO: Missing fd_dev_size=" 197 " parameter, and no backing struct" 198 " block_device\n"); 199 goto fail; 200 } 201 202 limits = &dev_limits.limits; 203 limits->logical_block_size = FD_BLOCKSIZE; 204 limits->max_hw_sectors = FD_MAX_SECTORS; 205 limits->max_sectors = FD_MAX_SECTORS; 206 fd_dev->fd_block_size = FD_BLOCKSIZE; 207 } 208 209 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 210 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; 211 212 dev = transport_add_device_to_core_hba(hba, &fileio_template, 213 se_dev, dev_flags, fd_dev, 214 &dev_limits, "FILEIO", FD_VERSION); 215 if (!dev) 216 goto fail; 217 218 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 219 fd_dev->fd_queue_depth = dev->queue_depth; 220 221 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 222 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 223 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 224 225 putname(dev_p); 226 return dev; 227 fail: 228 if (fd_dev->fd_file) { 229 filp_close(fd_dev->fd_file, NULL); 230 fd_dev->fd_file = NULL; 231 } 232 putname(dev_p); 233 return ERR_PTR(ret); 234 } 235 236 /* fd_free_device(): (Part of se_subsystem_api_t template) 237 * 238 * 239 */ 240 static void fd_free_device(void *p) 241 { 242 struct fd_dev *fd_dev = p; 243 244 if (fd_dev->fd_file) { 245 filp_close(fd_dev->fd_file, NULL); 246 fd_dev->fd_file = NULL; 247 } 248 249 kfree(fd_dev); 250 } 251 252 static inline struct fd_request *FILE_REQ(struct se_task *task) 253 { 254 return container_of(task, struct fd_request, fd_task); 255 } 256 257 258 static struct se_task * 259 fd_alloc_task(unsigned char *cdb) 260 { 261 struct fd_request *fd_req; 262 263 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); 264 if (!fd_req) { 265 pr_err("Unable to allocate struct fd_request\n"); 266 return NULL; 267 } 268 269 return &fd_req->fd_task; 270 } 271 272 static int fd_do_readv(struct se_task *task) 273 { 274 struct fd_request *req = FILE_REQ(task); 275 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; 276 struct fd_dev *dev = se_dev->dev_ptr; 277 struct file *fd = dev->fd_file; 278 struct scatterlist *sg = task->task_sg; 279 struct iovec *iov; 280 mm_segment_t old_fs; 281 loff_t pos = (task->task_lba * 282 se_dev->se_sub_dev->se_dev_attrib.block_size); 283 int ret = 0, i; 284 285 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 286 if (!iov) { 287 pr_err("Unable to allocate fd_do_readv iov[]\n"); 288 return -ENOMEM; 289 } 290 291 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 292 iov[i].iov_len = sg->length; 293 iov[i].iov_base = sg_virt(sg); 294 } 295 296 old_fs = get_fs(); 297 set_fs(get_ds()); 298 ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); 299 set_fs(old_fs); 300 301 kfree(iov); 302 /* 303 * Return zeros and GOOD status even if the READ did not return 304 * the expected virt_size for struct file w/o a backing struct 305 * block_device. 306 */ 307 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { 308 if (ret < 0 || ret != task->task_size) { 309 pr_err("vfs_readv() returned %d," 310 " expecting %d for S_ISBLK\n", ret, 311 (int)task->task_size); 312 return (ret < 0 ? ret : -EINVAL); 313 } 314 } else { 315 if (ret < 0) { 316 pr_err("vfs_readv() returned %d for non" 317 " S_ISBLK\n", ret); 318 return ret; 319 } 320 } 321 322 return 1; 323 } 324 325 static int fd_do_writev(struct se_task *task) 326 { 327 struct fd_request *req = FILE_REQ(task); 328 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; 329 struct fd_dev *dev = se_dev->dev_ptr; 330 struct file *fd = dev->fd_file; 331 struct scatterlist *sg = task->task_sg; 332 struct iovec *iov; 333 mm_segment_t old_fs; 334 loff_t pos = (task->task_lba * 335 se_dev->se_sub_dev->se_dev_attrib.block_size); 336 int ret, i = 0; 337 338 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 339 if (!iov) { 340 pr_err("Unable to allocate fd_do_writev iov[]\n"); 341 return -ENOMEM; 342 } 343 344 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 345 iov[i].iov_len = sg->length; 346 iov[i].iov_base = sg_virt(sg); 347 } 348 349 old_fs = get_fs(); 350 set_fs(get_ds()); 351 ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); 352 set_fs(old_fs); 353 354 kfree(iov); 355 356 if (ret < 0 || ret != task->task_size) { 357 pr_err("vfs_writev() returned %d\n", ret); 358 return (ret < 0 ? ret : -EINVAL); 359 } 360 361 return 1; 362 } 363 364 static void fd_emulate_sync_cache(struct se_task *task) 365 { 366 struct se_cmd *cmd = task->task_se_cmd; 367 struct se_device *dev = cmd->se_dev; 368 struct fd_dev *fd_dev = dev->dev_ptr; 369 int immed = (cmd->t_task_cdb[1] & 0x2); 370 loff_t start, end; 371 int ret; 372 373 /* 374 * If the Immediate bit is set, queue up the GOOD response 375 * for this SYNCHRONIZE_CACHE op 376 */ 377 if (immed) 378 transport_complete_sync_cache(cmd, 1); 379 380 /* 381 * Determine if we will be flushing the entire device. 382 */ 383 if (cmd->t_task_lba == 0 && cmd->data_length == 0) { 384 start = 0; 385 end = LLONG_MAX; 386 } else { 387 start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 388 if (cmd->data_length) 389 end = start + cmd->data_length; 390 else 391 end = LLONG_MAX; 392 } 393 394 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 395 if (ret != 0) 396 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 397 398 if (!immed) 399 transport_complete_sync_cache(cmd, ret == 0); 400 } 401 402 /* 403 * WRITE Force Unit Access (FUA) emulation on a per struct se_task 404 * LBA range basis.. 405 */ 406 static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) 407 { 408 struct se_device *dev = cmd->se_dev; 409 struct fd_dev *fd_dev = dev->dev_ptr; 410 loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 411 loff_t end = start + task->task_size; 412 int ret; 413 414 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 415 task->task_lba, task->task_size); 416 417 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 418 if (ret != 0) 419 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 420 } 421 422 static int fd_do_task(struct se_task *task) 423 { 424 struct se_cmd *cmd = task->task_se_cmd; 425 struct se_device *dev = cmd->se_dev; 426 int ret = 0; 427 428 /* 429 * Call vectorized fileio functions to map struct scatterlist 430 * physical memory addresses to struct iovec virtual memory. 431 */ 432 if (task->task_data_direction == DMA_FROM_DEVICE) { 433 ret = fd_do_readv(task); 434 } else { 435 ret = fd_do_writev(task); 436 437 if (ret > 0 && 438 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 439 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 440 (cmd->se_cmd_flags & SCF_FUA)) { 441 /* 442 * We might need to be a bit smarter here 443 * and return some sense data to let the initiator 444 * know the FUA WRITE cache sync failed..? 445 */ 446 fd_emulate_write_fua(cmd, task); 447 } 448 449 } 450 451 if (ret < 0) { 452 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 453 return ret; 454 } 455 if (ret) { 456 task->task_scsi_status = GOOD; 457 transport_complete_task(task, 1); 458 } 459 return 0; 460 } 461 462 /* fd_free_task(): (Part of se_subsystem_api_t template) 463 * 464 * 465 */ 466 static void fd_free_task(struct se_task *task) 467 { 468 struct fd_request *req = FILE_REQ(task); 469 470 kfree(req); 471 } 472 473 enum { 474 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err 475 }; 476 477 static match_table_t tokens = { 478 {Opt_fd_dev_name, "fd_dev_name=%s"}, 479 {Opt_fd_dev_size, "fd_dev_size=%s"}, 480 {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 481 {Opt_err, NULL} 482 }; 483 484 static ssize_t fd_set_configfs_dev_params( 485 struct se_hba *hba, 486 struct se_subsystem_dev *se_dev, 487 const char *page, ssize_t count) 488 { 489 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 490 char *orig, *ptr, *arg_p, *opts; 491 substring_t args[MAX_OPT_ARGS]; 492 int ret = 0, arg, token; 493 494 opts = kstrdup(page, GFP_KERNEL); 495 if (!opts) 496 return -ENOMEM; 497 498 orig = opts; 499 500 while ((ptr = strsep(&opts, ",\n")) != NULL) { 501 if (!*ptr) 502 continue; 503 504 token = match_token(ptr, tokens, args); 505 switch (token) { 506 case Opt_fd_dev_name: 507 arg_p = match_strdup(&args[0]); 508 if (!arg_p) { 509 ret = -ENOMEM; 510 break; 511 } 512 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 513 "%s", arg_p); 514 kfree(arg_p); 515 pr_debug("FILEIO: Referencing Path: %s\n", 516 fd_dev->fd_dev_name); 517 fd_dev->fbd_flags |= FBDF_HAS_PATH; 518 break; 519 case Opt_fd_dev_size: 520 arg_p = match_strdup(&args[0]); 521 if (!arg_p) { 522 ret = -ENOMEM; 523 break; 524 } 525 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 526 kfree(arg_p); 527 if (ret < 0) { 528 pr_err("strict_strtoull() failed for" 529 " fd_dev_size=\n"); 530 goto out; 531 } 532 pr_debug("FILEIO: Referencing Size: %llu" 533 " bytes\n", fd_dev->fd_dev_size); 534 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 535 break; 536 case Opt_fd_buffered_io: 537 match_int(args, &arg); 538 if (arg != 1) { 539 pr_err("bogus fd_buffered_io=%d value\n", arg); 540 ret = -EINVAL; 541 goto out; 542 } 543 544 pr_debug("FILEIO: Using buffered I/O" 545 " operations for struct fd_dev\n"); 546 547 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; 548 break; 549 default: 550 break; 551 } 552 } 553 554 out: 555 kfree(orig); 556 return (!ret) ? count : ret; 557 } 558 559 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 560 { 561 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 562 563 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 564 pr_err("Missing fd_dev_name=\n"); 565 return -EINVAL; 566 } 567 568 return 0; 569 } 570 571 static ssize_t fd_show_configfs_dev_params( 572 struct se_hba *hba, 573 struct se_subsystem_dev *se_dev, 574 char *b) 575 { 576 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 577 ssize_t bl = 0; 578 579 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 580 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 581 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 582 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? 583 "Buffered" : "Synchronous"); 584 return bl; 585 } 586 587 /* fd_get_device_rev(): (Part of se_subsystem_api_t template) 588 * 589 * 590 */ 591 static u32 fd_get_device_rev(struct se_device *dev) 592 { 593 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 594 } 595 596 /* fd_get_device_type(): (Part of se_subsystem_api_t template) 597 * 598 * 599 */ 600 static u32 fd_get_device_type(struct se_device *dev) 601 { 602 return TYPE_DISK; 603 } 604 605 static sector_t fd_get_blocks(struct se_device *dev) 606 { 607 struct fd_dev *fd_dev = dev->dev_ptr; 608 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, 609 dev->se_sub_dev->se_dev_attrib.block_size); 610 611 return blocks_long; 612 } 613 614 static struct se_subsystem_api fileio_template = { 615 .name = "fileio", 616 .owner = THIS_MODULE, 617 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 618 .write_cache_emulated = 1, 619 .fua_write_emulated = 1, 620 .attach_hba = fd_attach_hba, 621 .detach_hba = fd_detach_hba, 622 .allocate_virtdevice = fd_allocate_virtdevice, 623 .create_virtdevice = fd_create_virtdevice, 624 .free_device = fd_free_device, 625 .alloc_task = fd_alloc_task, 626 .do_task = fd_do_task, 627 .do_sync_cache = fd_emulate_sync_cache, 628 .free_task = fd_free_task, 629 .check_configfs_dev_params = fd_check_configfs_dev_params, 630 .set_configfs_dev_params = fd_set_configfs_dev_params, 631 .show_configfs_dev_params = fd_show_configfs_dev_params, 632 .get_device_rev = fd_get_device_rev, 633 .get_device_type = fd_get_device_type, 634 .get_blocks = fd_get_blocks, 635 }; 636 637 static int __init fileio_module_init(void) 638 { 639 return transport_subsystem_register(&fileio_template); 640 } 641 642 static void fileio_module_exit(void) 643 { 644 transport_subsystem_release(&fileio_template); 645 } 646 647 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); 648 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 649 MODULE_LICENSE("GPL"); 650 651 module_init(fileio_module_init); 652 module_exit(fileio_module_exit); 653