/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return &fd_dev->dev;
}

static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
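	/*
	 * Note: with O_DSYNC each vfs_iter_write() completes as if followed
	 * by fdatasync(), i.e. the data and any metadata required to read it
	 * back are on stable storage before the write returns; pure
	 * mtime/atime updates, which O_SYNC would also flush, are skipped.
	 */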
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the filesystem buffer cache as a WriteCache
	 * mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from the underlying struct
		 * block_device.
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);

		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
						      fd_dev->fd_block_size))
			pr_debug("FILEIO: BLOCK Discard support available,"
				 " disabled by default\n");
		/*
		 * Enable WRITE_SAME emulation and use 0xFFFF, since the
		 * smaller WRITE_SAME(10) only has a two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (blk_queue_nonrot(q))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
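		/*
		 * These limits are advertised to initiators via the Block
		 * Limits VPD page (0xB0) as the MAXIMUM UNMAP LBA COUNT,
		 * MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT, and OPTIMAL UNMAP
		 * GRANULARITY fields.
		 */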
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs
		 * (NoLB) based upon the struct iovec limit for vfs_writev().
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}

static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_rw bio_vec[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos);
	else
		ret = vfs_iter_read(fd, &iter, &pos);

	kfree(bvec);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				return (ret < 0 ? ret : -EINVAL);
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
				return ret;
			}
		}
	}
	return 1;
}
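/*
 * SYNCHRONIZE_CACHE (10/16): when the IMMED bit (CDB byte 1, bit 1) is set,
 * GOOD status is queued before the flush is performed; otherwise completion
 * status reflects the result of vfs_fsync_range().
 */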
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}

static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);

		if (ret != len) {
			pr_err("kernel_write() to prot file failed: %zd\n",
			       ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}
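/*
 * T10 PI note: a protection tuple filled with 0xff carries the all-ones
 * application tag, which verification treats as an escape that disables
 * checking for that sector; unmapped LBAs are marked this way below.
 */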
static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}
	memset(buf, 0xff, PAGE_SIZE);

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = inode->i_bdev;
		struct se_device *dev = cmd->se_dev;

		ret = blkdev_issue_discard(bdev,
					   target_to_linux_sector(dev, lba),
					   target_to_linux_sector(dev, nolb),
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a regular file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
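	/*
	 * With DIF emulation enabled, READs fetch the protection
	 * information first and verify it after the data has been read,
	 * while WRITEs verify the incoming data against its protection
	 * information before writing and persist the PI only on success.
	 */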
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_rw() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};

static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
					FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
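/*
 * Example (a sketch; paths follow the usual LIO configfs layout, and the
 * backstore directory names are up to the administrator):
 *
 *   mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 *
 * Appending fd_buffered_io=1 to the control string enables the buffered
 * WriteCache mode parsed above.
 */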
static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct
	 * block_device, ensure dev_size is always based on the current inode
	 * size in order to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	memset(buf, 0xff, unit_size);
	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};
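/*
 * sbc_parse_cdb() decodes the SBC CDB and dispatches READ/WRITE,
 * SYNCHRONIZE CACHE, WRITE SAME, and UNMAP to the handlers wired up above.
 */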
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);