/*******************************************************************************
 * Filename: target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host;

	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
	if (!ib_host) {
		pr_err("Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}

	ib_host->iblock_host_id = host_id;

	hba->hba_ptr = ib_host;

	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
		hba->hba_id, ib_host->iblock_host_id);

	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	kfree(ib_host);
	hba->hba_ptr = NULL;
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;
	struct iblock_hba *ib_host = hba->hba_ptr;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}
	ib_dev->ibd_host = ib_host;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}
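
/*	iblock_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *	Claims the struct block_device named by the udev_path= configfs
 *	parameter via blkdev_get_by_path(FMODE_EXCL), and copies its
 *	request_queue limits into the se_dev_limits passed to
 *	transport_add_device_to_core_hba().
 */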
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 64);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}

static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ib_req) {
		pr_err("Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	/*
	 * Rescale the reported last LBA when the exported SCSI block size
	 * differs from the backing device's logical block size, e.g. a
	 * 4096-byte backing device exported with a 512-byte block size is
	 * shifted up by 3 (8x as many exported LBAs).
	 */
	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
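 *
 * If the IMMED bit is set, GOOD status is returned before the flush bio is
 * submitted and bi_private is left NULL, so iblock_end_io_flush() only logs
 * errors; otherwise status is not returned until the flush completes.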
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}

static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

static void iblock_free_task(struct se_task *task)
{
	kfree(IBLOCK_REQ(task));
}

enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, " UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}

static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->ib_bio_cnt);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}

static int iblock_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	bio = iblock_get_bio(task, block_lba, sg_num);
	if (!bio) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOMEM;
	}

	bio_list_init(&list);
	bio_list_add(&list, bio);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop.  Better hope no driver uses huge pages.
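		 *
		 * When bio_add_page() cannot take the whole segment (e.g. the
		 * current bio is full), a new bio starting at the current
		 * block_lba is allocated, chained onto the local bio_list,
		 * and the same segment is retried.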
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(task, block_lba, sg_num);
			if (!bio)
				goto fail;
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);

	return 0;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;

	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		 task, bio, task->task_lba,
		 (unsigned long long)bio->bi_sector, err);

	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);