1 /******************************************************************************* 2 * Filename: target_core_iblock.c 3 * 4 * This file contains the Storage Engine <-> Linux BlockIO transport 5 * specific functions. 6 * 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 8 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 9 * Copyright (c) 2007-2010 Rising Tide Systems 10 * Copyright (c) 2008-2010 Linux-iSCSI.org 11 * 12 * Nicholas A. Bellinger <nab@kernel.org> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or 17 * (at your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 27 * 28 ******************************************************************************/ 29 30 #include <linux/string.h> 31 #include <linux/parser.h> 32 #include <linux/timer.h> 33 #include <linux/fs.h> 34 #include <linux/blkdev.h> 35 #include <linux/slab.h> 36 #include <linux/spinlock.h> 37 #include <linux/bio.h> 38 #include <linux/genhd.h> 39 #include <linux/file.h> 40 #include <linux/module.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_host.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_device.h> 46 #include <target/target_core_transport.h> 47 48 #include "target_core_iblock.h" 49 50 static struct se_subsystem_api iblock_template; 51 52 static void iblock_bio_done(struct bio *, int); 53 54 /* iblock_attach_hba(): (Part of se_subsystem_api_t template) 55 * 56 * 57 */ 58 static int iblock_attach_hba(struct se_hba *hba, u32 host_id) 59 { 60 struct iblock_hba *ib_host; 61 62 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 63 if (!ib_host) { 64 pr_err("Unable to allocate memory for" 65 " struct iblock_hba\n"); 66 return -ENOMEM; 67 } 68 69 ib_host->iblock_host_id = host_id; 70 71 hba->hba_ptr = ib_host; 72 73 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 74 " Generic Target Core Stack %s\n", hba->hba_id, 75 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 76 77 pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 78 hba->hba_id, ib_host->iblock_host_id); 79 80 return 0; 81 } 82 83 static void iblock_detach_hba(struct se_hba *hba) 84 { 85 struct iblock_hba *ib_host = hba->hba_ptr; 86 87 pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 88 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 89 90 kfree(ib_host); 91 hba->hba_ptr = NULL; 92 } 93 94 static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) 95 { 96 struct iblock_dev *ib_dev = NULL; 97 struct iblock_hba *ib_host = hba->hba_ptr; 98 99 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 100 if (!ib_dev) { 101 pr_err("Unable to allocate struct iblock_dev\n"); 102 return NULL; 103 } 104 ib_dev->ibd_host = ib_host; 105 106 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 107 108 return ib_dev; 109 } 110 111 static struct se_device *iblock_create_virtdevice( 112 struct se_hba *hba, 113 struct se_subsystem_dev *se_dev, 114 void *p) 115 { 116 struct iblock_dev *ib_dev = p; 117 struct se_device *dev; 118 struct se_dev_limits dev_limits; 119 struct block_device *bd = NULL; 120 struct request_queue *q; 121 struct queue_limits *limits; 122 u32 dev_flags = 0; 123 int ret = -EINVAL; 124 125 if (!ib_dev) { 126 pr_err("Unable to locate struct iblock_dev parameter\n"); 127 return ERR_PTR(ret); 128 } 129 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 130 /* 131 * These settings need to be made tunable.. 132 */ 133 ib_dev->ibd_bio_set = bioset_create(32, 64); 134 if (!ib_dev->ibd_bio_set) { 135 pr_err("IBLOCK: Unable to create bioset()\n"); 136 return ERR_PTR(-ENOMEM); 137 } 138 pr_debug("IBLOCK: Created bio_set()\n"); 139 /* 140 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 141 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 142 */ 143 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 144 ib_dev->ibd_udev_path); 145 146 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 147 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 148 if (IS_ERR(bd)) { 149 ret = PTR_ERR(bd); 150 goto failed; 151 } 152 /* 153 * Setup the local scope queue_limits from struct request_queue->limits 154 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 155 */ 156 q = bdev_get_queue(bd); 157 limits = &dev_limits.limits; 158 limits->logical_block_size = bdev_logical_block_size(bd); 159 limits->max_hw_sectors = queue_max_hw_sectors(q); 160 limits->max_sectors = queue_max_sectors(q); 161 dev_limits.hw_queue_depth = q->nr_requests; 162 dev_limits.queue_depth = q->nr_requests; 163 164 ib_dev->ibd_bd = bd; 165 166 dev = transport_add_device_to_core_hba(hba, 167 &iblock_template, se_dev, dev_flags, ib_dev, 168 &dev_limits, "IBLOCK", IBLOCK_VERSION); 169 if (!dev) 170 goto failed; 171 172 /* 173 * Check if the underlying struct block_device request_queue supports 174 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 175 * in ATA and we need to set TPE=1 176 */ 177 if (blk_queue_discard(q)) { 178 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 179 q->limits.max_discard_sectors; 180 /* 181 * Currently hardcoded to 1 in Linux/SCSI code.. 182 */ 183 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 184 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 185 q->limits.discard_granularity; 186 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 187 q->limits.discard_alignment; 188 189 pr_debug("IBLOCK: BLOCK Discard support available," 190 " disabled by default\n"); 191 } 192 193 if (blk_queue_nonrot(q)) 194 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 195 196 return dev; 197 198 failed: 199 if (ib_dev->ibd_bio_set) { 200 bioset_free(ib_dev->ibd_bio_set); 201 ib_dev->ibd_bio_set = NULL; 202 } 203 ib_dev->ibd_bd = NULL; 204 return ERR_PTR(ret); 205 } 206 207 static void iblock_free_device(void *p) 208 { 209 struct iblock_dev *ib_dev = p; 210 211 if (ib_dev->ibd_bd != NULL) 212 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 213 if (ib_dev->ibd_bio_set != NULL) 214 bioset_free(ib_dev->ibd_bio_set); 215 kfree(ib_dev); 216 } 217 218 static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) 219 { 220 return container_of(task, struct iblock_req, ib_task); 221 } 222 223 static struct se_task * 224 iblock_alloc_task(unsigned char *cdb) 225 { 226 struct iblock_req *ib_req; 227 228 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 229 if (!ib_req) { 230 pr_err("Unable to allocate memory for struct iblock_req\n"); 231 return NULL; 232 } 233 234 atomic_set(&ib_req->ib_bio_cnt, 0); 235 return &ib_req->ib_task; 236 } 237 238 static unsigned long long iblock_emulate_read_cap_with_block_size( 239 struct se_device *dev, 240 struct block_device *bd, 241 struct request_queue *q) 242 { 243 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 244 bdev_logical_block_size(bd)) - 1); 245 u32 block_size = bdev_logical_block_size(bd); 246 247 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 248 return blocks_long; 249 250 switch (block_size) { 251 case 4096: 252 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 253 case 2048: 254 blocks_long <<= 1; 255 break; 256 case 1024: 257 blocks_long <<= 2; 258 break; 259 case 512: 260 blocks_long <<= 3; 261 default: 262 break; 263 } 264 break; 265 case 2048: 266 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 267 case 4096: 268 blocks_long >>= 1; 269 break; 270 case 1024: 271 blocks_long <<= 1; 272 break; 273 case 512: 274 blocks_long <<= 2; 275 break; 276 default: 277 break; 278 } 279 break; 280 case 1024: 281 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 282 case 4096: 283 blocks_long >>= 2; 284 break; 285 case 2048: 286 blocks_long >>= 1; 287 break; 288 case 512: 289 blocks_long <<= 1; 290 break; 291 default: 292 break; 293 } 294 break; 295 case 512: 296 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 297 case 4096: 298 blocks_long >>= 3; 299 break; 300 case 2048: 301 blocks_long >>= 2; 302 break; 303 case 1024: 304 blocks_long >>= 1; 305 break; 306 default: 307 break; 308 } 309 break; 310 default: 311 break; 312 } 313 314 return blocks_long; 315 } 316 317 static void iblock_end_io_flush(struct bio *bio, int err) 318 { 319 struct se_cmd *cmd = bio->bi_private; 320 321 if (err) 322 pr_err("IBLOCK: cache flush failed: %d\n", err); 323 324 if (cmd) 325 transport_complete_sync_cache(cmd, err == 0); 326 bio_put(bio); 327 } 328 329 /* 330 * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must 331 * always flush the whole cache. 332 */ 333 static void iblock_emulate_sync_cache(struct se_task *task) 334 { 335 struct se_cmd *cmd = task->task_se_cmd; 336 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 337 int immed = (cmd->t_task_cdb[1] & 0x2); 338 struct bio *bio; 339 340 /* 341 * If the Immediate bit is set, queue up the GOOD response 342 * for this SYNCHRONIZE_CACHE op. 343 */ 344 if (immed) 345 transport_complete_sync_cache(cmd, 1); 346 347 bio = bio_alloc(GFP_KERNEL, 0); 348 bio->bi_end_io = iblock_end_io_flush; 349 bio->bi_bdev = ib_dev->ibd_bd; 350 if (!immed) 351 bio->bi_private = cmd; 352 submit_bio(WRITE_FLUSH, bio); 353 } 354 355 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) 356 { 357 struct iblock_dev *ibd = dev->dev_ptr; 358 struct block_device *bd = ibd->ibd_bd; 359 int barrier = 0; 360 361 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); 362 } 363 364 static void iblock_free_task(struct se_task *task) 365 { 366 kfree(IBLOCK_REQ(task)); 367 } 368 369 enum { 370 Opt_udev_path, Opt_force, Opt_err 371 }; 372 373 static match_table_t tokens = { 374 {Opt_udev_path, "udev_path=%s"}, 375 {Opt_force, "force=%d"}, 376 {Opt_err, NULL} 377 }; 378 379 static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, 380 struct se_subsystem_dev *se_dev, 381 const char *page, ssize_t count) 382 { 383 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 384 char *orig, *ptr, *arg_p, *opts; 385 substring_t args[MAX_OPT_ARGS]; 386 int ret = 0, token; 387 388 opts = kstrdup(page, GFP_KERNEL); 389 if (!opts) 390 return -ENOMEM; 391 392 orig = opts; 393 394 while ((ptr = strsep(&opts, ",")) != NULL) { 395 if (!*ptr) 396 continue; 397 398 token = match_token(ptr, tokens, args); 399 switch (token) { 400 case Opt_udev_path: 401 if (ib_dev->ibd_bd) { 402 pr_err("Unable to set udev_path= while" 403 " ib_dev->ibd_bd exists\n"); 404 ret = -EEXIST; 405 goto out; 406 } 407 arg_p = match_strdup(&args[0]); 408 if (!arg_p) { 409 ret = -ENOMEM; 410 break; 411 } 412 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 413 "%s", arg_p); 414 kfree(arg_p); 415 pr_debug("IBLOCK: Referencing UDEV path: %s\n", 416 ib_dev->ibd_udev_path); 417 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 418 break; 419 case Opt_force: 420 break; 421 default: 422 break; 423 } 424 } 425 426 out: 427 kfree(orig); 428 return (!ret) ? count : ret; 429 } 430 431 static ssize_t iblock_check_configfs_dev_params( 432 struct se_hba *hba, 433 struct se_subsystem_dev *se_dev) 434 { 435 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 436 437 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 438 pr_err("Missing udev_path= parameters for IBLOCK\n"); 439 return -EINVAL; 440 } 441 442 return 0; 443 } 444 445 static ssize_t iblock_show_configfs_dev_params( 446 struct se_hba *hba, 447 struct se_subsystem_dev *se_dev, 448 char *b) 449 { 450 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 451 struct block_device *bd = ibd->ibd_bd; 452 char buf[BDEVNAME_SIZE]; 453 ssize_t bl = 0; 454 455 if (bd) 456 bl += sprintf(b + bl, "iBlock device: %s", 457 bdevname(bd, buf)); 458 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { 459 bl += sprintf(b + bl, " UDEV PATH: %s\n", 460 ibd->ibd_udev_path); 461 } else 462 bl += sprintf(b + bl, "\n"); 463 464 bl += sprintf(b + bl, " "); 465 if (bd) { 466 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 467 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 468 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 469 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 470 } else { 471 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 472 } 473 474 return bl; 475 } 476 477 static void iblock_bio_destructor(struct bio *bio) 478 { 479 struct se_task *task = bio->bi_private; 480 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; 481 482 bio_free(bio, ib_dev->ibd_bio_set); 483 } 484 485 static struct bio * 486 iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) 487 { 488 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; 489 struct iblock_req *ib_req = IBLOCK_REQ(task); 490 struct bio *bio; 491 492 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 493 if (!bio) { 494 pr_err("Unable to allocate memory for bio\n"); 495 return NULL; 496 } 497 498 pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" 499 " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); 500 pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); 501 502 bio->bi_bdev = ib_dev->ibd_bd; 503 bio->bi_private = task; 504 bio->bi_destructor = iblock_bio_destructor; 505 bio->bi_end_io = &iblock_bio_done; 506 bio->bi_sector = lba; 507 atomic_inc(&ib_req->ib_bio_cnt); 508 509 pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); 510 pr_debug("Set ib_req->ib_bio_cnt: %d\n", 511 atomic_read(&ib_req->ib_bio_cnt)); 512 return bio; 513 } 514 515 static int iblock_do_task(struct se_task *task) 516 { 517 struct se_cmd *cmd = task->task_se_cmd; 518 struct se_device *dev = cmd->se_dev; 519 struct bio *bio; 520 struct bio_list list; 521 struct scatterlist *sg; 522 u32 i, sg_num = task->task_sg_nents; 523 sector_t block_lba; 524 struct blk_plug plug; 525 int rw; 526 527 if (task->task_data_direction == DMA_TO_DEVICE) { 528 /* 529 * Force data to disk if we pretend to not have a volatile 530 * write cache, or the initiator set the Force Unit Access bit. 531 */ 532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 534 task->task_se_cmd->t_tasks_fua)) 535 rw = WRITE_FUA; 536 else 537 rw = WRITE; 538 } else { 539 rw = READ; 540 } 541 542 /* 543 * Do starting conversion up from non 512-byte blocksize with 544 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. 545 */ 546 if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) 547 block_lba = (task->task_lba << 3); 548 else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) 549 block_lba = (task->task_lba << 2); 550 else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) 551 block_lba = (task->task_lba << 1); 552 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 553 block_lba = task->task_lba; 554 else { 555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 557 return PYX_TRANSPORT_LU_COMM_FAILURE; 558 } 559 560 bio = iblock_get_bio(task, block_lba, sg_num); 561 if (!bio) 562 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 563 564 bio_list_init(&list); 565 bio_list_add(&list, bio); 566 567 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 568 /* 569 * XXX: if the length the device accepts is shorter than the 570 * length of the S/G list entry this will cause and 571 * endless loop. Better hope no driver uses huge pages. 572 */ 573 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 574 != sg->length) { 575 bio = iblock_get_bio(task, block_lba, sg_num); 576 if (!bio) 577 goto fail; 578 bio_list_add(&list, bio); 579 } 580 581 /* Always in 512 byte units for Linux/Block */ 582 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 583 sg_num--; 584 } 585 586 blk_start_plug(&plug); 587 while ((bio = bio_list_pop(&list))) 588 submit_bio(rw, bio); 589 blk_finish_plug(&plug); 590 591 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 592 593 fail: 594 while ((bio = bio_list_pop(&list))) 595 bio_put(bio); 596 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 597 } 598 599 static u32 iblock_get_device_rev(struct se_device *dev) 600 { 601 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 602 } 603 604 static u32 iblock_get_device_type(struct se_device *dev) 605 { 606 return TYPE_DISK; 607 } 608 609 static sector_t iblock_get_blocks(struct se_device *dev) 610 { 611 struct iblock_dev *ibd = dev->dev_ptr; 612 struct block_device *bd = ibd->ibd_bd; 613 struct request_queue *q = bdev_get_queue(bd); 614 615 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 616 } 617 618 static void iblock_bio_done(struct bio *bio, int err) 619 { 620 struct se_task *task = bio->bi_private; 621 struct iblock_req *ibr = IBLOCK_REQ(task); 622 623 /* 624 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 625 */ 626 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) 627 err = -EIO; 628 629 if (err != 0) { 630 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," 631 " err: %d\n", bio, err); 632 /* 633 * Bump the ib_bio_err_cnt and release bio. 634 */ 635 atomic_inc(&ibr->ib_bio_err_cnt); 636 smp_mb__after_atomic_inc(); 637 } 638 639 bio_put(bio); 640 641 if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) 642 return; 643 644 pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 645 task, bio, task->task_lba, 646 (unsigned long long)bio->bi_sector, err); 647 648 transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); 649 } 650 651 static struct se_subsystem_api iblock_template = { 652 .name = "iblock", 653 .owner = THIS_MODULE, 654 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 655 .write_cache_emulated = 1, 656 .fua_write_emulated = 1, 657 .attach_hba = iblock_attach_hba, 658 .detach_hba = iblock_detach_hba, 659 .allocate_virtdevice = iblock_allocate_virtdevice, 660 .create_virtdevice = iblock_create_virtdevice, 661 .free_device = iblock_free_device, 662 .alloc_task = iblock_alloc_task, 663 .do_task = iblock_do_task, 664 .do_discard = iblock_do_discard, 665 .do_sync_cache = iblock_emulate_sync_cache, 666 .free_task = iblock_free_task, 667 .check_configfs_dev_params = iblock_check_configfs_dev_params, 668 .set_configfs_dev_params = iblock_set_configfs_dev_params, 669 .show_configfs_dev_params = iblock_show_configfs_dev_params, 670 .get_device_rev = iblock_get_device_rev, 671 .get_device_type = iblock_get_device_type, 672 .get_blocks = iblock_get_blocks, 673 }; 674 675 static int __init iblock_module_init(void) 676 { 677 return transport_subsystem_register(&iblock_template); 678 } 679 680 static void iblock_module_exit(void) 681 { 682 transport_subsystem_release(&iblock_template); 683 } 684 685 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); 686 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 687 MODULE_LICENSE("GPL"); 688 689 module_init(iblock_module_init); 690 module_exit(iblock_module_exit); 691