/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
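 * The dout side (if any) is mapped onto the primary request; for
 * bidirectional transfers a second request is allocated and chained
 * via rq->next_rq to carry the din buffer.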
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
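		/* detach it from the done list while bd->lock is still held */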
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
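 * -ENOSPC, -ENODATA and -EAGAIN are expected results of the normal
 * command flow and are masked by a partial byte count in bsg_read()
 * and bsg_write().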
573 */ 574 static inline int err_block_err(int ret) 575 { 576 if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) 577 return 1; 578 579 return 0; 580 } 581 582 static ssize_t 583 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 584 { 585 struct bsg_device *bd = file->private_data; 586 int ret; 587 ssize_t bytes_read; 588 589 dprintk("%s: read %Zd bytes\n", bd->name, count); 590 591 bsg_set_block(bd, file); 592 bytes_read = 0; 593 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); 594 *ppos = bytes_read; 595 596 if (!bytes_read || (bytes_read && err_block_err(ret))) 597 bytes_read = ret; 598 599 return bytes_read; 600 } 601 602 static int __bsg_write(struct bsg_device *bd, const char __user *buf, 603 size_t count, ssize_t *bytes_written) 604 { 605 struct bsg_command *bc; 606 struct request *rq; 607 int ret, nr_commands; 608 609 if (count % sizeof(struct sg_io_v4)) 610 return -EINVAL; 611 612 nr_commands = count / sizeof(struct sg_io_v4); 613 rq = NULL; 614 bc = NULL; 615 ret = 0; 616 while (nr_commands) { 617 struct request_queue *q = bd->queue; 618 619 bc = bsg_alloc_command(bd); 620 if (IS_ERR(bc)) { 621 ret = PTR_ERR(bc); 622 bc = NULL; 623 break; 624 } 625 626 if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { 627 ret = -EFAULT; 628 break; 629 } 630 631 /* 632 * get a request, fill in the blanks, and add to request queue 633 */ 634 rq = bsg_map_hdr(bd, &bc->hdr); 635 if (IS_ERR(rq)) { 636 ret = PTR_ERR(rq); 637 rq = NULL; 638 break; 639 } 640 641 bsg_add_command(bd, q, bc, rq); 642 bc = NULL; 643 rq = NULL; 644 nr_commands--; 645 buf += sizeof(struct sg_io_v4); 646 *bytes_written += sizeof(struct sg_io_v4); 647 } 648 649 if (bc) 650 bsg_free_command(bc); 651 652 return ret; 653 } 654 655 static ssize_t 656 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 657 { 658 struct bsg_device *bd = file->private_data; 659 ssize_t bytes_written; 660 int ret; 661 662 dprintk("%s: write %Zd bytes\n", bd->name, count); 663 664 bsg_set_block(bd, file); 665 bsg_set_write_perm(bd, file); 666 667 bytes_written = 0; 668 ret = __bsg_write(bd, buf, count, &bytes_written); 669 *ppos = bytes_written; 670 671 /* 672 * return bytes written on non-fatal errors 673 */ 674 if (!bytes_written || (bytes_written && err_block_err(ret))) 675 bytes_written = ret; 676 677 dprintk("%s: returning %Zd\n", bd->name, bytes_written); 678 return bytes_written; 679 } 680 681 static struct bsg_device *bsg_alloc_device(void) 682 { 683 struct bsg_device *bd; 684 685 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); 686 if (unlikely(!bd)) 687 return NULL; 688 689 spin_lock_init(&bd->lock); 690 691 bd->max_queue = BSG_DEFAULT_CMDS; 692 693 INIT_LIST_HEAD(&bd->busy_list); 694 INIT_LIST_HEAD(&bd->done_list); 695 INIT_HLIST_NODE(&bd->dev_list); 696 697 init_waitqueue_head(&bd->wq_free); 698 init_waitqueue_head(&bd->wq_done); 699 return bd; 700 } 701 702 static void bsg_kref_release_function(struct kref *kref) 703 { 704 struct bsg_class_device *bcd = 705 container_of(kref, struct bsg_class_device, ref); 706 707 if (bcd->release) 708 bcd->release(bcd->parent); 709 710 put_device(bcd->parent); 711 } 712 713 static int bsg_put_device(struct bsg_device *bd) 714 { 715 int ret = 0, do_free; 716 struct request_queue *q = bd->queue; 717 718 mutex_lock(&bsg_mutex); 719 720 do_free = atomic_dec_and_test(&bd->ref_count); 721 if (!do_free) 722 goto out; 723 724 dprintk("%s: tearing down\n", bd->name); 725 726 /* 727 * close can always block 728 */ 729 set_bit(BSG_F_BLOCK, 

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
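	/*
	 * SG_SET_COMMAND_Q adjusts the per-device limit on queued commands
	 * (bd->max_queue, BSG_DEFAULT_CMDS by default).
	 */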
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = parent->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);