1 /* 2 * bsg.c - block layer implementation of the sg v4 interface 3 * 4 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs 5 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> 6 * 7 * This file is subject to the terms and conditions of the GNU General Public 8 * License version 2. See the file "COPYING" in the main directory of this 9 * archive for more details. 10 * 11 */ 12 #include <linux/module.h> 13 #include <linux/init.h> 14 #include <linux/file.h> 15 #include <linux/blkdev.h> 16 #include <linux/poll.h> 17 #include <linux/cdev.h> 18 #include <linux/percpu.h> 19 #include <linux/uio.h> 20 #include <linux/idr.h> 21 #include <linux/bsg.h> 22 #include <linux/smp_lock.h> 23 24 #include <scsi/scsi.h> 25 #include <scsi/scsi_ioctl.h> 26 #include <scsi/scsi_cmnd.h> 27 #include <scsi/scsi_device.h> 28 #include <scsi/scsi_driver.h> 29 #include <scsi/sg.h> 30 31 #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" 32 #define BSG_VERSION "0.4" 33 34 struct bsg_device { 35 struct request_queue *queue; 36 spinlock_t lock; 37 struct list_head busy_list; 38 struct list_head done_list; 39 struct hlist_node dev_list; 40 atomic_t ref_count; 41 int queued_cmds; 42 int done_cmds; 43 wait_queue_head_t wq_done; 44 wait_queue_head_t wq_free; 45 char name[20]; 46 int max_queue; 47 unsigned long flags; 48 }; 49 50 enum { 51 BSG_F_BLOCK = 1, 52 }; 53 54 #define BSG_DEFAULT_CMDS 64 55 #define BSG_MAX_DEVS 32768 56 57 #undef BSG_DEBUG 58 59 #ifdef BSG_DEBUG 60 #define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) 61 #else 62 #define dprintk(fmt, args...) 63 #endif 64 65 static DEFINE_MUTEX(bsg_mutex); 66 static DEFINE_IDR(bsg_minor_idr); 67 68 #define BSG_LIST_ARRAY_SIZE 8 69 static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; 70 71 static struct class *bsg_class; 72 static int bsg_major; 73 74 static struct kmem_cache *bsg_cmd_cachep; 75 76 /* 77 * our internal command type 78 */ 79 struct bsg_command { 80 struct bsg_device *bd; 81 struct list_head list; 82 struct request *rq; 83 struct bio *bio; 84 struct bio *bidi_bio; 85 int err; 86 struct sg_io_v4 hdr; 87 char sense[SCSI_SENSE_BUFFERSIZE]; 88 }; 89 90 static void bsg_free_command(struct bsg_command *bc) 91 { 92 struct bsg_device *bd = bc->bd; 93 unsigned long flags; 94 95 kmem_cache_free(bsg_cmd_cachep, bc); 96 97 spin_lock_irqsave(&bd->lock, flags); 98 bd->queued_cmds--; 99 spin_unlock_irqrestore(&bd->lock, flags); 100 101 wake_up(&bd->wq_free); 102 } 103 104 static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) 105 { 106 struct bsg_command *bc = ERR_PTR(-EINVAL); 107 108 spin_lock_irq(&bd->lock); 109 110 if (bd->queued_cmds >= bd->max_queue) 111 goto out; 112 113 bd->queued_cmds++; 114 spin_unlock_irq(&bd->lock); 115 116 bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); 117 if (unlikely(!bc)) { 118 spin_lock_irq(&bd->lock); 119 bd->queued_cmds--; 120 bc = ERR_PTR(-ENOMEM); 121 goto out; 122 } 123 124 bc->bd = bd; 125 INIT_LIST_HEAD(&bc->list); 126 dprintk("%s: returning free cmd %p\n", bd->name, bc); 127 return bc; 128 out: 129 spin_unlock_irq(&bd->lock); 130 return bc; 131 } 132 133 static inline struct hlist_head *bsg_dev_idx_hash(int index) 134 { 135 return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; 136 } 137 138 static int bsg_io_schedule(struct bsg_device *bd) 139 { 140 DEFINE_WAIT(wait); 141 int ret = 0; 142 143 spin_lock_irq(&bd->lock); 144 145 BUG_ON(bd->done_cmds > bd->queued_cmds); 146 147 /* 148 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no 149 * work to do", even though we return -ENOSPC after this same test 150 * during bsg_write() -- there, it means our buffer can't have more 151 * bsg_commands added to it, thus has no space left. 152 */ 153 if (bd->done_cmds == bd->queued_cmds) { 154 ret = -ENODATA; 155 goto unlock; 156 } 157 158 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { 159 ret = -EAGAIN; 160 goto unlock; 161 } 162 163 prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); 164 spin_unlock_irq(&bd->lock); 165 io_schedule(); 166 finish_wait(&bd->wq_done, &wait); 167 168 return ret; 169 unlock: 170 spin_unlock_irq(&bd->lock); 171 return ret; 172 } 173 174 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, 175 struct sg_io_v4 *hdr, struct bsg_device *bd, 176 fmode_t has_write_perm) 177 { 178 if (hdr->request_len > BLK_MAX_CDB) { 179 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); 180 if (!rq->cmd) 181 return -ENOMEM; 182 } 183 184 if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, 185 hdr->request_len)) 186 return -EFAULT; 187 188 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { 189 if (blk_verify_command(rq->cmd, has_write_perm)) 190 return -EPERM; 191 } else if (!capable(CAP_SYS_RAWIO)) 192 return -EPERM; 193 194 /* 195 * fill in request structure 196 */ 197 rq->cmd_len = hdr->request_len; 198 rq->cmd_type = REQ_TYPE_BLOCK_PC; 199 200 rq->timeout = (hdr->timeout * HZ) / 1000; 201 if (!rq->timeout) 202 rq->timeout = q->sg_timeout; 203 if (!rq->timeout) 204 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 205 if (rq->timeout < BLK_MIN_SG_TIMEOUT) 206 rq->timeout = BLK_MIN_SG_TIMEOUT; 207 208 return 0; 209 } 210 211 /* 212 * Check if sg_io_v4 from user is allowed and valid 213 */ 214 static int 215 bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) 216 { 217 int ret = 0; 218 219 if (hdr->guard != 'Q') 220 return -EINVAL; 221 222 switch (hdr->protocol) { 223 case BSG_PROTOCOL_SCSI: 224 switch (hdr->subprotocol) { 225 case BSG_SUB_PROTOCOL_SCSI_CMD: 226 case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: 227 break; 228 default: 229 ret = -EINVAL; 230 } 231 break; 232 default: 233 ret = -EINVAL; 234 } 235 236 *rw = hdr->dout_xfer_len ? WRITE : READ; 237 return ret; 238 } 239 240 /* 241 * map sg_io_v4 to a request. 242 */ 243 static struct request * 244 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, 245 u8 *sense) 246 { 247 struct request_queue *q = bd->queue; 248 struct request *rq, *next_rq = NULL; 249 int ret, rw; 250 unsigned int dxfer_len; 251 void *dxferp = NULL; 252 253 dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, 254 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, 255 hdr->din_xfer_len); 256 257 ret = bsg_validate_sgv4_hdr(q, hdr, &rw); 258 if (ret) 259 return ERR_PTR(ret); 260 261 /* 262 * map scatter-gather elements seperately and string them to request 263 */ 264 rq = blk_get_request(q, rw, GFP_KERNEL); 265 if (!rq) 266 return ERR_PTR(-ENOMEM); 267 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); 268 if (ret) 269 goto out; 270 271 if (rw == WRITE && hdr->din_xfer_len) { 272 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { 273 ret = -EOPNOTSUPP; 274 goto out; 275 } 276 277 next_rq = blk_get_request(q, READ, GFP_KERNEL); 278 if (!next_rq) { 279 ret = -ENOMEM; 280 goto out; 281 } 282 rq->next_rq = next_rq; 283 next_rq->cmd_type = rq->cmd_type; 284 285 dxferp = (void*)(unsigned long)hdr->din_xferp; 286 ret = blk_rq_map_user(q, next_rq, NULL, dxferp, 287 hdr->din_xfer_len, GFP_KERNEL); 288 if (ret) 289 goto out; 290 } 291 292 if (hdr->dout_xfer_len) { 293 dxfer_len = hdr->dout_xfer_len; 294 dxferp = (void*)(unsigned long)hdr->dout_xferp; 295 } else if (hdr->din_xfer_len) { 296 dxfer_len = hdr->din_xfer_len; 297 dxferp = (void*)(unsigned long)hdr->din_xferp; 298 } else 299 dxfer_len = 0; 300 301 if (dxfer_len) { 302 ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, 303 GFP_KERNEL); 304 if (ret) 305 goto out; 306 } 307 308 rq->sense = sense; 309 rq->sense_len = 0; 310 311 return rq; 312 out: 313 if (rq->cmd != rq->__cmd) 314 kfree(rq->cmd); 315 blk_put_request(rq); 316 if (next_rq) { 317 blk_rq_unmap_user(next_rq->bio); 318 blk_put_request(next_rq); 319 } 320 return ERR_PTR(ret); 321 } 322 323 /* 324 * async completion call-back from the block layer, when scsi/ide/whatever 325 * calls end_that_request_last() on a request 326 */ 327 static void bsg_rq_end_io(struct request *rq, int uptodate) 328 { 329 struct bsg_command *bc = rq->end_io_data; 330 struct bsg_device *bd = bc->bd; 331 unsigned long flags; 332 333 dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", 334 bd->name, rq, bc, bc->bio, uptodate); 335 336 bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); 337 338 spin_lock_irqsave(&bd->lock, flags); 339 list_move_tail(&bc->list, &bd->done_list); 340 bd->done_cmds++; 341 spin_unlock_irqrestore(&bd->lock, flags); 342 343 wake_up(&bd->wq_done); 344 } 345 346 /* 347 * do final setup of a 'bc' and submit the matching 'rq' to the block 348 * layer for io 349 */ 350 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, 351 struct bsg_command *bc, struct request *rq) 352 { 353 int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); 354 355 /* 356 * add bc command to busy queue and submit rq for io 357 */ 358 bc->rq = rq; 359 bc->bio = rq->bio; 360 if (rq->next_rq) 361 bc->bidi_bio = rq->next_rq->bio; 362 bc->hdr.duration = jiffies; 363 spin_lock_irq(&bd->lock); 364 list_add_tail(&bc->list, &bd->busy_list); 365 spin_unlock_irq(&bd->lock); 366 367 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); 368 369 rq->end_io_data = bc; 370 blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); 371 } 372 373 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) 374 { 375 struct bsg_command *bc = NULL; 376 377 spin_lock_irq(&bd->lock); 378 if (bd->done_cmds) { 379 bc = list_first_entry(&bd->done_list, struct bsg_command, list); 380 list_del(&bc->list); 381 bd->done_cmds--; 382 } 383 spin_unlock_irq(&bd->lock); 384 385 return bc; 386 } 387 388 /* 389 * Get a finished command from the done list 390 */ 391 static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) 392 { 393 struct bsg_command *bc; 394 int ret; 395 396 do { 397 bc = bsg_next_done_cmd(bd); 398 if (bc) 399 break; 400 401 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { 402 bc = ERR_PTR(-EAGAIN); 403 break; 404 } 405 406 ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); 407 if (ret) { 408 bc = ERR_PTR(-ERESTARTSYS); 409 break; 410 } 411 } while (1); 412 413 dprintk("%s: returning done %p\n", bd->name, bc); 414 415 return bc; 416 } 417 418 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, 419 struct bio *bio, struct bio *bidi_bio) 420 { 421 int ret = 0; 422 423 dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); 424 /* 425 * fill in all the output members 426 */ 427 hdr->device_status = status_byte(rq->errors); 428 hdr->transport_status = host_byte(rq->errors); 429 hdr->driver_status = driver_byte(rq->errors); 430 hdr->info = 0; 431 if (hdr->device_status || hdr->transport_status || hdr->driver_status) 432 hdr->info |= SG_INFO_CHECK; 433 hdr->response_len = 0; 434 435 if (rq->sense_len && hdr->response) { 436 int len = min_t(unsigned int, hdr->max_response_len, 437 rq->sense_len); 438 439 ret = copy_to_user((void*)(unsigned long)hdr->response, 440 rq->sense, len); 441 if (!ret) 442 hdr->response_len = len; 443 else 444 ret = -EFAULT; 445 } 446 447 if (rq->next_rq) { 448 hdr->dout_resid = rq->resid_len; 449 hdr->din_resid = rq->next_rq->resid_len; 450 blk_rq_unmap_user(bidi_bio); 451 blk_put_request(rq->next_rq); 452 } else if (rq_data_dir(rq) == READ) 453 hdr->din_resid = rq->resid_len; 454 else 455 hdr->dout_resid = rq->resid_len; 456 457 /* 458 * If the request generated a negative error number, return it 459 * (providing we aren't already returning an error); if it's 460 * just a protocol response (i.e. non negative), that gets 461 * processed above. 462 */ 463 if (!ret && rq->errors < 0) 464 ret = rq->errors; 465 466 blk_rq_unmap_user(bio); 467 if (rq->cmd != rq->__cmd) 468 kfree(rq->cmd); 469 blk_put_request(rq); 470 471 return ret; 472 } 473 474 static int bsg_complete_all_commands(struct bsg_device *bd) 475 { 476 struct bsg_command *bc; 477 int ret, tret; 478 479 dprintk("%s: entered\n", bd->name); 480 481 /* 482 * wait for all commands to complete 483 */ 484 ret = 0; 485 do { 486 ret = bsg_io_schedule(bd); 487 /* 488 * look for -ENODATA specifically -- we'll sometimes get 489 * -ERESTARTSYS when we've taken a signal, but we can't 490 * return until we're done freeing the queue, so ignore 491 * it. The signal will get handled when we're done freeing 492 * the bsg_device. 493 */ 494 } while (ret != -ENODATA); 495 496 /* 497 * discard done commands 498 */ 499 ret = 0; 500 do { 501 spin_lock_irq(&bd->lock); 502 if (!bd->queued_cmds) { 503 spin_unlock_irq(&bd->lock); 504 break; 505 } 506 spin_unlock_irq(&bd->lock); 507 508 bc = bsg_get_done_cmd(bd); 509 if (IS_ERR(bc)) 510 break; 511 512 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, 513 bc->bidi_bio); 514 if (!ret) 515 ret = tret; 516 517 bsg_free_command(bc); 518 } while (1); 519 520 return ret; 521 } 522 523 static int 524 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, 525 const struct iovec *iov, ssize_t *bytes_read) 526 { 527 struct bsg_command *bc; 528 int nr_commands, ret; 529 530 if (count % sizeof(struct sg_io_v4)) 531 return -EINVAL; 532 533 ret = 0; 534 nr_commands = count / sizeof(struct sg_io_v4); 535 while (nr_commands) { 536 bc = bsg_get_done_cmd(bd); 537 if (IS_ERR(bc)) { 538 ret = PTR_ERR(bc); 539 break; 540 } 541 542 /* 543 * this is the only case where we need to copy data back 544 * after completing the request. so do that here, 545 * bsg_complete_work() cannot do that for us 546 */ 547 ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, 548 bc->bidi_bio); 549 550 if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) 551 ret = -EFAULT; 552 553 bsg_free_command(bc); 554 555 if (ret) 556 break; 557 558 buf += sizeof(struct sg_io_v4); 559 *bytes_read += sizeof(struct sg_io_v4); 560 nr_commands--; 561 } 562 563 return ret; 564 } 565 566 static inline void bsg_set_block(struct bsg_device *bd, struct file *file) 567 { 568 if (file->f_flags & O_NONBLOCK) 569 clear_bit(BSG_F_BLOCK, &bd->flags); 570 else 571 set_bit(BSG_F_BLOCK, &bd->flags); 572 } 573 574 /* 575 * Check if the error is a "real" error that we should return. 576 */ 577 static inline int err_block_err(int ret) 578 { 579 if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) 580 return 1; 581 582 return 0; 583 } 584 585 static ssize_t 586 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 587 { 588 struct bsg_device *bd = file->private_data; 589 int ret; 590 ssize_t bytes_read; 591 592 dprintk("%s: read %Zd bytes\n", bd->name, count); 593 594 bsg_set_block(bd, file); 595 596 bytes_read = 0; 597 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); 598 *ppos = bytes_read; 599 600 if (!bytes_read || (bytes_read && err_block_err(ret))) 601 bytes_read = ret; 602 603 return bytes_read; 604 } 605 606 static int __bsg_write(struct bsg_device *bd, const char __user *buf, 607 size_t count, ssize_t *bytes_written, 608 fmode_t has_write_perm) 609 { 610 struct bsg_command *bc; 611 struct request *rq; 612 int ret, nr_commands; 613 614 if (count % sizeof(struct sg_io_v4)) 615 return -EINVAL; 616 617 nr_commands = count / sizeof(struct sg_io_v4); 618 rq = NULL; 619 bc = NULL; 620 ret = 0; 621 while (nr_commands) { 622 struct request_queue *q = bd->queue; 623 624 bc = bsg_alloc_command(bd); 625 if (IS_ERR(bc)) { 626 ret = PTR_ERR(bc); 627 bc = NULL; 628 break; 629 } 630 631 if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { 632 ret = -EFAULT; 633 break; 634 } 635 636 /* 637 * get a request, fill in the blanks, and add to request queue 638 */ 639 rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); 640 if (IS_ERR(rq)) { 641 ret = PTR_ERR(rq); 642 rq = NULL; 643 break; 644 } 645 646 bsg_add_command(bd, q, bc, rq); 647 bc = NULL; 648 rq = NULL; 649 nr_commands--; 650 buf += sizeof(struct sg_io_v4); 651 *bytes_written += sizeof(struct sg_io_v4); 652 } 653 654 if (bc) 655 bsg_free_command(bc); 656 657 return ret; 658 } 659 660 static ssize_t 661 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 662 { 663 struct bsg_device *bd = file->private_data; 664 ssize_t bytes_written; 665 int ret; 666 667 dprintk("%s: write %Zd bytes\n", bd->name, count); 668 669 bsg_set_block(bd, file); 670 671 bytes_written = 0; 672 ret = __bsg_write(bd, buf, count, &bytes_written, 673 file->f_mode & FMODE_WRITE); 674 675 *ppos = bytes_written; 676 677 /* 678 * return bytes written on non-fatal errors 679 */ 680 if (!bytes_written || (bytes_written && err_block_err(ret))) 681 bytes_written = ret; 682 683 dprintk("%s: returning %Zd\n", bd->name, bytes_written); 684 return bytes_written; 685 } 686 687 static struct bsg_device *bsg_alloc_device(void) 688 { 689 struct bsg_device *bd; 690 691 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); 692 if (unlikely(!bd)) 693 return NULL; 694 695 spin_lock_init(&bd->lock); 696 697 bd->max_queue = BSG_DEFAULT_CMDS; 698 699 INIT_LIST_HEAD(&bd->busy_list); 700 INIT_LIST_HEAD(&bd->done_list); 701 INIT_HLIST_NODE(&bd->dev_list); 702 703 init_waitqueue_head(&bd->wq_free); 704 init_waitqueue_head(&bd->wq_done); 705 return bd; 706 } 707 708 static void bsg_kref_release_function(struct kref *kref) 709 { 710 struct bsg_class_device *bcd = 711 container_of(kref, struct bsg_class_device, ref); 712 struct device *parent = bcd->parent; 713 714 if (bcd->release) 715 bcd->release(bcd->parent); 716 717 put_device(parent); 718 } 719 720 static int bsg_put_device(struct bsg_device *bd) 721 { 722 int ret = 0, do_free; 723 struct request_queue *q = bd->queue; 724 725 mutex_lock(&bsg_mutex); 726 727 do_free = atomic_dec_and_test(&bd->ref_count); 728 if (!do_free) { 729 mutex_unlock(&bsg_mutex); 730 goto out; 731 } 732 733 hlist_del(&bd->dev_list); 734 mutex_unlock(&bsg_mutex); 735 736 dprintk("%s: tearing down\n", bd->name); 737 738 /* 739 * close can always block 740 */ 741 set_bit(BSG_F_BLOCK, &bd->flags); 742 743 /* 744 * correct error detection baddies here again. it's the responsibility 745 * of the app to properly reap commands before close() if it wants 746 * fool-proof error detection 747 */ 748 ret = bsg_complete_all_commands(bd); 749 750 kfree(bd); 751 out: 752 kref_put(&q->bsg_dev.ref, bsg_kref_release_function); 753 if (do_free) 754 blk_put_queue(q); 755 return ret; 756 } 757 758 static struct bsg_device *bsg_add_device(struct inode *inode, 759 struct request_queue *rq, 760 struct file *file) 761 { 762 struct bsg_device *bd; 763 int ret; 764 #ifdef BSG_DEBUG 765 unsigned char buf[32]; 766 #endif 767 ret = blk_get_queue(rq); 768 if (ret) 769 return ERR_PTR(-ENXIO); 770 771 bd = bsg_alloc_device(); 772 if (!bd) { 773 blk_put_queue(rq); 774 return ERR_PTR(-ENOMEM); 775 } 776 777 bd->queue = rq; 778 779 bsg_set_block(bd, file); 780 781 atomic_set(&bd->ref_count, 1); 782 mutex_lock(&bsg_mutex); 783 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); 784 785 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); 786 dprintk("bound to <%s>, max queue %d\n", 787 format_dev_t(buf, inode->i_rdev), bd->max_queue); 788 789 mutex_unlock(&bsg_mutex); 790 return bd; 791 } 792 793 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) 794 { 795 struct bsg_device *bd; 796 struct hlist_node *entry; 797 798 mutex_lock(&bsg_mutex); 799 800 hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { 801 if (bd->queue == q) { 802 atomic_inc(&bd->ref_count); 803 goto found; 804 } 805 } 806 bd = NULL; 807 found: 808 mutex_unlock(&bsg_mutex); 809 return bd; 810 } 811 812 static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) 813 { 814 struct bsg_device *bd; 815 struct bsg_class_device *bcd; 816 817 /* 818 * find the class device 819 */ 820 mutex_lock(&bsg_mutex); 821 bcd = idr_find(&bsg_minor_idr, iminor(inode)); 822 if (bcd) 823 kref_get(&bcd->ref); 824 mutex_unlock(&bsg_mutex); 825 826 if (!bcd) 827 return ERR_PTR(-ENODEV); 828 829 bd = __bsg_get_device(iminor(inode), bcd->queue); 830 if (bd) 831 return bd; 832 833 bd = bsg_add_device(inode, bcd->queue, file); 834 if (IS_ERR(bd)) 835 kref_put(&bcd->ref, bsg_kref_release_function); 836 837 return bd; 838 } 839 840 static int bsg_open(struct inode *inode, struct file *file) 841 { 842 struct bsg_device *bd; 843 844 lock_kernel(); 845 bd = bsg_get_device(inode, file); 846 unlock_kernel(); 847 848 if (IS_ERR(bd)) 849 return PTR_ERR(bd); 850 851 file->private_data = bd; 852 return 0; 853 } 854 855 static int bsg_release(struct inode *inode, struct file *file) 856 { 857 struct bsg_device *bd = file->private_data; 858 859 file->private_data = NULL; 860 return bsg_put_device(bd); 861 } 862 863 static unsigned int bsg_poll(struct file *file, poll_table *wait) 864 { 865 struct bsg_device *bd = file->private_data; 866 unsigned int mask = 0; 867 868 poll_wait(file, &bd->wq_done, wait); 869 poll_wait(file, &bd->wq_free, wait); 870 871 spin_lock_irq(&bd->lock); 872 if (!list_empty(&bd->done_list)) 873 mask |= POLLIN | POLLRDNORM; 874 if (bd->queued_cmds >= bd->max_queue) 875 mask |= POLLOUT; 876 spin_unlock_irq(&bd->lock); 877 878 return mask; 879 } 880 881 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 882 { 883 struct bsg_device *bd = file->private_data; 884 int __user *uarg = (int __user *) arg; 885 int ret; 886 887 switch (cmd) { 888 /* 889 * our own ioctls 890 */ 891 case SG_GET_COMMAND_Q: 892 return put_user(bd->max_queue, uarg); 893 case SG_SET_COMMAND_Q: { 894 int queue; 895 896 if (get_user(queue, uarg)) 897 return -EFAULT; 898 if (queue < 1) 899 return -EINVAL; 900 901 spin_lock_irq(&bd->lock); 902 bd->max_queue = queue; 903 spin_unlock_irq(&bd->lock); 904 return 0; 905 } 906 907 /* 908 * SCSI/sg ioctls 909 */ 910 case SG_GET_VERSION_NUM: 911 case SCSI_IOCTL_GET_IDLUN: 912 case SCSI_IOCTL_GET_BUS_NUMBER: 913 case SG_SET_TIMEOUT: 914 case SG_GET_TIMEOUT: 915 case SG_GET_RESERVED_SIZE: 916 case SG_SET_RESERVED_SIZE: 917 case SG_EMULATED_HOST: 918 case SCSI_IOCTL_SEND_COMMAND: { 919 void __user *uarg = (void __user *) arg; 920 return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); 921 } 922 case SG_IO: { 923 struct request *rq; 924 struct bio *bio, *bidi_bio = NULL; 925 struct sg_io_v4 hdr; 926 int at_head; 927 u8 sense[SCSI_SENSE_BUFFERSIZE]; 928 929 if (copy_from_user(&hdr, uarg, sizeof(hdr))) 930 return -EFAULT; 931 932 rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); 933 if (IS_ERR(rq)) 934 return PTR_ERR(rq); 935 936 bio = rq->bio; 937 if (rq->next_rq) 938 bidi_bio = rq->next_rq->bio; 939 940 at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL)); 941 blk_execute_rq(bd->queue, NULL, rq, at_head); 942 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); 943 944 if (copy_to_user(uarg, &hdr, sizeof(hdr))) 945 return -EFAULT; 946 947 return ret; 948 } 949 /* 950 * block device ioctls 951 */ 952 default: 953 #if 0 954 return ioctl_by_bdev(bd->bdev, cmd, arg); 955 #else 956 return -ENOTTY; 957 #endif 958 } 959 } 960 961 static const struct file_operations bsg_fops = { 962 .read = bsg_read, 963 .write = bsg_write, 964 .poll = bsg_poll, 965 .open = bsg_open, 966 .release = bsg_release, 967 .unlocked_ioctl = bsg_ioctl, 968 .owner = THIS_MODULE, 969 }; 970 971 void bsg_unregister_queue(struct request_queue *q) 972 { 973 struct bsg_class_device *bcd = &q->bsg_dev; 974 975 if (!bcd->class_dev) 976 return; 977 978 mutex_lock(&bsg_mutex); 979 idr_remove(&bsg_minor_idr, bcd->minor); 980 sysfs_remove_link(&q->kobj, "bsg"); 981 device_unregister(bcd->class_dev); 982 bcd->class_dev = NULL; 983 kref_put(&bcd->ref, bsg_kref_release_function); 984 mutex_unlock(&bsg_mutex); 985 } 986 EXPORT_SYMBOL_GPL(bsg_unregister_queue); 987 988 int bsg_register_queue(struct request_queue *q, struct device *parent, 989 const char *name, void (*release)(struct device *)) 990 { 991 struct bsg_class_device *bcd; 992 dev_t dev; 993 int ret, minor; 994 struct device *class_dev = NULL; 995 const char *devname; 996 997 if (name) 998 devname = name; 999 else 1000 devname = dev_name(parent); 1001 1002 /* 1003 * we need a proper transport to send commands, not a stacked device 1004 */ 1005 if (!q->request_fn) 1006 return 0; 1007 1008 bcd = &q->bsg_dev; 1009 memset(bcd, 0, sizeof(*bcd)); 1010 1011 mutex_lock(&bsg_mutex); 1012 1013 ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL); 1014 if (!ret) { 1015 ret = -ENOMEM; 1016 goto unlock; 1017 } 1018 1019 ret = idr_get_new(&bsg_minor_idr, bcd, &minor); 1020 if (ret < 0) 1021 goto unlock; 1022 1023 if (minor >= BSG_MAX_DEVS) { 1024 printk(KERN_ERR "bsg: too many bsg devices\n"); 1025 ret = -EINVAL; 1026 goto remove_idr; 1027 } 1028 1029 bcd->minor = minor; 1030 bcd->queue = q; 1031 bcd->parent = get_device(parent); 1032 bcd->release = release; 1033 kref_init(&bcd->ref); 1034 dev = MKDEV(bsg_major, bcd->minor); 1035 class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname); 1036 if (IS_ERR(class_dev)) { 1037 ret = PTR_ERR(class_dev); 1038 goto put_dev; 1039 } 1040 bcd->class_dev = class_dev; 1041 1042 if (q->kobj.sd) { 1043 ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); 1044 if (ret) 1045 goto unregister_class_dev; 1046 } 1047 1048 mutex_unlock(&bsg_mutex); 1049 return 0; 1050 1051 unregister_class_dev: 1052 device_unregister(class_dev); 1053 put_dev: 1054 put_device(parent); 1055 remove_idr: 1056 idr_remove(&bsg_minor_idr, minor); 1057 unlock: 1058 mutex_unlock(&bsg_mutex); 1059 return ret; 1060 } 1061 EXPORT_SYMBOL_GPL(bsg_register_queue); 1062 1063 static struct cdev bsg_cdev; 1064 1065 static char *bsg_nodename(struct device *dev) 1066 { 1067 return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); 1068 } 1069 1070 static int __init bsg_init(void) 1071 { 1072 int ret, i; 1073 dev_t devid; 1074 1075 bsg_cmd_cachep = kmem_cache_create("bsg_cmd", 1076 sizeof(struct bsg_command), 0, 0, NULL); 1077 if (!bsg_cmd_cachep) { 1078 printk(KERN_ERR "bsg: failed creating slab cache\n"); 1079 return -ENOMEM; 1080 } 1081 1082 for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) 1083 INIT_HLIST_HEAD(&bsg_device_list[i]); 1084 1085 bsg_class = class_create(THIS_MODULE, "bsg"); 1086 if (IS_ERR(bsg_class)) { 1087 ret = PTR_ERR(bsg_class); 1088 goto destroy_kmemcache; 1089 } 1090 bsg_class->nodename = bsg_nodename; 1091 1092 ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); 1093 if (ret) 1094 goto destroy_bsg_class; 1095 1096 bsg_major = MAJOR(devid); 1097 1098 cdev_init(&bsg_cdev, &bsg_fops); 1099 ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); 1100 if (ret) 1101 goto unregister_chrdev; 1102 1103 printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION 1104 " loaded (major %d)\n", bsg_major); 1105 return 0; 1106 unregister_chrdev: 1107 unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); 1108 destroy_bsg_class: 1109 class_destroy(bsg_class); 1110 destroy_kmemcache: 1111 kmem_cache_destroy(bsg_cmd_cachep); 1112 return ret; 1113 } 1114 1115 MODULE_AUTHOR("Jens Axboe"); 1116 MODULE_DESCRIPTION(BSG_DESCRIPTION); 1117 MODULE_LICENSE("GPL"); 1118 1119 device_initcall(bsg_init); 1120