/* xref: /linux/block/bsg.c (revision 7f3edee81fbd49114c28057512906f169caa0bed) */
1 /*
2  * bsg.c - block layer implementation of the sg v4 interface
3  *
4  * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
5  * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
6  *
7  *  This file is subject to the terms and conditions of the GNU General Public
8  *  License version 2.  See the file "COPYING" in the main directory of this
9  *  archive for more details.
10  *
11  */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}
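
/*
 * Note on throttling: queued_cmds is bumped under bd->lock before the
 * allocation above, so concurrent writers cannot overshoot bd->max_queue;
 * bsg_free_command() drops the count again and wakes wq_free, which is
 * also the wait queue bsg_poll() hooks when reporting POLLOUT.
 */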

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
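
/*
 * For reference, a userspace header that passes the checks above can be
 * filled in as follows.  This is a sketch only, not part of the driver:
 * the helper name and the 6-byte TEST UNIT READY CDB are illustrative.
 *
 *	#include <string.h>
 *	#include <linux/bsg.h>
 *
 *	static void fill_tur_hdr(struct sg_io_v4 *hdr, unsigned char *cdb,
 *				 unsigned char *sense, unsigned int sense_len)
 *	{
 *		memset(hdr, 0, sizeof(*hdr));
 *		memset(cdb, 0, 6);			// TEST UNIT READY
 *		hdr->guard = 'Q';			// mandatory magic
 *		hdr->protocol = BSG_PROTOCOL_SCSI;
 *		hdr->subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *		hdr->request = (unsigned long) cdb;
 *		hdr->request_len = 6;			// <= BLK_MAX_CDB
 *		hdr->response = (unsigned long) sense;
 *		hdr->max_response_len = sense_len;
 *		hdr->timeout = 10000;			// milliseconds
 *	}
 */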

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to the request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry(bd->done_list.next, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}
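
/*
 * Note: by the time a request completes, rq->data_len holds the residual
 * (bytes requested but not transferred), which is why it is copied into
 * din_resid/dout_resid above; a bidirectional request reports a residual
 * for each direction.
 */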

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * This is the only case where we need to copy data back
		 * after completing the request, so do it here: the async
		 * completion path (bsg_rq_end_io()) cannot do it for us.
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * Error detection here is imperfect: an application that wants
	 * fool-proof error reporting must reap its own commands before
	 * close(); we only report what we happen to collect on teardown.
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	bd = __bsg_get_device(iminor(inode));
	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	/* writable only while there is room to queue another command */
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
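
/*
 * Typical synchronous use of the SG_IO path from userspace, as a sketch
 * under the same assumptions as the fill_tur_hdr() example above (the
 * device node name is hypothetical, error handling abbreviated):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	struct sg_io_v4 hdr;
 *	unsigned char cdb[6], sense[32];
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	fill_tur_hdr(&hdr, cdb, sense, sizeof(sense));
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		perror("SG_IO");
 *	else if (hdr.info & SG_INFO_CHECK)
 *		fprintf(stderr, "check condition, status %u\n",
 *			hdr.device_status);
 */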

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
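
/*
 * The read/write/poll entry points above also support fully asynchronous
 * queueing: write() one or more sg_io_v4 headers to submit, poll() for
 * POLLIN, then read() completed headers back.  A sketch, continuing the
 * hypothetical example above:
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	fill_tur_hdr(&hdr, cdb, sense, sizeof(sense));
 *	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		perror("submit");
 *	else if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &hdr, sizeof(hdr));	// reaps one completion
 */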

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_unregister(bcd->class_dev);
	put_device(bcd->dev);
	bcd->class_dev = NULL;
	bcd->dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *gdev,
		       const char *name)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct class_device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = gdev->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->dev = get_device(gdev);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
					devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	class_device_unregister(class_dev);
put_dev:
	put_device(gdev);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
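
/*
 * Transport drivers hook a queue up to bsg by calling the function above
 * once the request_queue is alive, and tear it down again before the queue
 * goes away.  A sketch ("sdev" stands in for a real scsi_device):
 *
 *	if (bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev, NULL))
 *		printk(KERN_WARNING "bsg registration failed\n");
 *	...
 *	bsg_unregister_queue(sdev->request_queue);
 */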

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);