// SPDX-License-Identifier: GPL-2.0
/*
 * bsg.c - block layer implementation of the sg v4 interface
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;	/* request queue this node is bound to */
	struct device device;		/* struct device backing /dev/bsg/<name> */
	struct cdev cdev;		/* character device for the bsg node */
	int max_queue;			/* value exposed via SG_{GET,SET}_COMMAND_Q */
	unsigned int timeout;		/* default timeout in jiffies (SG_SET_TIMEOUT) */
	unsigned int reserved_size;	/* value exposed via SG_{GET,SET}_RESERVED_SIZE */
	bsg_sg_io_fn *sg_io_fn;		/* transport handler for SG_IO requests */
};

static inline struct bsg_device *to_bsg_device(struct inode *inode)
{
	return container_of(inode->i_cdev, struct bsg_device, cdev);
}

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_IDA(bsg_minor_ida);
static struct class *bsg_class;
static int bsg_major;

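/*
 * Pick the timeout for a request: an explicit value in the sg_io_v4 header
 * wins, then the per-device default set via SG_SET_TIMEOUT, then
 * BLK_DEFAULT_SG_TIMEOUT; the result is clamped to at least
 * BLK_MIN_SG_TIMEOUT.
 */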
static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;

	if (hdr->timeout)
		timeout = msecs_to_jiffies(hdr->timeout);
	else if (bd->timeout)
		timeout = bd->timeout;

	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}

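/*
 * Handle an SG_IO ioctl: copy in the sg_io_v4 header, dispatch it through
 * the transport's sg_io_fn, and copy the updated header back to userspace.
 *
 * A minimal userspace sketch (illustrative only; the device path and the
 * fields left unset here are assumptions, not taken from this file):
 *
 *	struct sg_io_v4 hdr = { .guard = 'Q', .protocol = BSG_PROTOCOL_SCSI,
 *				.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD };
 *	// fill in request/response/data-in pointers and lengths, then:
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *	int err = ioctl(fd, SG_IO, &hdr);
 */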
static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
{
	struct sg_io_v4 hdr;
	int ret;

	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
		return -EFAULT;
	if (hdr.guard != 'Q')
		return -EINVAL;
	ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
		return -EFAULT;
	return ret;
}

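/*
 * open/release only need to pin the underlying request queue so it cannot
 * go away while the bsg node is held open.
 */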
static int bsg_open(struct inode *inode, struct file *file)
{
	if (!blk_get_queue(to_bsg_device(inode)->queue))
		return -ENXIO;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	blk_put_queue(to_bsg_device(inode)->queue);
	return 0;
}

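/*
 * SG_GET_COMMAND_Q/SG_SET_COMMAND_Q are kept for compatibility with the
 * historical bsg read/write interface; the value is stored and reported
 * back, but nothing in this file uses it to limit queued commands.
 */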
static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
{
	return put_user(READ_ONCE(bd->max_queue), uarg);
}

static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
{
	int max_queue;

	if (get_user(max_queue, uarg))
		return -EFAULT;
	if (max_queue < 1)
		return -EINVAL;
	WRITE_ONCE(bd->max_queue, max_queue);
	return 0;
}

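/*
 * Main ioctl dispatcher: a couple of bsg-private ioctls, a set of sg
 * compatibility ioctls (several of which just return fixed values), and
 * SG_IO itself.
 */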
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = to_bsg_device(file_inode(file));
	struct request_queue *q = bd->queue;
	void __user *uarg = (void __user *) arg;
	int __user *intp = uarg;
	int val;

	switch (cmd) {
	/*
	 * Our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return bsg_get_command_q(bd, uarg);
	case SG_SET_COMMAND_Q:
		return bsg_set_command_q(bd, uarg);

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
		return put_user(30527, intp);
	case SCSI_IOCTL_GET_IDLUN:
		return put_user(0, intp);
	case SCSI_IOCTL_GET_BUS_NUMBER:
		return put_user(0, intp);
	case SG_SET_TIMEOUT:
		if (get_user(val, intp))
			return -EFAULT;
		bd->timeout = clock_t_to_jiffies(val);
		return 0;
	case SG_GET_TIMEOUT:
		return jiffies_to_clock_t(bd->timeout);
	case SG_GET_RESERVED_SIZE:
		return put_user(min(bd->reserved_size, queue_max_bytes(q)),
				intp);
	case SG_SET_RESERVED_SIZE:
		if (get_user(val, intp))
			return -EFAULT;
		if (val < 0)
			return -EINVAL;
		bd->reserved_size =
			min_t(unsigned int, val, queue_max_bytes(q));
		return 0;
	case SG_EMULATED_HOST:
		return put_user(1, intp);
	case SG_IO:
		return bsg_sg_io(bd, file->f_mode, uarg);
	case SCSI_IOCTL_SEND_COMMAND:
		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
				current->comm);
		return -EINVAL;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.compat_ioctl	=	compat_ptr_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

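/* Final device release: return the minor number and free the bsg_device. */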
static void bsg_device_release(struct device *dev)
{
	struct bsg_device *bd = container_of(dev, struct bsg_device, device);

	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
	kfree(bd);
}

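/**
 * bsg_unregister_queue - tear down a bsg device
 * @bd: bsg_device returned by bsg_register_queue()
 *
 * Removes the "bsg" sysfs link from the request queue, deletes the char
 * device and drops its reference; the bsg_device is freed once the last
 * reference goes away.
 */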
void bsg_unregister_queue(struct bsg_device *bd)
{
	if (bd->queue->kobj.sd)
		sysfs_remove_link(&bd->queue->kobj, "bsg");
	cdev_device_del(&bd->cdev, &bd->device);
	put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

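/**
 * bsg_register_queue - register a bsg device for a request queue
 * @q: request queue the bsg device operates on
 * @parent: parent device, used for the sysfs hierarchy and error messages
 * @name: name of the node under /dev/bsg/
 * @sg_io_fn: transport callback that executes sg_io_v4 requests
 *
 * Allocates a minor number, sets up the char device and links it to @q in
 * sysfs.  Returns the new bsg_device or an ERR_PTR() on failure.
 */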
struct bsg_device *bsg_register_queue(struct request_queue *q,
		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
{
	struct bsg_device *bd;
	int ret;

	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
	if (!bd)
		return ERR_PTR(-ENOMEM);
	bd->max_queue = BSG_DEFAULT_CMDS;
	bd->reserved_size = INT_MAX;
	bd->queue = q;
	bd->sg_io_fn = sg_io_fn;

	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC)
			dev_err(parent, "bsg: too many bsg devices\n");
		kfree(bd);
		return ERR_PTR(ret);
	}
	bd->device.devt = MKDEV(bsg_major, ret);
	bd->device.class = bsg_class;
	bd->device.parent = parent;
	bd->device.release = bsg_device_release;
	dev_set_name(&bd->device, "%s", name);
	device_initialize(&bd->device);

	cdev_init(&bd->cdev, &bsg_fops);
	bd->cdev.owner = THIS_MODULE;
	ret = cdev_device_add(&bd->cdev, &bd->device);
	if (ret)
		goto out_put_device;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
		if (ret)
			goto out_device_del;
	}

	return bd;

out_device_del:
	cdev_device_del(&bd->cdev, &bd->device);
out_put_device:
	put_device(&bd->device);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

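/* Place the device nodes under /dev/bsg/ rather than directly in /dev. */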
static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

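/*
 * Module init: create the "bsg" class and reserve a char device region with
 * BSG_MAX_DEVS minors; individual nodes are created later via
 * bsg_register_queue().
 */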
static int __init bsg_init(void)
{
	dev_t devid;
	int ret;

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;
	bsg_major = MAJOR(devid);

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;

destroy_bsg_class:
	class_destroy(bsg_class);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);