// SPDX-License-Identifier: GPL-2.0
/*
 * bsg.c - block layer implementation of the sg v4 interface
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/io_uring/cmd.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

/*
 * Per-queue bsg character device state.  Allocated in bsg_register_queue()
 * and freed from the device-model release callback (bsg_device_release()).
 */
struct bsg_device {
	struct request_queue *queue;	/* queue requests are dispatched to */
	struct device device;		/* driver-model handle; owns lifetime */
	struct cdev cdev;		/* char device backing /dev/bsg/<name> */
	int max_queue;			/* SG_{GET,SET}_COMMAND_Q value */
	unsigned int timeout;		/* default timeout in jiffies, 0 = unset */
	unsigned int reserved_size;	/* SG_{GET,SET}_RESERVED_SIZE value */
	bsg_sg_io_fn *sg_io_fn;		/* transport callback for SG_IO */
	bsg_uring_cmd_fn *uring_cmd_fn;	/* transport callback for io_uring cmds */
};

/* Map the inode of an open bsg char device back to its bsg_device. */
static inline struct bsg_device *to_bsg_device(struct inode *inode)
{
	return container_of(inode->i_cdev, struct bsg_device, cdev);
}

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		(1 << MINORBITS)

static DEFINE_IDA(bsg_minor_ida);
static const struct class bsg_class;
static int bsg_major;

/*
 * Pick the timeout for one request: the per-request value in the sg_io_v4
 * header wins, then the device default set via SG_SET_TIMEOUT, then
 * BLK_DEFAULT_SG_TIMEOUT.  The result is clamped up to BLK_MIN_SG_TIMEOUT.
 */
static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;

	if (hdr->timeout)
		timeout = msecs_to_jiffies(hdr->timeout);
	else if (bd->timeout)
		timeout = bd->timeout;

	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}

/*
 * SG_IO ioctl: copy in the sg_io_v4 header, dispatch it through the
 * transport's sg_io_fn, and copy the updated header back to userspace on
 * success.  'Q' is the mandatory guard byte for the v4 interface.
 */
static int bsg_sg_io(struct bsg_device *bd, bool open_for_write,
		void __user *uarg)
{
	struct sg_io_v4 hdr;
	int ret;

	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
		return -EFAULT;
	if (hdr.guard != 'Q')
		return -EINVAL;
	ret = bd->sg_io_fn(bd->queue, &hdr, open_for_write,
			bsg_timeout(bd, &hdr));
	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
		return -EFAULT;
	return ret;
}

/* open(): pin the request queue for the lifetime of this open file. */
static int bsg_open(struct inode *inode, struct file *file)
{
	if (!blk_get_queue(to_bsg_device(inode)->queue))
		return -ENXIO;
	return 0;
}

/* release(): drop the queue reference taken in bsg_open(). */
static int bsg_release(struct inode *inode, struct file *file)
{
	blk_put_queue(to_bsg_device(inode)->queue);
	return 0;
}

/* SG_GET_COMMAND_Q: report the queue-depth value to userspace. */
static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
{
	return put_user(READ_ONCE(bd->max_queue), uarg);
}

/* SG_SET_COMMAND_Q: update the queue-depth value (must be >= 1). */
static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
{
	int max_queue;

	if (get_user(max_queue, uarg))
		return -EFAULT;
	if (max_queue < 1)
		return -EINVAL;
	WRITE_ONCE(bd->max_queue, max_queue);
	return 0;
}

/*
 * ioctl entry point: implements the bsg-private queue-depth ioctls plus the
 * subset of SCSI/sg ioctls supported by the v4 interface.  Several legacy
 * sg ioctls are answered with fixed values for compatibility.
 */
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = to_bsg_device(file_inode(file));
	struct request_queue *q = bd->queue;
	void __user *uarg = (void __user *) arg;
	int __user *intp = uarg;
	int val;

	switch (cmd) {
	/*
	 * Our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return bsg_get_command_q(bd, uarg);
	case SG_SET_COMMAND_Q:
		return bsg_set_command_q(bd, uarg);

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
		return put_user(30527, intp);
	case SCSI_IOCTL_GET_IDLUN:
		return put_user(0, intp);
	case SCSI_IOCTL_GET_BUS_NUMBER:
		return put_user(0, intp);
	case SG_SET_TIMEOUT:
		/* userspace value is in clock ticks; store as jiffies */
		if (get_user(val, intp))
			return -EFAULT;
		bd->timeout = clock_t_to_jiffies(val);
		return 0;
	case SG_GET_TIMEOUT:
		return jiffies_to_clock_t(bd->timeout);
	case SG_GET_RESERVED_SIZE:
		/* never report more than the queue can actually transfer */
		return put_user(min(bd->reserved_size, queue_max_bytes(q)),
				intp);
	case SG_SET_RESERVED_SIZE:
		if (get_user(val, intp))
			return -EFAULT;
		if (val < 0)
			return -EINVAL;
		bd->reserved_size =
			min_t(unsigned int, val, queue_max_bytes(q));
		return 0;
	case SG_EMULATED_HOST:
		return put_user(1, intp);
	case SG_IO:
		return bsg_sg_io(bd, file->f_mode & FMODE_WRITE, uarg);
	case
SCSI_IOCTL_SEND_COMMAND:
		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
				current->comm);
		return -EINVAL;
	default:
		return -ENOTTY;
	}
}

/* io_uring passthrough needs both 128-byte SQEs and 32-byte CQEs. */
static int bsg_check_uring_features(unsigned int issue_flags)
{
	/* BSG passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

/*
 * io_uring command entry point: verify the ring features, then hand off to
 * the transport's uring_cmd_fn if it registered one.
 */
static int bsg_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct bsg_device *bd = to_bsg_device(file_inode(ioucmd->file));
	bool open_for_write = ioucmd->file->f_mode & FMODE_WRITE;
	struct request_queue *q = bd->queue;
	int ret;

	ret = bsg_check_uring_features(issue_flags);
	if (ret)
		return ret;

	if (!bd->uring_cmd_fn)
		return -EOPNOTSUPP;

	return bd->uring_cmd_fn(q, ioucmd, issue_flags, open_for_write);
}

static const struct file_operations bsg_fops = {
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= bsg_uring_cmd,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

/*
 * Device-model release callback: runs when the last reference to the
 * embedded struct device is dropped; returns the minor to the IDA and
 * frees the bsg_device.
 */
static void bsg_device_release(struct device *dev)
{
	struct bsg_device *bd = container_of(dev, struct bsg_device, device);

	ida_free(&bsg_minor_ida, MINOR(bd->device.devt));
	kfree(bd);
}

/*
 * Tear down a device created by bsg_register_queue().  The final kfree
 * happens in bsg_device_release() once all device references are gone.
 */
void bsg_unregister_queue(struct bsg_device *bd)
{
	struct gendisk *disk = bd->queue->disk;

	if (disk && disk->queue_kobj.sd)
		sysfs_remove_link(&disk->queue_kobj, "bsg");
	cdev_device_del(&bd->cdev, &bd->device);
	put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

/*
 * Register a bsg character device for @q.  @sg_io_fn handles SG_IO;
 * @uring_cmd_fn handles io_uring passthrough commands (checked for NULL at
 * dispatch time).  Returns the new device or an ERR_PTR() on failure.
 */
struct bsg_device *bsg_register_queue(struct request_queue *q,
		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn,
		bsg_uring_cmd_fn *uring_cmd_fn)
{
	struct bsg_device *bd;
	int ret;
224 225 bd = kzalloc_obj(*bd); 226 if (!bd) 227 return ERR_PTR(-ENOMEM); 228 bd->max_queue = BSG_DEFAULT_CMDS; 229 bd->reserved_size = INT_MAX; 230 bd->queue = q; 231 bd->sg_io_fn = sg_io_fn; 232 bd->uring_cmd_fn = uring_cmd_fn; 233 234 ret = ida_alloc_max(&bsg_minor_ida, BSG_MAX_DEVS - 1, GFP_KERNEL); 235 if (ret < 0) { 236 if (ret == -ENOSPC) 237 dev_err(parent, "bsg: too many bsg devices\n"); 238 kfree(bd); 239 return ERR_PTR(ret); 240 } 241 bd->device.devt = MKDEV(bsg_major, ret); 242 bd->device.class = &bsg_class; 243 bd->device.parent = parent; 244 bd->device.release = bsg_device_release; 245 dev_set_name(&bd->device, "%s", name); 246 device_initialize(&bd->device); 247 248 cdev_init(&bd->cdev, &bsg_fops); 249 bd->cdev.owner = THIS_MODULE; 250 ret = cdev_device_add(&bd->cdev, &bd->device); 251 if (ret) 252 goto out_put_device; 253 254 if (q->disk && q->disk->queue_kobj.sd) { 255 ret = sysfs_create_link(&q->disk->queue_kobj, &bd->device.kobj, 256 "bsg"); 257 if (ret) 258 goto out_device_del; 259 } 260 261 return bd; 262 263 out_device_del: 264 cdev_device_del(&bd->cdev, &bd->device); 265 out_put_device: 266 put_device(&bd->device); 267 return ERR_PTR(ret); 268 } 269 EXPORT_SYMBOL_GPL(bsg_register_queue); 270 271 static char *bsg_devnode(const struct device *dev, umode_t *mode) 272 { 273 return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); 274 } 275 276 static const struct class bsg_class = { 277 .name = "bsg", 278 .devnode = bsg_devnode, 279 }; 280 281 static int __init bsg_init(void) 282 { 283 dev_t devid; 284 int ret; 285 286 ret = class_register(&bsg_class); 287 if (ret) 288 return ret; 289 290 ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); 291 if (ret) 292 goto destroy_bsg_class; 293 bsg_major = MAJOR(devid); 294 295 printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION 296 " loaded (major %d)\n", bsg_major); 297 return 0; 298 299 destroy_bsg_class: 300 class_unregister(&bsg_class); 301 return ret; 302 } 303 304 MODULE_AUTHOR("Jens 
Axboe"); 305 MODULE_DESCRIPTION(BSG_DESCRIPTION); 306 MODULE_LICENSE("GPL"); 307 308 device_initcall(bsg_init); 309