#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>

static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
{
        struct block_device *bdevp;
        struct gendisk *disk;
        struct hd_struct *part, *lpart;
        struct blkpg_ioctl_arg a;
        struct blkpg_partition p;
        struct disk_part_iter piter;
        long long start, length;
        int partno;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
                return -EFAULT;
        if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
                return -EFAULT;
        disk = bdev->bd_disk;
        if (bdev != bdev->bd_contains)
                return -EINVAL;
        partno = p.pno;
        if (partno <= 0)
                return -EINVAL;
        switch (a.op) {
        case BLKPG_ADD_PARTITION:
                start = p.start >> 9;
                length = p.length >> 9;
                /* check for fit in a hd_struct */
                if (sizeof(sector_t) == sizeof(long) &&
                    sizeof(long long) > sizeof(long)) {
                        long pstart = start, plength = length;
                        if (pstart != start || plength != length ||
                            pstart < 0 || plength < 0 || partno > 65535)
                                return -EINVAL;
                }

                mutex_lock(&bdev->bd_mutex);

                /* overlap? */
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
                while ((part = disk_part_iter_next(&piter))) {
                        if (!(start + length <= part->start_sect ||
                              start >= part->start_sect + part->nr_sects)) {
                                disk_part_iter_exit(&piter);
                                mutex_unlock(&bdev->bd_mutex);
                                return -EBUSY;
                        }
                }
                disk_part_iter_exit(&piter);

                /* all seems OK */
                part = add_partition(disk, partno, start, length,
                                     ADDPART_FLAG_NONE, NULL);
                mutex_unlock(&bdev->bd_mutex);
                return PTR_ERR_OR_ZERO(part);
        case BLKPG_DEL_PARTITION:
                part = disk_get_part(disk, partno);
                if (!part)
                        return -ENXIO;

                bdevp = bdget(part_devt(part));
                disk_put_part(part);
                if (!bdevp)
                        return -ENOMEM;

                mutex_lock(&bdevp->bd_mutex);
                if (bdevp->bd_openers) {
                        mutex_unlock(&bdevp->bd_mutex);
                        bdput(bdevp);
                        return -EBUSY;
                }
                /* all seems OK */
                fsync_bdev(bdevp);
                invalidate_bdev(bdevp);

                mutex_lock_nested(&bdev->bd_mutex, 1);
                delete_partition(disk, partno);
                mutex_unlock(&bdev->bd_mutex);
                mutex_unlock(&bdevp->bd_mutex);
                bdput(bdevp);

                return 0;
        case BLKPG_RESIZE_PARTITION:
                start = p.start >> 9;
                /* new length of partition in bytes */
                length = p.length >> 9;
                /* check for fit in a hd_struct */
                if (sizeof(sector_t) == sizeof(long) &&
                    sizeof(long long) > sizeof(long)) {
                        long pstart = start, plength = length;
                        if (pstart != start || plength != length ||
                            pstart < 0 || plength < 0)
                                return -EINVAL;
                }
                part = disk_get_part(disk, partno);
                if (!part)
                        return -ENXIO;
                bdevp = bdget(part_devt(part));
                if (!bdevp) {
                        disk_put_part(part);
                        return -ENOMEM;
                }
                mutex_lock(&bdevp->bd_mutex);
                mutex_lock_nested(&bdev->bd_mutex, 1);
                if (start != part->start_sect) {
                        mutex_unlock(&bdevp->bd_mutex);
                        mutex_unlock(&bdev->bd_mutex);
                        bdput(bdevp);
                        disk_put_part(part);
                        return -EINVAL;
                }
                /* overlap? */
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
                while ((lpart = disk_part_iter_next(&piter))) {
                        if (lpart->partno != partno &&
                            !(start + length <= lpart->start_sect ||
                              start >= lpart->start_sect + lpart->nr_sects)) {
                                disk_part_iter_exit(&piter);
                                mutex_unlock(&bdevp->bd_mutex);
                                mutex_unlock(&bdev->bd_mutex);
                                bdput(bdevp);
                                disk_put_part(part);
                                return -EBUSY;
                        }
                }
                disk_part_iter_exit(&piter);
                part_nr_sects_write(part, (sector_t)length);
                i_size_write(bdevp->bd_inode, p.length);
                mutex_unlock(&bdevp->bd_mutex);
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdevp);
                disk_put_part(part);
                return 0;
        default:
                return -EINVAL;
        }
}

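/*
 * Re-read the partition table on the whole device.  Requires
 * CAP_SYS_ADMIN, only makes sense on a partitionable whole-disk
 * device, and bails out with -EBUSY instead of blocking if
 * bd_mutex is already held.
 */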
static int blkdev_reread_part(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        int res;

        if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (!mutex_trylock(&bdev->bd_mutex))
                return -EBUSY;
        res = rescan_partitions(disk, bdev);
        mutex_unlock(&bdev->bd_mutex);
        return res;
}

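/*
 * BLKDISCARD/BLKSECDISCARD and BLKZEROOUT (below) take a byte range
 * from user space: start and length must be 512-byte aligned and the
 * range must lie within the device.  The helpers convert to sectors
 * before calling into the block layer.
 */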
static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
                             uint64_t len, int secure)
{
        unsigned long flags = 0;

        if (start & 511)
                return -EINVAL;
        if (len & 511)
                return -EINVAL;
        start >>= 9;
        len >>= 9;

        if (start + len > (i_size_read(bdev->bd_inode) >> 9))
                return -EINVAL;
        if (secure)
                flags |= BLKDEV_DISCARD_SECURE;
        return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}

static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
                             uint64_t len)
{
        if (start & 511)
                return -EINVAL;
        if (len & 511)
                return -EINVAL;
        start >>= 9;
        len >>= 9;

        if (start + len > (i_size_read(bdev->bd_inode) >> 9))
                return -EINVAL;

        return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
}

static int put_ushort(unsigned long arg, unsigned short val)
{
        return put_user(val, (unsigned short __user *)arg);
}

static int put_int(unsigned long arg, int val)
{
        return put_user(val, (int __user *)arg);
}

static int put_uint(unsigned long arg, unsigned int val)
{
        return put_user(val, (unsigned int __user *)arg);
}

static int put_long(unsigned long arg, long val)
{
        return put_user(val, (long __user *)arg);
}

static int put_ulong(unsigned long arg, unsigned long val)
{
        return put_user(val, (unsigned long __user *)arg);
}

static int put_u64(unsigned long arg, u64 val)
{
        return put_user(val, (u64 __user *)arg);
}

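/*
 * Forward an ioctl to the driver's own ->ioctl method, if the gendisk
 * provides one; otherwise report the command as unsupported.
 */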
int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned cmd, unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;

        if (disk->fops->ioctl)
                return disk->fops->ioctl(bdev, mode, cmd, arg);

        return -ENOTTY;
}
/*
 * For the record: _GPL here is only because somebody decided to slap it
 * on the previous export. Sheer idiocy, since it wasn't copyrightable
 * at all and could be open-coded without any exports by anybody who cares.
 */
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);

/*
 * Is it an unrecognized ioctl? The correct returns are either
 * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
 * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
 * code before returning.
 *
 * Confused drivers sometimes return EINVAL, which is wrong. It
 * means "I understood the ioctl command, but the parameters to
 * it were wrong".
 *
 * We should aim to just fix the broken drivers; the EINVAL case
 * should go away.
 */
static inline int is_unrecognized_ioctl(int ret)
{
        return ret == -EINVAL ||
                ret == -ENOTTY ||
                ret == -ENOIOCTLCMD;
}

/*
 * always keep this in sync with compat_blkdev_ioctl()
 */
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                 unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;
        struct backing_dev_info *bdi;
        loff_t size;
        int ret, n;
        unsigned int max_sectors;

        switch (cmd) {
        case BLKFLSBUF:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;

                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
                if (!is_unrecognized_ioctl(ret))
                        return ret;

                fsync_bdev(bdev);
                invalidate_bdev(bdev);
                return 0;

        case BLKROSET:
                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
                if (!is_unrecognized_ioctl(ret))
                        return ret;
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                if (get_user(n, (int __user *)(arg)))
                        return -EFAULT;
                set_device_ro(bdev, n);
                return 0;

        case BLKDISCARD:
        case BLKSECDISCARD: {
                uint64_t range[2];

                if (!(mode & FMODE_WRITE))
                        return -EBADF;

                if (copy_from_user(range, (void __user *)arg, sizeof(range)))
                        return -EFAULT;

                return blk_ioctl_discard(bdev, range[0], range[1],
                                         cmd == BLKSECDISCARD);
        }
        case BLKZEROOUT: {
                uint64_t range[2];

                if (!(mode & FMODE_WRITE))
                        return -EBADF;

                if (copy_from_user(range, (void __user *)arg, sizeof(range)))
                        return -EFAULT;

                return blk_ioctl_zeroout(bdev, range[0], range[1]);
        }

        case HDIO_GETGEO: {
                struct hd_geometry geo;

                if (!arg)
                        return -EINVAL;
                if (!disk->fops->getgeo)
                        return -ENOTTY;

                /*
                 * We need to set the startsect first; the driver may
                 * want to override it.
                 */
                memset(&geo, 0, sizeof(geo));
                geo.start = get_start_sect(bdev);
                ret = disk->fops->getgeo(bdev, &geo);
                if (ret)
                        return ret;
                if (copy_to_user((struct hd_geometry __user *)arg, &geo,
                                 sizeof(geo)))
                        return -EFAULT;
                return 0;
        }
        case BLKRAGET:
        case BLKFRAGET:
                if (!arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
                return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
        case BLKROGET:
                return put_int(arg, bdev_read_only(bdev) != 0);
        case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
                return put_int(arg, block_size(bdev));
        case BLKSSZGET: /* get block device logical block size */
                return put_int(arg, bdev_logical_block_size(bdev));
        case BLKPBSZGET: /* get block device physical block size */
                return put_uint(arg, bdev_physical_block_size(bdev));
        case BLKIOMIN:
                return put_uint(arg, bdev_io_min(bdev));
        case BLKIOOPT:
                return put_uint(arg, bdev_io_opt(bdev));
        case BLKALIGNOFF:
                return put_int(arg, bdev_alignment_offset(bdev));
        case BLKDISCARDZEROES:
                return put_uint(arg, bdev_discard_zeroes_data(bdev));
        case BLKSECTGET:
                max_sectors = min_t(unsigned int, USHRT_MAX,
                                    queue_max_sectors(bdev_get_queue(bdev)));
                return put_ushort(arg, max_sectors);
        case BLKROTATIONAL:
                return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
        case BLKRASET:
        case BLKFRASET:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKBSZSET:
                /* set block device soft block size (cf. BLKBSZGET) */
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                if (!arg)
                        return -EINVAL;
                if (get_user(n, (int __user *)arg))
                        return -EFAULT;
                if (!(mode & FMODE_EXCL)) {
                        bdgrab(bdev);
                        if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
                                return -EBUSY;
                }
                ret = set_blocksize(bdev, n);
                if (!(mode & FMODE_EXCL))
                        blkdev_put(bdev, mode | FMODE_EXCL);
                return ret;
        case BLKPG:
                ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *)arg);
                break;
        case BLKRRPART:
                ret = blkdev_reread_part(bdev);
                break;
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
                        return -EFBIG;
                return put_ulong(arg, size >> 9);
        case BLKGETSIZE64:
                return put_u64(arg, i_size_read(bdev->bd_inode));
        case BLKTRACESTART:
        case BLKTRACESTOP:
        case BLKTRACESETUP:
        case BLKTRACETEARDOWN:
                ret = blk_trace_ioctl(bdev, cmd, (char __user *)arg);
                break;
        default:
                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);