/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include "loop.h"

#include <asm/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

static struct workqueue_struct *loop_wq;

/*
 * Transfer functions
 */
static int transfer_none(struct loop_device *lo, int cmd,
			 struct page *raw_page, unsigned raw_off,
			 struct page *loop_page, unsigned loop_off,
			 int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;

	if (cmd == READ)
		memcpy(loop_buf, raw_buf, size);
	else
		memcpy(raw_buf, loop_buf, size);

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
	.transfer = transfer_none,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};
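
/*
 * Worked example (illustrative sketch, hypothetical key, not driver
 * code): for a 4-byte key "abcd", transfer_xor() XORs byte i of the
 * transfer with key[(i & 511) % 4], so the key stream restarts at
 * every 512-byte boundary of the transfer:
 *
 *	out[0]   = in[0]   ^ 'a';	out[1] = in[1] ^ 'b';
 *	out[4]   = in[4]   ^ 'a';	...
 *	out[512] = in[512] ^ 'a';	(key stream restarts)
 *
 * Applying the same transform twice recovers the original data, which
 * is why a single transfer function serves both READ and WRITE.
 */
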
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	if (unlikely((loff_t)x != size))
		return -EFBIG;
	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	return 0;
}
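
/*
 * Worked sizing example (illustrative, hypothetical numbers): a 1 GiB
 * backing file (1073741824 bytes) with lo_offset = 4096 and
 * lo_sizelimit = 0 leaves 1073737728 usable bytes, i.e.
 * 1073737728 >> 9 = 2097144 sectors of 512 bytes. A nonzero sizelimit
 * smaller than that would clamp the figure, and figure_loop_size()
 * returns -EFBIG if the sector count no longer round-trips through
 * sector_t.
 */
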
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	if (unlikely(!lo->transfer))
		return 0;

	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
}

/**
 * __do_lo_send_write - helper for writing data to a loop device
 *
 * This helper just factors out common code between do_lo_send_direct_write()
 * and do_lo_send_write().
 */
static int __do_lo_send_write(struct file *file,
		u8 *buf, const int len, loff_t pos)
{
	ssize_t bw;
	mm_segment_t old_fs = get_fs();

	file_start_write(file);
	set_fs(get_ds());
	bw = file->f_op->write(file, buf, len, &pos);
	set_fs(old_fs);
	file_end_write(file);
	if (likely(bw == len))
		return 0;
	printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
			(unsigned long long)pos, len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

/**
 * do_lo_send_direct_write - helper for writing data to a loop device
 *
 * This is the fast, non-transforming version that does not need double
 * buffering.
 */
static int do_lo_send_direct_write(struct loop_device *lo,
		struct bio_vec *bvec, loff_t pos, struct page *page)
{
	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
			kmap(bvec->bv_page) + bvec->bv_offset,
			bvec->bv_len, pos);
	kunmap(bvec->bv_page);
	cond_resched();
	return bw;
}

/**
 * do_lo_send_write - helper for writing data to a loop device
 *
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *page)
{
	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
			bvec->bv_offset, bvec->bv_len, pos >> 9);
	if (likely(!ret))
		return __do_lo_send_write(lo->lo_backing_file,
				page_address(page), bvec->bv_len,
				pos);
	printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
	if (ret > 0)
		ret = -EIO;
	return ret;
}

static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
{
	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
			struct page *page);
	struct bio_vec bvec;
	struct req_iterator iter;
	struct page *page = NULL;
	int ret = 0;

	if (lo->transfer != transfer_none) {
		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page))
			goto fail;
		kmap(page);
		do_lo_send = do_lo_send_write;
	} else {
		do_lo_send = do_lo_send_direct_write;
	}

	rq_for_each_segment(bvec, rq, iter) {
		ret = do_lo_send(lo, &bvec, pos, page);
		if (ret < 0)
			break;
		pos += bvec.bv_len;
	}
	if (page) {
		kunmap(page);
		__free_page(page);
	}
out:
	return ret;
fail:
	printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
	ret = -ENOMEM;
	goto out;
}

struct lo_read_data {
	struct loop_device *lo;
	struct page *page;
	unsigned offset;
	int bsize;
};

static int
lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		struct splice_desc *sd)
{
	struct lo_read_data *p = sd->u.data;
	struct loop_device *lo = p->lo;
	struct page *page = buf->page;
	sector_t IV;
	int size;

	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
	     (buf->offset >> 9);
	size = sd->len;
	if (size > p->bsize)
		size = p->bsize;

	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
		printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
				   page->index);
		size = -EINVAL;
	}

	flush_dcache_page(p->page);

	if (size > 0)
		p->offset += size;

	return size;
}

static int
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, lo_splice_actor);
}

static ssize_t
do_lo_receive(struct loop_device *lo,
	      struct bio_vec *bvec, int bsize, loff_t pos)
{
	struct lo_read_data cookie;
	struct splice_desc sd;
	struct file *file;
	ssize_t retval;

	cookie.lo = lo;
	cookie.page = bvec->bv_page;
	cookie.offset = bvec->bv_offset;
	cookie.bsize = bsize;

	sd.len = 0;
	sd.total_len = bvec->bv_len;
	sd.flags = 0;
	sd.pos = pos;
	sd.u.data = &cookie;

	file = lo->lo_backing_file;
	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);

	return retval;
}

static int
lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	ssize_t s;

	rq_for_each_segment(bvec, rq, iter) {
		s = do_lo_receive(lo, &bvec, bsize, pos);
		if (s < 0)
			return s;

		if (s != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		pos += bvec.bv_len;
	}
	return 0;
}

static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	int ret;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
out:
	return ret;
}
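
/*
 * Userspace analogue (illustrative sketch, not driver code): the same
 * punch-hole semantics can be exercised on the backing file directly
 * with fallocate(2):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * The file's apparent size is unchanged (KEEP_SIZE) while the
 * underlying blocks are freed, which is exactly what a discard issued
 * by the filesystem inside the loop device should achieve.
 */
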
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	loff_t pos;
	int ret;

	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	if (rq->cmd_flags & REQ_WRITE) {
		if (rq->cmd_flags & REQ_FLUSH)
			ret = lo_req_flush(lo, rq);
		else if (rq->cmd_flags & REQ_DISCARD)
			ret = lo_discard(lo, rq, pos);
		else
			ret = lo_send(lo, rq, pos);
	} else
		ret = lo_receive(lo, rq, lo->lo_blocksize, pos);

	return ret;
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

/*
 * Do the actual switch; called with the queue frozen
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		return;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
}

/*
 * loop_switch performs the hard work of switching a backing store.
 * It freezes the queue to flush all outstanding I/O, performs the
 * switch with the queue quiesced, and then unfreezes it.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;

	w.file = file;

	/* freeze queue and wait for completion of scheduled requests */
	blk_mq_freeze_queue(lo->lo_queue);

	/* do the switch action */
	do_loop_switch(lo, &w);

	/* unfreeze */
	blk_mq_unfreeze_queue(lo->lo_queue);

	return 0;
}

/*
 * Helper to flush the IOs in loop, without tearing the device down
 */
static int loop_flush(struct loop_device *lo)
{
	return loop_switch(lo, NULL);
}

/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

out_putf:
	fput(file);
out:
	return error;
}
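
/*
 * Userspace usage sketch (illustrative; assumes a bound read-only
 * device and a replacement file of identical usable size):
 *
 *	int loopfd = open("/dev/loop0", O_RDONLY);
 *	int newfd  = open("/path/to/new-backing-file", O_RDONLY);
 *	ioctl(loopfd, LOOP_CHANGE_FD, newfd);
 *
 * The ioctl fails with -EINVAL unless the device is read-only and the
 * new file's size, as computed by get_loop_size(), matches the old
 * one exactly.
 */
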
static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
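
/*
 * Expansion sketch (illustrative): LOOP_ATTR_RO(offset) generates a
 * forward declaration of loop_attr_offset_show(), a wrapper
 * loop_attr_do_show_offset() that routes the sysfs read through
 * loop_attr_show(), and a read-only (S_IRUGO) device attribute named
 * "offset". Once loop_sysfs_init() registers the attribute group
 * below, the value surfaces as /sys/block/loopN/loop/offset.
 */
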
"1" : "0"); 660 } 661 662 LOOP_ATTR_RO(backing_file); 663 LOOP_ATTR_RO(offset); 664 LOOP_ATTR_RO(sizelimit); 665 LOOP_ATTR_RO(autoclear); 666 LOOP_ATTR_RO(partscan); 667 668 static struct attribute *loop_attrs[] = { 669 &loop_attr_backing_file.attr, 670 &loop_attr_offset.attr, 671 &loop_attr_sizelimit.attr, 672 &loop_attr_autoclear.attr, 673 &loop_attr_partscan.attr, 674 NULL, 675 }; 676 677 static struct attribute_group loop_attribute_group = { 678 .name = "loop", 679 .attrs= loop_attrs, 680 }; 681 682 static int loop_sysfs_init(struct loop_device *lo) 683 { 684 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 685 &loop_attribute_group); 686 } 687 688 static void loop_sysfs_exit(struct loop_device *lo) 689 { 690 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 691 &loop_attribute_group); 692 } 693 694 static void loop_config_discard(struct loop_device *lo) 695 { 696 struct file *file = lo->lo_backing_file; 697 struct inode *inode = file->f_mapping->host; 698 struct request_queue *q = lo->lo_queue; 699 700 /* 701 * We use punch hole to reclaim the free space used by the 702 * image a.k.a. discard. However we do not support discard if 703 * encryption is enabled, because it may give an attacker 704 * useful information. 705 */ 706 if ((!file->f_op->fallocate) || 707 lo->lo_encrypt_key_size) { 708 q->limits.discard_granularity = 0; 709 q->limits.discard_alignment = 0; 710 q->limits.max_discard_sectors = 0; 711 q->limits.discard_zeroes_data = 0; 712 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 713 return; 714 } 715 716 q->limits.discard_granularity = inode->i_sb->s_blocksize; 717 q->limits.discard_alignment = 0; 718 q->limits.max_discard_sectors = UINT_MAX >> 9; 719 q->limits.discard_zeroes_data = 1; 720 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 721 } 722 723 static int loop_set_fd(struct loop_device *lo, fmode_t mode, 724 struct block_device *bdev, unsigned int arg) 725 { 726 struct file *file, *f; 727 struct inode *inode; 728 struct address_space *mapping; 729 unsigned lo_blocksize; 730 int lo_flags = 0; 731 int error; 732 loff_t size; 733 734 /* This is safe, since we have a reference from open(). */ 735 __module_get(THIS_MODULE); 736 737 error = -EBADF; 738 file = fget(arg); 739 if (!file) 740 goto out; 741 742 error = -EBUSY; 743 if (lo->lo_state != Lo_unbound) 744 goto out_putf; 745 746 /* Avoid recursion */ 747 f = file; 748 while (is_loop_device(f)) { 749 struct loop_device *l; 750 751 if (f->f_mapping->host->i_bdev == bdev) 752 goto out_putf; 753 754 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 755 if (l->lo_state == Lo_unbound) { 756 error = -EINVAL; 757 goto out_putf; 758 } 759 f = l->lo_backing_file; 760 } 761 762 mapping = file->f_mapping; 763 inode = mapping->host; 764 765 error = -EINVAL; 766 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 767 goto out_putf; 768 769 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || 770 !file->f_op->write) 771 lo_flags |= LO_FLAGS_READ_ONLY; 772 773 lo_blocksize = S_ISBLK(inode->i_mode) ? 
		inode->i_bdev->bd_block_size : PAGE_SIZE;

	error = -EFBIG;
	size = get_loop_size(lo, file);
	if ((loff_t)(sector_t)size != size)
		goto out_putf;

	error = 0;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = transfer_none;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_flush(lo->lo_queue, REQ_FLUSH);

	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);
	loop_sysfs_init(lo);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		ioctl_by_bdev(bdev, BLKRRPART, 0);

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	return 0;

out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}

static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown when
	 * the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid on
	 * the loopback device whenever they appear. xfstests is notorious for
	 * failing tests because blkid via udev races with a losetup
	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (lo->lo_refcnt > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_ctl_mutex);
		return 0;
	}

	if (filp == NULL)
		return -EINVAL;

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_size(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}

static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit)
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
			return -EFBIG;

	loop_config_discard(lo);

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	    (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
	    !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		ioctl_by_bdev(lo->lo_device, BLKRRPART, 0);
	}

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(&file->f_path, &stat);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg,
			   sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
{
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}

static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo, bdev);
		break;
	default:
		err = lo->ioctl ?
			lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}

#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}

/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}

static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	mutex_lock(&lo->lo_ctl_mutex);
	lo->lo_refcnt++;
	mutex_unlock(&lo->lo_ctl_mutex);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	mutex_lock(&lo->lo_ctl_mutex);

	if (--lo->lo_refcnt)
		goto out;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo);
		if (!err)
			return;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

out:
	mutex_unlock(&lo->lo_ctl_mutex);
}

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
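
/*
 * Usage sketch (illustrative): the two parameters below are set at
 * module load or on the kernel command line, e.g.
 *
 *	modprobe loop max_loop=8 max_part=15
 *
 * or, for the built-in case, "max_loop=8" on the boot command line
 * (see max_loop_setup() at the end of this file). Both are read-only
 * at runtime (S_IRUGO) and visible under /sys/module/loop/parameters/.
 */
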
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	blk_mq_start_request(bd->rq);

	if (cmd->rq->cmd_flags & REQ_WRITE) {
		struct loop_device *lo = cmd->rq->q->queuedata;
		bool need_sched = true;

		spin_lock_irq(&lo->lo_lock);
		if (lo->write_started)
			need_sched = false;
		else
			lo->write_started = true;
		list_add_tail(&cmd->list, &lo->write_cmd_head);
		spin_unlock_irq(&lo->lo_lock);

		if (need_sched)
			queue_work(loop_wq, &lo->write_work);
	} else {
		queue_work(loop_wq, &cmd->read_work);
	}

	return BLK_MQ_RQ_QUEUE_OK;
}

static void loop_handle_cmd(struct loop_cmd *cmd)
{
	const bool write = cmd->rq->cmd_flags & REQ_WRITE;
	struct loop_device *lo = cmd->rq->q->queuedata;
	int ret = -EIO;

	if (lo->lo_state != Lo_bound)
		goto failed;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto failed;

	ret = do_req_filebacked(lo, cmd->rq);

failed:
	if (ret)
		cmd->rq->errors = -EIO;
	blk_mq_complete_request(cmd->rq);
}

static void loop_queue_write_work(struct work_struct *work)
{
	struct loop_device *lo =
		container_of(work, struct loop_device, write_work);
	LIST_HEAD(cmd_list);

	spin_lock_irq(&lo->lo_lock);
 repeat:
	list_splice_init(&lo->write_cmd_head, &cmd_list);
	spin_unlock_irq(&lo->lo_lock);

	while (!list_empty(&cmd_list)) {
		struct loop_cmd *cmd = list_first_entry(&cmd_list,
				struct loop_cmd, list);
		list_del_init(&cmd->list);
		loop_handle_cmd(cmd);
	}

	spin_lock_irq(&lo->lo_lock);
	if (!list_empty(&lo->write_cmd_head))
		goto repeat;
	lo->write_started = false;
	spin_unlock_irq(&lo->lo_lock);
}

static void loop_queue_read_work(struct work_struct *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, read_work);

	loop_handle_cmd(cmd);
}

static int loop_init_request(void *data, struct request *rq,
		unsigned int hctx_idx,
		unsigned int request_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	INIT_WORK(&cmd->read_work, loop_queue_read_work);

	return 0;
}

static struct blk_mq_ops loop_mq_ops = {
	.queue_rq	= loop_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= loop_init_request,
};

static int loop_add(struct loop_device **l, int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_state = Lo_unbound;

	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	if (err < 0)
		goto out_free_dev;
	i = err;

	err = -ENOMEM;
	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);
	if (err)
		goto out_free_idr;

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR_OR_NULL(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;
	}
	lo->lo_queue->queuedata = lo;

	INIT_LIST_HEAD(&lo->write_cmd_head);
	INIT_WORK(&lo->write_work, loop_queue_write_work);

	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minor space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static, inflexible and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
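	/*
	 * Example (illustrative, hypothetical values): with max_part=15
	 * the module picks part_shift = fls(15) = 4, so loop device N
	 * gets first_minor = N << 4. /dev/loop3 is then minor 48 and its
	 * partitions loop3p1..loop3p15 occupy minors 49-63; anything
	 * beyond the 16-minor stride spills into the extended dev_t
	 * space via GENHD_FL_EXT_DEVT.
	 */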
	if (!part_shift)
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	mutex_init(&lo->lo_ctl_mutex);
	lo->lo_number		= i;
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	*l = lo;
	return lo->lo_number;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
out:
	return err;
}

static void loop_remove(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	blk_cleanup_queue(lo->lo_queue);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);
	kfree(lo);
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {
		*l = lo;
		return 1;
	}
	return 0;
}

static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;
	int ret = -ENODEV;

	if (i < 0) {
		int err;

		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
		if (err == 1) {
			*l = lo;
			ret = lo->lo_number;
		}
		goto out;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (lo) {
		*l = lo;
		ret = lo->lo_number;
	}
out:
	return ret;
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = NULL;
	else
		kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);

	*part = 0;
	return kobj;
}

static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret = -ENOSYS;

	mutex_lock(&loop_index_mutex);
	switch (cmd) {
	case LOOP_CTL_ADD:
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		if (lo->lo_refcnt > 0) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_index_mutex);

	return ret;
}

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};
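
/*
 * Userspace usage sketch (illustrative): /dev/loop-control lets tools
 * allocate devices without probing device nodes one by one, e.g.
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr  = ioctl(ctl, LOOP_CTL_GET_FREE);	// first unbound device
 *	ioctl(ctl, LOOP_CTL_ADD, 7);			// create /dev/loop7
 *	ioctl(ctl, LOOP_CTL_REMOVE, 7);			// delete it again
 *
 * LOOP_CTL_GET_FREE returns the device number, adding a fresh device
 * when every existing one is bound; LOOP_CTL_REMOVE fails with -EBUSY
 * while the device is bound or open.
 */
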
static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	err = misc_register(&loop_misc);
	if (err < 0)
		return err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct minor number
		 * if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto misc_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto misc_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto misc_out;
	}

	loop_wq = alloc_workqueue("kloopd",
			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!loop_wq) {
		err = -ENOMEM;
		/* don't leak the major registered just above */
		unregister_blkdev(LOOP_MAJOR, "loop");
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
	return err;
}

static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	destroy_workqueue(loop_wq);

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif