1 /* 2 * linux/drivers/block/loop.c 3 * 4 * Written by Theodore Ts'o, 3/29/93 5 * 6 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is 7 * permitted under the GNU General Public License. 8 * 9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993 10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996 11 * 12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994 13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996 14 * 15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997 16 * 17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998 18 * 19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998 20 * 21 * Loadable modules and other fixes by AK, 1998 22 * 23 * Make real block number available to downstream transfer functions, enables 24 * CBC (and relatives) mode encryption requiring unique IVs per data block. 25 * Reed H. Petty, rhp@draper.net 26 * 27 * Maximum number of loop devices now dynamic via max_loop module parameter. 28 * Russell Kroll <rkroll@exploits.org> 19990701 29 * 30 * Maximum number of loop devices when compiled-in now selectable by passing 31 * max_loop=<1-255> to the kernel on boot. 32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999 33 * 34 * Completely rewrite request handling to be make_request_fn style and 35 * non blocking, pushing work to a helper thread. Lots of fixes from 36 * Al Viro too. 37 * Jens Axboe <axboe@suse.de>, Nov 2000 38 * 39 * Support up to 256 loop devices 40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002 41 * 42 * Support for falling back on the write file operation when the address space 43 * operations write_begin is not available on the backing filesystem. 44 * Anton Altaparmakov, 16 Feb 2005 45 * 46 * Still To Fix: 47 * - Advisory locking is ignored here. 48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN 49 * 50 */ 51 52 #include <linux/module.h> 53 #include <linux/moduleparam.h> 54 #include <linux/sched.h> 55 #include <linux/fs.h> 56 #include <linux/file.h> 57 #include <linux/stat.h> 58 #include <linux/errno.h> 59 #include <linux/major.h> 60 #include <linux/wait.h> 61 #include <linux/blkdev.h> 62 #include <linux/blkpg.h> 63 #include <linux/init.h> 64 #include <linux/swap.h> 65 #include <linux/slab.h> 66 #include <linux/loop.h> 67 #include <linux/compat.h> 68 #include <linux/suspend.h> 69 #include <linux/freezer.h> 70 #include <linux/mutex.h> 71 #include <linux/writeback.h> 72 #include <linux/buffer_head.h> /* for invalidate_bdev() */ 73 #include <linux/completion.h> 74 #include <linux/highmem.h> 75 #include <linux/kthread.h> 76 #include <linux/splice.h> 77 #include <linux/sysfs.h> 78 #include <linux/miscdevice.h> 79 #include <asm/uaccess.h> 80 81 static DEFINE_IDR(loop_index_idr); 82 static DEFINE_MUTEX(loop_index_mutex); 83 84 static int max_part; 85 static int part_shift; 86 87 /* 88 * Transfer functions 89 */ 90 static int transfer_none(struct loop_device *lo, int cmd, 91 struct page *raw_page, unsigned raw_off, 92 struct page *loop_page, unsigned loop_off, 93 int size, sector_t real_block) 94 { 95 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; 96 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; 97 98 if (cmd == READ) 99 memcpy(loop_buf, raw_buf, size); 100 else 101 memcpy(raw_buf, loop_buf, size); 102 103 kunmap_atomic(loop_buf, KM_USER1); 104 kunmap_atomic(raw_buf, KM_USER0); 105 cond_resched(); 106 return 0; 107 } 108 109 static int transfer_xor(struct loop_device *lo, int cmd, 110 struct page *raw_page, unsigned raw_off, 111 struct page *loop_page, unsigned loop_off, 112 int size, sector_t real_block) 113 { 114 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; 115 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; 116 char *in, *out, *key; 117 int i, keysize; 118 119 if (cmd == READ) { 120 in = raw_buf; 121 out = loop_buf; 122 } else { 123 in = loop_buf; 124 out = raw_buf; 125 } 126 127 key = lo->lo_encrypt_key; 128 keysize = lo->lo_encrypt_key_size; 129 for (i = 0; i < size; i++) 130 *out++ = *in++ ^ key[(i & 511) % keysize]; 131 132 kunmap_atomic(loop_buf, KM_USER1); 133 kunmap_atomic(raw_buf, KM_USER0); 134 cond_resched(); 135 return 0; 136 } 137 138 static int xor_init(struct loop_device *lo, const struct loop_info64 *info) 139 { 140 if (unlikely(info->lo_encrypt_key_size <= 0)) 141 return -EINVAL; 142 return 0; 143 } 144 145 static struct loop_func_table none_funcs = { 146 .number = LO_CRYPT_NONE, 147 .transfer = transfer_none, 148 }; 149 150 static struct loop_func_table xor_funcs = { 151 .number = LO_CRYPT_XOR, 152 .transfer = transfer_xor, 153 .init = xor_init 154 }; 155 156 /* xfer_funcs[0] is special - its release function is never called */ 157 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { 158 &none_funcs, 159 &xor_funcs 160 }; 161 162 static loff_t get_loop_size(struct loop_device *lo, struct file *file) 163 { 164 loff_t size, offset, loopsize; 165 166 /* Compute loopsize in bytes */ 167 size = i_size_read(file->f_mapping->host); 168 offset = lo->lo_offset; 169 loopsize = size - offset; 170 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) 171 loopsize = lo->lo_sizelimit; 172 173 /* 174 * Unfortunately, if we want to do I/O on the device, 175 * the number of 512-byte sectors has to fit into a sector_t. 176 */ 177 return loopsize >> 9; 178 } 179 180 static int 181 figure_loop_size(struct loop_device *lo) 182 { 183 loff_t size = get_loop_size(lo, lo->lo_backing_file); 184 sector_t x = (sector_t)size; 185 186 if (unlikely((loff_t)x != size)) 187 return -EFBIG; 188 189 set_capacity(lo->lo_disk, x); 190 return 0; 191 } 192 193 static inline int 194 lo_do_transfer(struct loop_device *lo, int cmd, 195 struct page *rpage, unsigned roffs, 196 struct page *lpage, unsigned loffs, 197 int size, sector_t rblock) 198 { 199 if (unlikely(!lo->transfer)) 200 return 0; 201 202 return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); 203 } 204 205 /** 206 * do_lo_send_aops - helper for writing data to a loop device 207 * 208 * This is the fast version for backing filesystems which implement the address 209 * space operations write_begin and write_end. 210 */ 211 static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, 212 loff_t pos, struct page *unused) 213 { 214 struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ 215 struct address_space *mapping = file->f_mapping; 216 pgoff_t index; 217 unsigned offset, bv_offs; 218 int len, ret; 219 220 mutex_lock(&mapping->host->i_mutex); 221 index = pos >> PAGE_CACHE_SHIFT; 222 offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1); 223 bv_offs = bvec->bv_offset; 224 len = bvec->bv_len; 225 while (len > 0) { 226 sector_t IV; 227 unsigned size, copied; 228 int transfer_result; 229 struct page *page; 230 void *fsdata; 231 232 IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); 233 size = PAGE_CACHE_SIZE - offset; 234 if (size > len) 235 size = len; 236 237 ret = pagecache_write_begin(file, mapping, pos, size, 0, 238 &page, &fsdata); 239 if (ret) 240 goto fail; 241 242 file_update_time(file); 243 244 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 245 bvec->bv_page, bv_offs, size, IV); 246 copied = size; 247 if (unlikely(transfer_result)) 248 copied = 0; 249 250 ret = pagecache_write_end(file, mapping, pos, size, copied, 251 page, fsdata); 252 if (ret < 0 || ret != copied) 253 goto fail; 254 255 if (unlikely(transfer_result)) 256 goto fail; 257 258 bv_offs += copied; 259 len -= copied; 260 offset = 0; 261 index++; 262 pos += copied; 263 } 264 ret = 0; 265 out: 266 mutex_unlock(&mapping->host->i_mutex); 267 return ret; 268 fail: 269 ret = -1; 270 goto out; 271 } 272 273 /** 274 * __do_lo_send_write - helper for writing data to a loop device 275 * 276 * This helper just factors out common code between do_lo_send_direct_write() 277 * and do_lo_send_write(). 278 */ 279 static int __do_lo_send_write(struct file *file, 280 u8 *buf, const int len, loff_t pos) 281 { 282 ssize_t bw; 283 mm_segment_t old_fs = get_fs(); 284 285 set_fs(get_ds()); 286 bw = file->f_op->write(file, buf, len, &pos); 287 set_fs(old_fs); 288 if (likely(bw == len)) 289 return 0; 290 printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", 291 (unsigned long long)pos, len); 292 if (bw >= 0) 293 bw = -EIO; 294 return bw; 295 } 296 297 /** 298 * do_lo_send_direct_write - helper for writing data to a loop device 299 * 300 * This is the fast, non-transforming version for backing filesystems which do 301 * not implement the address space operations write_begin and write_end. 302 * It uses the write file operation which should be present on all writeable 303 * filesystems. 304 */ 305 static int do_lo_send_direct_write(struct loop_device *lo, 306 struct bio_vec *bvec, loff_t pos, struct page *page) 307 { 308 ssize_t bw = __do_lo_send_write(lo->lo_backing_file, 309 kmap(bvec->bv_page) + bvec->bv_offset, 310 bvec->bv_len, pos); 311 kunmap(bvec->bv_page); 312 cond_resched(); 313 return bw; 314 } 315 316 /** 317 * do_lo_send_write - helper for writing data to a loop device 318 * 319 * This is the slow, transforming version for filesystems which do not 320 * implement the address space operations write_begin and write_end. It 321 * uses the write file operation which should be present on all writeable 322 * filesystems. 323 * 324 * Using fops->write is slower than using aops->{prepare,commit}_write in the 325 * transforming case because we need to double buffer the data as we cannot do 326 * the transformations in place as we do not have direct access to the 327 * destination pages of the backing file. 328 */ 329 static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, 330 loff_t pos, struct page *page) 331 { 332 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, 333 bvec->bv_offset, bvec->bv_len, pos >> 9); 334 if (likely(!ret)) 335 return __do_lo_send_write(lo->lo_backing_file, 336 page_address(page), bvec->bv_len, 337 pos); 338 printk(KERN_ERR "loop: Transfer error at byte offset %llu, " 339 "length %i.\n", (unsigned long long)pos, bvec->bv_len); 340 if (ret > 0) 341 ret = -EIO; 342 return ret; 343 } 344 345 static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) 346 { 347 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, 348 struct page *page); 349 struct bio_vec *bvec; 350 struct page *page = NULL; 351 int i, ret = 0; 352 353 do_lo_send = do_lo_send_aops; 354 if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) { 355 do_lo_send = do_lo_send_direct_write; 356 if (lo->transfer != transfer_none) { 357 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 358 if (unlikely(!page)) 359 goto fail; 360 kmap(page); 361 do_lo_send = do_lo_send_write; 362 } 363 } 364 bio_for_each_segment(bvec, bio, i) { 365 ret = do_lo_send(lo, bvec, pos, page); 366 if (ret < 0) 367 break; 368 pos += bvec->bv_len; 369 } 370 if (page) { 371 kunmap(page); 372 __free_page(page); 373 } 374 out: 375 return ret; 376 fail: 377 printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n"); 378 ret = -ENOMEM; 379 goto out; 380 } 381 382 struct lo_read_data { 383 struct loop_device *lo; 384 struct page *page; 385 unsigned offset; 386 int bsize; 387 }; 388 389 static int 390 lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 391 struct splice_desc *sd) 392 { 393 struct lo_read_data *p = sd->u.data; 394 struct loop_device *lo = p->lo; 395 struct page *page = buf->page; 396 sector_t IV; 397 int size; 398 399 IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + 400 (buf->offset >> 9); 401 size = sd->len; 402 if (size > p->bsize) 403 size = p->bsize; 404 405 if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) { 406 printk(KERN_ERR "loop: transfer error block %ld\n", 407 page->index); 408 size = -EINVAL; 409 } 410 411 flush_dcache_page(p->page); 412 413 if (size > 0) 414 p->offset += size; 415 416 return size; 417 } 418 419 static int 420 lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) 421 { 422 return __splice_from_pipe(pipe, sd, lo_splice_actor); 423 } 424 425 static int 426 do_lo_receive(struct loop_device *lo, 427 struct bio_vec *bvec, int bsize, loff_t pos) 428 { 429 struct lo_read_data cookie; 430 struct splice_desc sd; 431 struct file *file; 432 long retval; 433 434 cookie.lo = lo; 435 cookie.page = bvec->bv_page; 436 cookie.offset = bvec->bv_offset; 437 cookie.bsize = bsize; 438 439 sd.len = 0; 440 sd.total_len = bvec->bv_len; 441 sd.flags = 0; 442 sd.pos = pos; 443 sd.u.data = &cookie; 444 445 file = lo->lo_backing_file; 446 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); 447 448 if (retval < 0) 449 return retval; 450 451 return 0; 452 } 453 454 static int 455 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) 456 { 457 struct bio_vec *bvec; 458 int i, ret = 0; 459 460 bio_for_each_segment(bvec, bio, i) { 461 ret = do_lo_receive(lo, bvec, bsize, pos); 462 if (ret < 0) 463 break; 464 pos += bvec->bv_len; 465 } 466 return ret; 467 } 468 469 static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) 470 { 471 loff_t pos; 472 int ret; 473 474 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 475 476 if (bio_rw(bio) == WRITE) { 477 struct file *file = lo->lo_backing_file; 478 479 if (bio->bi_rw & REQ_FLUSH) { 480 ret = vfs_fsync(file, 0); 481 if (unlikely(ret && ret != -EINVAL)) { 482 ret = -EIO; 483 goto out; 484 } 485 } 486 487 ret = lo_send(lo, bio, pos); 488 489 if ((bio->bi_rw & REQ_FUA) && !ret) { 490 ret = vfs_fsync(file, 0); 491 if (unlikely(ret && ret != -EINVAL)) 492 ret = -EIO; 493 } 494 } else 495 ret = lo_receive(lo, bio, lo->lo_blocksize, pos); 496 497 out: 498 return ret; 499 } 500 501 /* 502 * Add bio to back of pending list 503 */ 504 static void loop_add_bio(struct loop_device *lo, struct bio *bio) 505 { 506 bio_list_add(&lo->lo_bio_list, bio); 507 } 508 509 /* 510 * Grab first pending buffer 511 */ 512 static struct bio *loop_get_bio(struct loop_device *lo) 513 { 514 return bio_list_pop(&lo->lo_bio_list); 515 } 516 517 static int loop_make_request(struct request_queue *q, struct bio *old_bio) 518 { 519 struct loop_device *lo = q->queuedata; 520 int rw = bio_rw(old_bio); 521 522 if (rw == READA) 523 rw = READ; 524 525 BUG_ON(!lo || (rw != READ && rw != WRITE)); 526 527 spin_lock_irq(&lo->lo_lock); 528 if (lo->lo_state != Lo_bound) 529 goto out; 530 if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY))) 531 goto out; 532 loop_add_bio(lo, old_bio); 533 wake_up(&lo->lo_event); 534 spin_unlock_irq(&lo->lo_lock); 535 return 0; 536 537 out: 538 spin_unlock_irq(&lo->lo_lock); 539 bio_io_error(old_bio); 540 return 0; 541 } 542 543 struct switch_request { 544 struct file *file; 545 struct completion wait; 546 }; 547 548 static void do_loop_switch(struct loop_device *, struct switch_request *); 549 550 static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) 551 { 552 if (unlikely(!bio->bi_bdev)) { 553 do_loop_switch(lo, bio->bi_private); 554 bio_put(bio); 555 } else { 556 int ret = do_bio_filebacked(lo, bio); 557 bio_endio(bio, ret); 558 } 559 } 560 561 /* 562 * worker thread that handles reads/writes to file backed loop devices, 563 * to avoid blocking in our make_request_fn. it also does loop decrypting 564 * on reads for block backed loop, as that is too heavy to do from 565 * b_end_io context where irqs may be disabled. 566 * 567 * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before 568 * calling kthread_stop(). Therefore once kthread_should_stop() is 569 * true, make_request will not place any more requests. Therefore 570 * once kthread_should_stop() is true and lo_bio is NULL, we are 571 * done with the loop. 572 */ 573 static int loop_thread(void *data) 574 { 575 struct loop_device *lo = data; 576 struct bio *bio; 577 578 set_user_nice(current, -20); 579 580 while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) { 581 582 wait_event_interruptible(lo->lo_event, 583 !bio_list_empty(&lo->lo_bio_list) || 584 kthread_should_stop()); 585 586 if (bio_list_empty(&lo->lo_bio_list)) 587 continue; 588 spin_lock_irq(&lo->lo_lock); 589 bio = loop_get_bio(lo); 590 spin_unlock_irq(&lo->lo_lock); 591 592 BUG_ON(!bio); 593 loop_handle_bio(lo, bio); 594 } 595 596 return 0; 597 } 598 599 /* 600 * loop_switch performs the hard work of switching a backing store. 601 * First it needs to flush existing IO, it does this by sending a magic 602 * BIO down the pipe. The completion of this BIO does the actual switch. 603 */ 604 static int loop_switch(struct loop_device *lo, struct file *file) 605 { 606 struct switch_request w; 607 struct bio *bio = bio_alloc(GFP_KERNEL, 0); 608 if (!bio) 609 return -ENOMEM; 610 init_completion(&w.wait); 611 w.file = file; 612 bio->bi_private = &w; 613 bio->bi_bdev = NULL; 614 loop_make_request(lo->lo_queue, bio); 615 wait_for_completion(&w.wait); 616 return 0; 617 } 618 619 /* 620 * Helper to flush the IOs in loop, but keeping loop thread running 621 */ 622 static int loop_flush(struct loop_device *lo) 623 { 624 /* loop not yet configured, no running thread, nothing to flush */ 625 if (!lo->lo_thread) 626 return 0; 627 628 return loop_switch(lo, NULL); 629 } 630 631 /* 632 * Do the actual switch; called from the BIO completion routine 633 */ 634 static void do_loop_switch(struct loop_device *lo, struct switch_request *p) 635 { 636 struct file *file = p->file; 637 struct file *old_file = lo->lo_backing_file; 638 struct address_space *mapping; 639 640 /* if no new file, only flush of queued bios requested */ 641 if (!file) 642 goto out; 643 644 mapping = file->f_mapping; 645 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); 646 lo->lo_backing_file = file; 647 lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ? 648 mapping->host->i_bdev->bd_block_size : PAGE_SIZE; 649 lo->old_gfp_mask = mapping_gfp_mask(mapping); 650 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 651 out: 652 complete(&p->wait); 653 } 654 655 656 /* 657 * loop_change_fd switched the backing store of a loopback device to 658 * a new file. This is useful for operating system installers to free up 659 * the original file and in High Availability environments to switch to 660 * an alternative location for the content in case of server meltdown. 661 * This can only work if the loop device is used read-only, and if the 662 * new backing store is the same size and type as the old backing store. 663 */ 664 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, 665 unsigned int arg) 666 { 667 struct file *file, *old_file; 668 struct inode *inode; 669 int error; 670 671 error = -ENXIO; 672 if (lo->lo_state != Lo_bound) 673 goto out; 674 675 /* the loop device has to be read-only */ 676 error = -EINVAL; 677 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) 678 goto out; 679 680 error = -EBADF; 681 file = fget(arg); 682 if (!file) 683 goto out; 684 685 inode = file->f_mapping->host; 686 old_file = lo->lo_backing_file; 687 688 error = -EINVAL; 689 690 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 691 goto out_putf; 692 693 /* size of the new backing store needs to be the same */ 694 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 695 goto out_putf; 696 697 /* and ... switch */ 698 error = loop_switch(lo, file); 699 if (error) 700 goto out_putf; 701 702 fput(old_file); 703 if (max_part > 0) 704 ioctl_by_bdev(bdev, BLKRRPART, 0); 705 return 0; 706 707 out_putf: 708 fput(file); 709 out: 710 return error; 711 } 712 713 static inline int is_loop_device(struct file *file) 714 { 715 struct inode *i = file->f_mapping->host; 716 717 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; 718 } 719 720 /* loop sysfs attributes */ 721 722 static ssize_t loop_attr_show(struct device *dev, char *page, 723 ssize_t (*callback)(struct loop_device *, char *)) 724 { 725 struct gendisk *disk = dev_to_disk(dev); 726 struct loop_device *lo = disk->private_data; 727 728 return callback(lo, page); 729 } 730 731 #define LOOP_ATTR_RO(_name) \ 732 static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ 733 static ssize_t loop_attr_do_show_##_name(struct device *d, \ 734 struct device_attribute *attr, char *b) \ 735 { \ 736 return loop_attr_show(d, b, loop_attr_##_name##_show); \ 737 } \ 738 static struct device_attribute loop_attr_##_name = \ 739 __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); 740 741 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) 742 { 743 ssize_t ret; 744 char *p = NULL; 745 746 spin_lock_irq(&lo->lo_lock); 747 if (lo->lo_backing_file) 748 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); 749 spin_unlock_irq(&lo->lo_lock); 750 751 if (IS_ERR_OR_NULL(p)) 752 ret = PTR_ERR(p); 753 else { 754 ret = strlen(p); 755 memmove(buf, p, ret); 756 buf[ret++] = '\n'; 757 buf[ret] = 0; 758 } 759 760 return ret; 761 } 762 763 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) 764 { 765 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); 766 } 767 768 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) 769 { 770 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); 771 } 772 773 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) 774 { 775 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); 776 777 return sprintf(buf, "%s\n", autoclear ? "1" : "0"); 778 } 779 780 LOOP_ATTR_RO(backing_file); 781 LOOP_ATTR_RO(offset); 782 LOOP_ATTR_RO(sizelimit); 783 LOOP_ATTR_RO(autoclear); 784 785 static struct attribute *loop_attrs[] = { 786 &loop_attr_backing_file.attr, 787 &loop_attr_offset.attr, 788 &loop_attr_sizelimit.attr, 789 &loop_attr_autoclear.attr, 790 NULL, 791 }; 792 793 static struct attribute_group loop_attribute_group = { 794 .name = "loop", 795 .attrs= loop_attrs, 796 }; 797 798 static int loop_sysfs_init(struct loop_device *lo) 799 { 800 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 801 &loop_attribute_group); 802 } 803 804 static void loop_sysfs_exit(struct loop_device *lo) 805 { 806 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 807 &loop_attribute_group); 808 } 809 810 static int loop_set_fd(struct loop_device *lo, fmode_t mode, 811 struct block_device *bdev, unsigned int arg) 812 { 813 struct file *file, *f; 814 struct inode *inode; 815 struct address_space *mapping; 816 unsigned lo_blocksize; 817 int lo_flags = 0; 818 int error; 819 loff_t size; 820 821 /* This is safe, since we have a reference from open(). */ 822 __module_get(THIS_MODULE); 823 824 error = -EBADF; 825 file = fget(arg); 826 if (!file) 827 goto out; 828 829 error = -EBUSY; 830 if (lo->lo_state != Lo_unbound) 831 goto out_putf; 832 833 /* Avoid recursion */ 834 f = file; 835 while (is_loop_device(f)) { 836 struct loop_device *l; 837 838 if (f->f_mapping->host->i_bdev == bdev) 839 goto out_putf; 840 841 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 842 if (l->lo_state == Lo_unbound) { 843 error = -EINVAL; 844 goto out_putf; 845 } 846 f = l->lo_backing_file; 847 } 848 849 mapping = file->f_mapping; 850 inode = mapping->host; 851 852 if (!(file->f_mode & FMODE_WRITE)) 853 lo_flags |= LO_FLAGS_READ_ONLY; 854 855 error = -EINVAL; 856 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { 857 const struct address_space_operations *aops = mapping->a_ops; 858 859 if (aops->write_begin) 860 lo_flags |= LO_FLAGS_USE_AOPS; 861 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) 862 lo_flags |= LO_FLAGS_READ_ONLY; 863 864 lo_blocksize = S_ISBLK(inode->i_mode) ? 865 inode->i_bdev->bd_block_size : PAGE_SIZE; 866 867 error = 0; 868 } else { 869 goto out_putf; 870 } 871 872 size = get_loop_size(lo, file); 873 874 if ((loff_t)(sector_t)size != size) { 875 error = -EFBIG; 876 goto out_putf; 877 } 878 879 if (!(mode & FMODE_WRITE)) 880 lo_flags |= LO_FLAGS_READ_ONLY; 881 882 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); 883 884 lo->lo_blocksize = lo_blocksize; 885 lo->lo_device = bdev; 886 lo->lo_flags = lo_flags; 887 lo->lo_backing_file = file; 888 lo->transfer = transfer_none; 889 lo->ioctl = NULL; 890 lo->lo_sizelimit = 0; 891 lo->old_gfp_mask = mapping_gfp_mask(mapping); 892 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 893 894 bio_list_init(&lo->lo_bio_list); 895 896 /* 897 * set queue make_request_fn, and add limits based on lower level 898 * device 899 */ 900 blk_queue_make_request(lo->lo_queue, loop_make_request); 901 lo->lo_queue->queuedata = lo; 902 903 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 904 blk_queue_flush(lo->lo_queue, REQ_FLUSH); 905 906 set_capacity(lo->lo_disk, size); 907 bd_set_size(bdev, size << 9); 908 loop_sysfs_init(lo); 909 /* let user-space know about the new size */ 910 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 911 912 set_blocksize(bdev, lo_blocksize); 913 914 lo->lo_thread = kthread_create(loop_thread, lo, "loop%d", 915 lo->lo_number); 916 if (IS_ERR(lo->lo_thread)) { 917 error = PTR_ERR(lo->lo_thread); 918 goto out_clr; 919 } 920 lo->lo_state = Lo_bound; 921 wake_up_process(lo->lo_thread); 922 if (max_part > 0) 923 ioctl_by_bdev(bdev, BLKRRPART, 0); 924 return 0; 925 926 out_clr: 927 loop_sysfs_exit(lo); 928 lo->lo_thread = NULL; 929 lo->lo_device = NULL; 930 lo->lo_backing_file = NULL; 931 lo->lo_flags = 0; 932 set_capacity(lo->lo_disk, 0); 933 invalidate_bdev(bdev); 934 bd_set_size(bdev, 0); 935 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 936 mapping_set_gfp_mask(mapping, lo->old_gfp_mask); 937 lo->lo_state = Lo_unbound; 938 out_putf: 939 fput(file); 940 out: 941 /* This is safe: open() is still holding a reference. */ 942 module_put(THIS_MODULE); 943 return error; 944 } 945 946 static int 947 loop_release_xfer(struct loop_device *lo) 948 { 949 int err = 0; 950 struct loop_func_table *xfer = lo->lo_encryption; 951 952 if (xfer) { 953 if (xfer->release) 954 err = xfer->release(lo); 955 lo->transfer = NULL; 956 lo->lo_encryption = NULL; 957 module_put(xfer->owner); 958 } 959 return err; 960 } 961 962 static int 963 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, 964 const struct loop_info64 *i) 965 { 966 int err = 0; 967 968 if (xfer) { 969 struct module *owner = xfer->owner; 970 971 if (!try_module_get(owner)) 972 return -EINVAL; 973 if (xfer->init) 974 err = xfer->init(lo, i); 975 if (err) 976 module_put(owner); 977 else 978 lo->lo_encryption = xfer; 979 } 980 return err; 981 } 982 983 static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) 984 { 985 struct file *filp = lo->lo_backing_file; 986 gfp_t gfp = lo->old_gfp_mask; 987 988 if (lo->lo_state != Lo_bound) 989 return -ENXIO; 990 991 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ 992 return -EBUSY; 993 994 if (filp == NULL) 995 return -EINVAL; 996 997 spin_lock_irq(&lo->lo_lock); 998 lo->lo_state = Lo_rundown; 999 spin_unlock_irq(&lo->lo_lock); 1000 1001 kthread_stop(lo->lo_thread); 1002 1003 spin_lock_irq(&lo->lo_lock); 1004 lo->lo_backing_file = NULL; 1005 spin_unlock_irq(&lo->lo_lock); 1006 1007 loop_release_xfer(lo); 1008 lo->transfer = NULL; 1009 lo->ioctl = NULL; 1010 lo->lo_device = NULL; 1011 lo->lo_encryption = NULL; 1012 lo->lo_offset = 0; 1013 lo->lo_sizelimit = 0; 1014 lo->lo_encrypt_key_size = 0; 1015 lo->lo_flags = 0; 1016 lo->lo_thread = NULL; 1017 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 1018 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 1019 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 1020 if (bdev) 1021 invalidate_bdev(bdev); 1022 set_capacity(lo->lo_disk, 0); 1023 loop_sysfs_exit(lo); 1024 if (bdev) { 1025 bd_set_size(bdev, 0); 1026 /* let user-space know about this change */ 1027 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 1028 } 1029 mapping_set_gfp_mask(filp->f_mapping, gfp); 1030 lo->lo_state = Lo_unbound; 1031 /* This is safe: open() is still holding a reference. */ 1032 module_put(THIS_MODULE); 1033 if (max_part > 0 && bdev) 1034 ioctl_by_bdev(bdev, BLKRRPART, 0); 1035 mutex_unlock(&lo->lo_ctl_mutex); 1036 /* 1037 * Need not hold lo_ctl_mutex to fput backing file. 1038 * Calling fput holding lo_ctl_mutex triggers a circular 1039 * lock dependency possibility warning as fput can take 1040 * bd_mutex which is usually taken before lo_ctl_mutex. 1041 */ 1042 fput(filp); 1043 return 0; 1044 } 1045 1046 static int 1047 loop_set_status(struct loop_device *lo, const struct loop_info64 *info) 1048 { 1049 int err; 1050 struct loop_func_table *xfer; 1051 uid_t uid = current_uid(); 1052 1053 if (lo->lo_encrypt_key_size && 1054 lo->lo_key_owner != uid && 1055 !capable(CAP_SYS_ADMIN)) 1056 return -EPERM; 1057 if (lo->lo_state != Lo_bound) 1058 return -ENXIO; 1059 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) 1060 return -EINVAL; 1061 1062 err = loop_release_xfer(lo); 1063 if (err) 1064 return err; 1065 1066 if (info->lo_encrypt_type) { 1067 unsigned int type = info->lo_encrypt_type; 1068 1069 if (type >= MAX_LO_CRYPT) 1070 return -EINVAL; 1071 xfer = xfer_funcs[type]; 1072 if (xfer == NULL) 1073 return -EINVAL; 1074 } else 1075 xfer = NULL; 1076 1077 err = loop_init_xfer(lo, xfer, info); 1078 if (err) 1079 return err; 1080 1081 if (lo->lo_offset != info->lo_offset || 1082 lo->lo_sizelimit != info->lo_sizelimit) { 1083 lo->lo_offset = info->lo_offset; 1084 lo->lo_sizelimit = info->lo_sizelimit; 1085 if (figure_loop_size(lo)) 1086 return -EFBIG; 1087 } 1088 1089 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); 1090 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); 1091 lo->lo_file_name[LO_NAME_SIZE-1] = 0; 1092 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; 1093 1094 if (!xfer) 1095 xfer = &none_funcs; 1096 lo->transfer = xfer->transfer; 1097 lo->ioctl = xfer->ioctl; 1098 1099 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != 1100 (info->lo_flags & LO_FLAGS_AUTOCLEAR)) 1101 lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; 1102 1103 lo->lo_encrypt_key_size = info->lo_encrypt_key_size; 1104 lo->lo_init[0] = info->lo_init[0]; 1105 lo->lo_init[1] = info->lo_init[1]; 1106 if (info->lo_encrypt_key_size) { 1107 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, 1108 info->lo_encrypt_key_size); 1109 lo->lo_key_owner = uid; 1110 } 1111 1112 return 0; 1113 } 1114 1115 static int 1116 loop_get_status(struct loop_device *lo, struct loop_info64 *info) 1117 { 1118 struct file *file = lo->lo_backing_file; 1119 struct kstat stat; 1120 int error; 1121 1122 if (lo->lo_state != Lo_bound) 1123 return -ENXIO; 1124 error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat); 1125 if (error) 1126 return error; 1127 memset(info, 0, sizeof(*info)); 1128 info->lo_number = lo->lo_number; 1129 info->lo_device = huge_encode_dev(stat.dev); 1130 info->lo_inode = stat.ino; 1131 info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); 1132 info->lo_offset = lo->lo_offset; 1133 info->lo_sizelimit = lo->lo_sizelimit; 1134 info->lo_flags = lo->lo_flags; 1135 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); 1136 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); 1137 info->lo_encrypt_type = 1138 lo->lo_encryption ? lo->lo_encryption->number : 0; 1139 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { 1140 info->lo_encrypt_key_size = lo->lo_encrypt_key_size; 1141 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, 1142 lo->lo_encrypt_key_size); 1143 } 1144 return 0; 1145 } 1146 1147 static void 1148 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) 1149 { 1150 memset(info64, 0, sizeof(*info64)); 1151 info64->lo_number = info->lo_number; 1152 info64->lo_device = info->lo_device; 1153 info64->lo_inode = info->lo_inode; 1154 info64->lo_rdevice = info->lo_rdevice; 1155 info64->lo_offset = info->lo_offset; 1156 info64->lo_sizelimit = 0; 1157 info64->lo_encrypt_type = info->lo_encrypt_type; 1158 info64->lo_encrypt_key_size = info->lo_encrypt_key_size; 1159 info64->lo_flags = info->lo_flags; 1160 info64->lo_init[0] = info->lo_init[0]; 1161 info64->lo_init[1] = info->lo_init[1]; 1162 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1163 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); 1164 else 1165 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); 1166 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); 1167 } 1168 1169 static int 1170 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) 1171 { 1172 memset(info, 0, sizeof(*info)); 1173 info->lo_number = info64->lo_number; 1174 info->lo_device = info64->lo_device; 1175 info->lo_inode = info64->lo_inode; 1176 info->lo_rdevice = info64->lo_rdevice; 1177 info->lo_offset = info64->lo_offset; 1178 info->lo_encrypt_type = info64->lo_encrypt_type; 1179 info->lo_encrypt_key_size = info64->lo_encrypt_key_size; 1180 info->lo_flags = info64->lo_flags; 1181 info->lo_init[0] = info64->lo_init[0]; 1182 info->lo_init[1] = info64->lo_init[1]; 1183 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1184 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); 1185 else 1186 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); 1187 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); 1188 1189 /* error in case values were truncated */ 1190 if (info->lo_device != info64->lo_device || 1191 info->lo_rdevice != info64->lo_rdevice || 1192 info->lo_inode != info64->lo_inode || 1193 info->lo_offset != info64->lo_offset) 1194 return -EOVERFLOW; 1195 1196 return 0; 1197 } 1198 1199 static int 1200 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) 1201 { 1202 struct loop_info info; 1203 struct loop_info64 info64; 1204 1205 if (copy_from_user(&info, arg, sizeof (struct loop_info))) 1206 return -EFAULT; 1207 loop_info64_from_old(&info, &info64); 1208 return loop_set_status(lo, &info64); 1209 } 1210 1211 static int 1212 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) 1213 { 1214 struct loop_info64 info64; 1215 1216 if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) 1217 return -EFAULT; 1218 return loop_set_status(lo, &info64); 1219 } 1220 1221 static int 1222 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { 1223 struct loop_info info; 1224 struct loop_info64 info64; 1225 int err = 0; 1226 1227 if (!arg) 1228 err = -EINVAL; 1229 if (!err) 1230 err = loop_get_status(lo, &info64); 1231 if (!err) 1232 err = loop_info64_to_old(&info64, &info); 1233 if (!err && copy_to_user(arg, &info, sizeof(info))) 1234 err = -EFAULT; 1235 1236 return err; 1237 } 1238 1239 static int 1240 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { 1241 struct loop_info64 info64; 1242 int err = 0; 1243 1244 if (!arg) 1245 err = -EINVAL; 1246 if (!err) 1247 err = loop_get_status(lo, &info64); 1248 if (!err && copy_to_user(arg, &info64, sizeof(info64))) 1249 err = -EFAULT; 1250 1251 return err; 1252 } 1253 1254 static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) 1255 { 1256 int err; 1257 sector_t sec; 1258 loff_t sz; 1259 1260 err = -ENXIO; 1261 if (unlikely(lo->lo_state != Lo_bound)) 1262 goto out; 1263 err = figure_loop_size(lo); 1264 if (unlikely(err)) 1265 goto out; 1266 sec = get_capacity(lo->lo_disk); 1267 /* the width of sector_t may be narrow for bit-shift */ 1268 sz = sec; 1269 sz <<= 9; 1270 mutex_lock(&bdev->bd_mutex); 1271 bd_set_size(bdev, sz); 1272 /* let user-space know about the new size */ 1273 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 1274 mutex_unlock(&bdev->bd_mutex); 1275 1276 out: 1277 return err; 1278 } 1279 1280 static int lo_ioctl(struct block_device *bdev, fmode_t mode, 1281 unsigned int cmd, unsigned long arg) 1282 { 1283 struct loop_device *lo = bdev->bd_disk->private_data; 1284 int err; 1285 1286 mutex_lock_nested(&lo->lo_ctl_mutex, 1); 1287 switch (cmd) { 1288 case LOOP_SET_FD: 1289 err = loop_set_fd(lo, mode, bdev, arg); 1290 break; 1291 case LOOP_CHANGE_FD: 1292 err = loop_change_fd(lo, bdev, arg); 1293 break; 1294 case LOOP_CLR_FD: 1295 /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ 1296 err = loop_clr_fd(lo, bdev); 1297 if (!err) 1298 goto out_unlocked; 1299 break; 1300 case LOOP_SET_STATUS: 1301 err = loop_set_status_old(lo, (struct loop_info __user *) arg); 1302 break; 1303 case LOOP_GET_STATUS: 1304 err = loop_get_status_old(lo, (struct loop_info __user *) arg); 1305 break; 1306 case LOOP_SET_STATUS64: 1307 err = loop_set_status64(lo, (struct loop_info64 __user *) arg); 1308 break; 1309 case LOOP_GET_STATUS64: 1310 err = loop_get_status64(lo, (struct loop_info64 __user *) arg); 1311 break; 1312 case LOOP_SET_CAPACITY: 1313 err = -EPERM; 1314 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1315 err = loop_set_capacity(lo, bdev); 1316 break; 1317 default: 1318 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; 1319 } 1320 mutex_unlock(&lo->lo_ctl_mutex); 1321 1322 out_unlocked: 1323 return err; 1324 } 1325 1326 #ifdef CONFIG_COMPAT 1327 struct compat_loop_info { 1328 compat_int_t lo_number; /* ioctl r/o */ 1329 compat_dev_t lo_device; /* ioctl r/o */ 1330 compat_ulong_t lo_inode; /* ioctl r/o */ 1331 compat_dev_t lo_rdevice; /* ioctl r/o */ 1332 compat_int_t lo_offset; 1333 compat_int_t lo_encrypt_type; 1334 compat_int_t lo_encrypt_key_size; /* ioctl w/o */ 1335 compat_int_t lo_flags; /* ioctl r/o */ 1336 char lo_name[LO_NAME_SIZE]; 1337 unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ 1338 compat_ulong_t lo_init[2]; 1339 char reserved[4]; 1340 }; 1341 1342 /* 1343 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info 1344 * - noinlined to reduce stack space usage in main part of driver 1345 */ 1346 static noinline int 1347 loop_info64_from_compat(const struct compat_loop_info __user *arg, 1348 struct loop_info64 *info64) 1349 { 1350 struct compat_loop_info info; 1351 1352 if (copy_from_user(&info, arg, sizeof(info))) 1353 return -EFAULT; 1354 1355 memset(info64, 0, sizeof(*info64)); 1356 info64->lo_number = info.lo_number; 1357 info64->lo_device = info.lo_device; 1358 info64->lo_inode = info.lo_inode; 1359 info64->lo_rdevice = info.lo_rdevice; 1360 info64->lo_offset = info.lo_offset; 1361 info64->lo_sizelimit = 0; 1362 info64->lo_encrypt_type = info.lo_encrypt_type; 1363 info64->lo_encrypt_key_size = info.lo_encrypt_key_size; 1364 info64->lo_flags = info.lo_flags; 1365 info64->lo_init[0] = info.lo_init[0]; 1366 info64->lo_init[1] = info.lo_init[1]; 1367 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1368 memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); 1369 else 1370 memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); 1371 memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); 1372 return 0; 1373 } 1374 1375 /* 1376 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace 1377 * - noinlined to reduce stack space usage in main part of driver 1378 */ 1379 static noinline int 1380 loop_info64_to_compat(const struct loop_info64 *info64, 1381 struct compat_loop_info __user *arg) 1382 { 1383 struct compat_loop_info info; 1384 1385 memset(&info, 0, sizeof(info)); 1386 info.lo_number = info64->lo_number; 1387 info.lo_device = info64->lo_device; 1388 info.lo_inode = info64->lo_inode; 1389 info.lo_rdevice = info64->lo_rdevice; 1390 info.lo_offset = info64->lo_offset; 1391 info.lo_encrypt_type = info64->lo_encrypt_type; 1392 info.lo_encrypt_key_size = info64->lo_encrypt_key_size; 1393 info.lo_flags = info64->lo_flags; 1394 info.lo_init[0] = info64->lo_init[0]; 1395 info.lo_init[1] = info64->lo_init[1]; 1396 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1397 memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); 1398 else 1399 memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); 1400 memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); 1401 1402 /* error in case values were truncated */ 1403 if (info.lo_device != info64->lo_device || 1404 info.lo_rdevice != info64->lo_rdevice || 1405 info.lo_inode != info64->lo_inode || 1406 info.lo_offset != info64->lo_offset || 1407 info.lo_init[0] != info64->lo_init[0] || 1408 info.lo_init[1] != info64->lo_init[1]) 1409 return -EOVERFLOW; 1410 1411 if (copy_to_user(arg, &info, sizeof(info))) 1412 return -EFAULT; 1413 return 0; 1414 } 1415 1416 static int 1417 loop_set_status_compat(struct loop_device *lo, 1418 const struct compat_loop_info __user *arg) 1419 { 1420 struct loop_info64 info64; 1421 int ret; 1422 1423 ret = loop_info64_from_compat(arg, &info64); 1424 if (ret < 0) 1425 return ret; 1426 return loop_set_status(lo, &info64); 1427 } 1428 1429 static int 1430 loop_get_status_compat(struct loop_device *lo, 1431 struct compat_loop_info __user *arg) 1432 { 1433 struct loop_info64 info64; 1434 int err = 0; 1435 1436 if (!arg) 1437 err = -EINVAL; 1438 if (!err) 1439 err = loop_get_status(lo, &info64); 1440 if (!err) 1441 err = loop_info64_to_compat(&info64, arg); 1442 return err; 1443 } 1444 1445 static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, 1446 unsigned int cmd, unsigned long arg) 1447 { 1448 struct loop_device *lo = bdev->bd_disk->private_data; 1449 int err; 1450 1451 switch(cmd) { 1452 case LOOP_SET_STATUS: 1453 mutex_lock(&lo->lo_ctl_mutex); 1454 err = loop_set_status_compat( 1455 lo, (const struct compat_loop_info __user *) arg); 1456 mutex_unlock(&lo->lo_ctl_mutex); 1457 break; 1458 case LOOP_GET_STATUS: 1459 mutex_lock(&lo->lo_ctl_mutex); 1460 err = loop_get_status_compat( 1461 lo, (struct compat_loop_info __user *) arg); 1462 mutex_unlock(&lo->lo_ctl_mutex); 1463 break; 1464 case LOOP_SET_CAPACITY: 1465 case LOOP_CLR_FD: 1466 case LOOP_GET_STATUS64: 1467 case LOOP_SET_STATUS64: 1468 arg = (unsigned long) compat_ptr(arg); 1469 case LOOP_SET_FD: 1470 case LOOP_CHANGE_FD: 1471 err = lo_ioctl(bdev, mode, cmd, arg); 1472 break; 1473 default: 1474 err = -ENOIOCTLCMD; 1475 break; 1476 } 1477 return err; 1478 } 1479 #endif 1480 1481 static int lo_open(struct block_device *bdev, fmode_t mode) 1482 { 1483 struct loop_device *lo; 1484 int err = 0; 1485 1486 mutex_lock(&loop_index_mutex); 1487 lo = bdev->bd_disk->private_data; 1488 if (!lo) { 1489 err = -ENXIO; 1490 goto out; 1491 } 1492 1493 mutex_lock(&lo->lo_ctl_mutex); 1494 lo->lo_refcnt++; 1495 mutex_unlock(&lo->lo_ctl_mutex); 1496 out: 1497 mutex_unlock(&loop_index_mutex); 1498 return err; 1499 } 1500 1501 static int lo_release(struct gendisk *disk, fmode_t mode) 1502 { 1503 struct loop_device *lo = disk->private_data; 1504 int err; 1505 1506 mutex_lock(&lo->lo_ctl_mutex); 1507 1508 if (--lo->lo_refcnt) 1509 goto out; 1510 1511 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { 1512 /* 1513 * In autoclear mode, stop the loop thread 1514 * and remove configuration after last close. 1515 */ 1516 err = loop_clr_fd(lo, NULL); 1517 if (!err) 1518 goto out_unlocked; 1519 } else { 1520 /* 1521 * Otherwise keep thread (if running) and config, 1522 * but flush possible ongoing bios in thread. 1523 */ 1524 loop_flush(lo); 1525 } 1526 1527 out: 1528 mutex_unlock(&lo->lo_ctl_mutex); 1529 out_unlocked: 1530 return 0; 1531 } 1532 1533 static const struct block_device_operations lo_fops = { 1534 .owner = THIS_MODULE, 1535 .open = lo_open, 1536 .release = lo_release, 1537 .ioctl = lo_ioctl, 1538 #ifdef CONFIG_COMPAT 1539 .compat_ioctl = lo_compat_ioctl, 1540 #endif 1541 }; 1542 1543 /* 1544 * And now the modules code and kernel interface. 1545 */ 1546 static int max_loop; 1547 module_param(max_loop, int, S_IRUGO); 1548 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); 1549 module_param(max_part, int, S_IRUGO); 1550 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); 1551 MODULE_LICENSE("GPL"); 1552 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); 1553 1554 int loop_register_transfer(struct loop_func_table *funcs) 1555 { 1556 unsigned int n = funcs->number; 1557 1558 if (n >= MAX_LO_CRYPT || xfer_funcs[n]) 1559 return -EINVAL; 1560 xfer_funcs[n] = funcs; 1561 return 0; 1562 } 1563 1564 static int unregister_transfer_cb(int id, void *ptr, void *data) 1565 { 1566 struct loop_device *lo = ptr; 1567 struct loop_func_table *xfer = data; 1568 1569 mutex_lock(&lo->lo_ctl_mutex); 1570 if (lo->lo_encryption == xfer) 1571 loop_release_xfer(lo); 1572 mutex_unlock(&lo->lo_ctl_mutex); 1573 return 0; 1574 } 1575 1576 int loop_unregister_transfer(int number) 1577 { 1578 unsigned int n = number; 1579 struct loop_func_table *xfer; 1580 1581 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) 1582 return -EINVAL; 1583 1584 xfer_funcs[n] = NULL; 1585 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); 1586 return 0; 1587 } 1588 1589 EXPORT_SYMBOL(loop_register_transfer); 1590 EXPORT_SYMBOL(loop_unregister_transfer); 1591 1592 static int loop_add(struct loop_device **l, int i) 1593 { 1594 struct loop_device *lo; 1595 struct gendisk *disk; 1596 int err; 1597 1598 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1599 if (!lo) { 1600 err = -ENOMEM; 1601 goto out; 1602 } 1603 1604 err = idr_pre_get(&loop_index_idr, GFP_KERNEL); 1605 if (err < 0) 1606 goto out_free_dev; 1607 1608 if (i >= 0) { 1609 int m; 1610 1611 /* create specific i in the index */ 1612 err = idr_get_new_above(&loop_index_idr, lo, i, &m); 1613 if (err >= 0 && i != m) { 1614 idr_remove(&loop_index_idr, m); 1615 err = -EEXIST; 1616 } 1617 } else if (i == -1) { 1618 int m; 1619 1620 /* get next free nr */ 1621 err = idr_get_new(&loop_index_idr, lo, &m); 1622 if (err >= 0) 1623 i = m; 1624 } else { 1625 err = -EINVAL; 1626 } 1627 if (err < 0) 1628 goto out_free_dev; 1629 1630 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1631 if (!lo->lo_queue) 1632 goto out_free_dev; 1633 1634 disk = lo->lo_disk = alloc_disk(1 << part_shift); 1635 if (!disk) 1636 goto out_free_queue; 1637 1638 mutex_init(&lo->lo_ctl_mutex); 1639 lo->lo_number = i; 1640 lo->lo_thread = NULL; 1641 init_waitqueue_head(&lo->lo_event); 1642 spin_lock_init(&lo->lo_lock); 1643 disk->major = LOOP_MAJOR; 1644 disk->first_minor = i << part_shift; 1645 disk->fops = &lo_fops; 1646 disk->private_data = lo; 1647 disk->queue = lo->lo_queue; 1648 sprintf(disk->disk_name, "loop%d", i); 1649 add_disk(disk); 1650 *l = lo; 1651 return lo->lo_number; 1652 1653 out_free_queue: 1654 blk_cleanup_queue(lo->lo_queue); 1655 out_free_dev: 1656 kfree(lo); 1657 out: 1658 return err; 1659 } 1660 1661 static void loop_remove(struct loop_device *lo) 1662 { 1663 del_gendisk(lo->lo_disk); 1664 blk_cleanup_queue(lo->lo_queue); 1665 put_disk(lo->lo_disk); 1666 kfree(lo); 1667 } 1668 1669 static int find_free_cb(int id, void *ptr, void *data) 1670 { 1671 struct loop_device *lo = ptr; 1672 struct loop_device **l = data; 1673 1674 if (lo->lo_state == Lo_unbound) { 1675 *l = lo; 1676 return 1; 1677 } 1678 return 0; 1679 } 1680 1681 static int loop_lookup(struct loop_device **l, int i) 1682 { 1683 struct loop_device *lo; 1684 int ret = -ENODEV; 1685 1686 if (i < 0) { 1687 int err; 1688 1689 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); 1690 if (err == 1) { 1691 *l = lo; 1692 ret = lo->lo_number; 1693 } 1694 goto out; 1695 } 1696 1697 /* lookup and return a specific i */ 1698 lo = idr_find(&loop_index_idr, i); 1699 if (lo) { 1700 *l = lo; 1701 ret = lo->lo_number; 1702 } 1703 out: 1704 return ret; 1705 } 1706 1707 static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1708 { 1709 struct loop_device *lo; 1710 struct kobject *kobj; 1711 int err; 1712 1713 mutex_lock(&loop_index_mutex); 1714 err = loop_lookup(&lo, MINOR(dev) >> part_shift); 1715 if (err < 0) 1716 err = loop_add(&lo, MINOR(dev) >> part_shift); 1717 if (err < 0) 1718 kobj = ERR_PTR(err); 1719 else 1720 kobj = get_disk(lo->lo_disk); 1721 mutex_unlock(&loop_index_mutex); 1722 1723 *part = 0; 1724 return kobj; 1725 } 1726 1727 static long loop_control_ioctl(struct file *file, unsigned int cmd, 1728 unsigned long parm) 1729 { 1730 struct loop_device *lo; 1731 int ret = -ENOSYS; 1732 1733 mutex_lock(&loop_index_mutex); 1734 switch (cmd) { 1735 case LOOP_CTL_ADD: 1736 ret = loop_lookup(&lo, parm); 1737 if (ret >= 0) { 1738 ret = -EEXIST; 1739 break; 1740 } 1741 ret = loop_add(&lo, parm); 1742 break; 1743 case LOOP_CTL_REMOVE: 1744 ret = loop_lookup(&lo, parm); 1745 if (ret < 0) 1746 break; 1747 mutex_lock(&lo->lo_ctl_mutex); 1748 if (lo->lo_state != Lo_unbound) { 1749 ret = -EBUSY; 1750 mutex_unlock(&lo->lo_ctl_mutex); 1751 break; 1752 } 1753 if (lo->lo_refcnt > 0) { 1754 ret = -EBUSY; 1755 mutex_unlock(&lo->lo_ctl_mutex); 1756 break; 1757 } 1758 lo->lo_disk->private_data = NULL; 1759 mutex_unlock(&lo->lo_ctl_mutex); 1760 idr_remove(&loop_index_idr, lo->lo_number); 1761 loop_remove(lo); 1762 break; 1763 case LOOP_CTL_GET_FREE: 1764 ret = loop_lookup(&lo, -1); 1765 if (ret >= 0) 1766 break; 1767 ret = loop_add(&lo, -1); 1768 } 1769 mutex_unlock(&loop_index_mutex); 1770 1771 return ret; 1772 } 1773 1774 static const struct file_operations loop_ctl_fops = { 1775 .open = nonseekable_open, 1776 .unlocked_ioctl = loop_control_ioctl, 1777 .compat_ioctl = loop_control_ioctl, 1778 .owner = THIS_MODULE, 1779 .llseek = noop_llseek, 1780 }; 1781 1782 static struct miscdevice loop_misc = { 1783 .minor = LOOP_CTRL_MINOR, 1784 .name = "loop-control", 1785 .fops = &loop_ctl_fops, 1786 }; 1787 1788 MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); 1789 MODULE_ALIAS("devname:loop-control"); 1790 1791 static int __init loop_init(void) 1792 { 1793 int i, nr; 1794 unsigned long range; 1795 struct loop_device *lo; 1796 int err; 1797 1798 err = misc_register(&loop_misc); 1799 if (err < 0) 1800 return err; 1801 1802 part_shift = 0; 1803 if (max_part > 0) { 1804 part_shift = fls(max_part); 1805 1806 /* 1807 * Adjust max_part according to part_shift as it is exported 1808 * to user space so that user can decide correct minor number 1809 * if [s]he want to create more devices. 1810 * 1811 * Note that -1 is required because partition 0 is reserved 1812 * for the whole disk. 1813 */ 1814 max_part = (1UL << part_shift) - 1; 1815 } 1816 1817 if ((1UL << part_shift) > DISK_MAX_PARTS) 1818 return -EINVAL; 1819 1820 if (max_loop > 1UL << (MINORBITS - part_shift)) 1821 return -EINVAL; 1822 1823 /* 1824 * If max_loop is specified, create that many devices upfront. 1825 * This also becomes a hard limit. If max_loop is not specified, 1826 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module 1827 * init time. Loop devices can be requested on-demand with the 1828 * /dev/loop-control interface, or be instantiated by accessing 1829 * a 'dead' device node. 1830 */ 1831 if (max_loop) { 1832 nr = max_loop; 1833 range = max_loop << part_shift; 1834 } else { 1835 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; 1836 range = 1UL << MINORBITS; 1837 } 1838 1839 if (register_blkdev(LOOP_MAJOR, "loop")) 1840 return -EIO; 1841 1842 blk_register_region(MKDEV(LOOP_MAJOR, 0), range, 1843 THIS_MODULE, loop_probe, NULL, NULL); 1844 1845 /* pre-create number of devices given by config or max_loop */ 1846 mutex_lock(&loop_index_mutex); 1847 for (i = 0; i < nr; i++) 1848 loop_add(&lo, i); 1849 mutex_unlock(&loop_index_mutex); 1850 1851 printk(KERN_INFO "loop: module loaded\n"); 1852 return 0; 1853 } 1854 1855 static int loop_exit_cb(int id, void *ptr, void *data) 1856 { 1857 struct loop_device *lo = ptr; 1858 1859 loop_remove(lo); 1860 return 0; 1861 } 1862 1863 static void __exit loop_exit(void) 1864 { 1865 unsigned long range; 1866 1867 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 1868 1869 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); 1870 idr_remove_all(&loop_index_idr); 1871 idr_destroy(&loop_index_idr); 1872 1873 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1874 unregister_blkdev(LOOP_MAJOR, "loop"); 1875 1876 misc_deregister(&loop_misc); 1877 } 1878 1879 module_init(loop_init); 1880 module_exit(loop_exit); 1881 1882 #ifndef MODULE 1883 static int __init max_loop_setup(char *str) 1884 { 1885 max_loop = simple_strtol(str, NULL, 0); 1886 return 1; 1887 } 1888 1889 __setup("max_loop=", max_loop_setup); 1890 #endif 1891