// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 1993 by Theodore Ts'o.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/uaccess.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <uapi/linux/loop.h>

/* Possible states of device */
enum {
	Lo_unbound,
	Lo_bound,
	Lo_rundown,
	Lo_deleting,
};
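
/*
 * State transitions, as implemented in this file: loop_configure() moves
 * Lo_unbound -> Lo_bound; loop_clr_fd()/lo_release() move Lo_bound ->
 * Lo_rundown; __loop_clr_fd() finishes the teardown and returns the device
 * to Lo_unbound. Lo_deleting marks a device being removed via the
 * loop-control interface (that transition itself is outside this excerpt).
 */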

struct loop_device {
	int			lo_number;
	loff_t			lo_offset;
	loff_t			lo_sizelimit;
	int			lo_flags;
	char			lo_file_name[LO_NAME_SIZE];

	struct file		*lo_backing_file;
	unsigned int		lo_min_dio_size;
	struct block_device	*lo_device;

	gfp_t			old_gfp_mask;

	spinlock_t		lo_lock;
	int			lo_state;
	spinlock_t		lo_work_lock;
	struct workqueue_struct	*workqueue;
	struct work_struct	rootcg_work;
	struct list_head	rootcg_cmd_list;
	struct list_head	idle_worker_list;
	struct rb_root		worker_tree;
	struct timer_list	timer;
	bool			sysfs_inited;

	struct request_queue	*lo_queue;
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*lo_disk;
	struct mutex		lo_mutex;
	bool			idr_visible;
};

struct loop_cmd {
	struct list_head list_entry;
	bool use_aio; /* use AIO interface to handle I/O */
	atomic_t ref; /* only for aio */
	long ret;
	struct kiocb iocb;
	struct bio_vec *bvec;
	struct cgroup_subsys_state *blkcg_css;
	struct cgroup_subsys_state *memcg_css;
};
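
/*
 * struct loop_cmd is the per-request payload (pdu) embedded behind each
 * blk-mq request; it is converted to and from the owning request with
 * blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() throughout this file.
 */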

#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
#define LOOP_DEFAULT_HW_Q_DEPTH 128

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
static DEFINE_MUTEX(loop_validate_mutex);

/**
 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 *
 * @lo: struct loop_device
 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 *
 * Returns 0 on success, -EINTR otherwise.
 *
 * Since loop_validate_file() traverses other "struct loop_device" instances
 * when is_loop_device() is true, we need a global lock for serializing
 * concurrent loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 */
static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
	int err;

	if (global) {
		err = mutex_lock_killable(&loop_validate_mutex);
		if (err)
			return err;
	}
	err = mutex_lock_killable(&lo->lo_mutex);
	if (err && global)
		mutex_unlock(&loop_validate_mutex);
	return err;
}

/**
 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 *
 * @lo: struct loop_device
 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 */
static void loop_global_unlock(struct loop_device *lo, bool global)
{
	mutex_unlock(&lo->lo_mutex);
	if (global)
		mutex_unlock(&loop_validate_mutex);
}
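
/*
 * Typical pairing, as used by loop_configure() and loop_change_fd() below
 * (sketch only):
 *
 *	bool is_loop = is_loop_device(file);
 *
 *	err = loop_global_lock_killable(lo, is_loop);
 *	if (err)
 *		return err;
 *	...
 *	loop_global_unlock(lo, is_loop);
 */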

static int max_part;
static int part_shift;

static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
	loff_t loopsize;
	int ret;

	if (S_ISBLK(file_inode(file)->i_mode)) {
		loopsize = i_size_read(file->f_mapping->host);
	} else {
		struct kstat stat;

		/*
		 * Get the accurate file size. This provides better results
		 * than cached inode data, particularly for network
		 * filesystems where metadata may be stale.
		 */
		ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
		if (ret)
			return 0;

		loopsize = stat.size;
	}

	if (lo->lo_offset > 0)
		loopsize -= lo->lo_offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;
	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
		loopsize = lo->lo_sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}
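
/*
 * Worked example (hypothetical numbers): a 1 MiB regular file with
 * lo_offset = 4096 and no lo_sizelimit yields (1048576 - 4096) >> 9 = 2040
 * usable 512-byte sectors.
 */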

/*
 * We support direct I/O only if lo_offset is aligned with the logical I/O size
 * of the backing device, and the logical block size of the loop device is no
 * smaller than that of the backing device.
 */
static bool lo_can_use_dio(struct loop_device *lo)
{
	if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
		return false;
	if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
		return false;
	if (lo->lo_offset & (lo->lo_min_dio_size - 1))
		return false;
	return true;
}

/*
 * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
 * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
 * disabled when the device block size is too small or the offset is unaligned.
 *
 * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
 * not the originally passed in one.
 */
static inline void loop_update_dio(struct loop_device *lo)
{
	lockdep_assert_held(&lo->lo_mutex);
	WARN_ON_ONCE(lo->lo_state == Lo_bound &&
		     lo->lo_queue->mq_freeze_depth == 0);

	if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}

/**
 * loop_set_size() - sets device size and notifies userspace
 * @lo: struct loop_device to set the size for
 * @size: new size of the loop device
 *
 * Callers must validate that the size passed into this function fits into
 * a sector_t, eg using loop_validate_size()
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
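	/*
	 * set_capacity_and_notify() emits a change uevent itself when the
	 * capacity actually changed; send a plain KOBJ_CHANGE otherwise so
	 * that userspace always sees an event for the (re)configuration.
	 */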
	if (!set_capacity_and_notify(lo->lo_disk, size))
		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}

static void loop_clear_limits(struct loop_device *lo, int mode)
{
	struct queue_limits lim = queue_limits_start_update(lo->lo_queue);

	if (mode & FALLOC_FL_ZERO_RANGE)
		lim.max_write_zeroes_sectors = 0;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		lim.max_hw_discard_sectors = 0;
		lim.discard_granularity = 0;
	}

	/*
	 * XXX: this updates the queue limits without freezing the queue, which
	 * is against the locking protocol and dangerous. But we can't just
	 * freeze the queue as we're inside the ->queue_rq method here. So this
	 * should move out into a workqueue unless we get the file operations
	 * to advertise if they support specific fallocate operations.
	 */
	queue_limits_commit_update(lo->lo_queue, &lim);
}

static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
			int mode)
{
	/*
	 * We use fallocate to manipulate the space mappings used by the image
	 * a.k.a. discard/zerorange.
	 */
	struct file *file = lo->lo_backing_file;
	int ret;

	mode |= FALLOC_FL_KEEP_SIZE;

	if (!bdev_max_discard_sectors(lo->lo_device))
		return -EOPNOTSUPP;

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		return -EIO;

	/*
	 * We initially configure the limits in a hope that fallocate is
	 * supported and clear them here if that turns out not to be true.
	 */
	if (unlikely(ret == -EOPNOTSUPP))
		loop_clear_limits(lo, mode);

	return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	int ret = vfs_fsync(lo->lo_backing_file, 0);

	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	blk_status_t ret = BLK_STS_OK;

	if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
	    req_op(rq) != REQ_OP_READ) {
		if (cmd->ret < 0)
			ret = errno_to_blk_status(cmd->ret);
		goto end_io;
	}

	/*
	 * Short READ - if we got some data, advance our request and
	 * retry it. If we got no data, end the rest with EIO.
	 */
	if (cmd->ret) {
		blk_update_request(rq, BLK_STS_OK, cmd->ret);
		cmd->ret = 0;
		blk_mq_requeue_request(rq, true);
	} else {
		struct bio *bio = rq->bio;

		while (bio) {
			zero_fill_bio(bio);
			bio = bio->bi_next;
		}

		ret = BLK_STS_IOERR;
end_io:
		blk_mq_end_request(rq, ret);
	}
}

static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	if (!atomic_dec_and_test(&cmd->ref))
		return;
	kfree(cmd->bvec);
	cmd->bvec = NULL;
	if (req_op(rq) == REQ_OP_WRITE)
		kiocb_end_write(&cmd->iocb);
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	cmd->ret = ret;
	lo_rw_aio_do_completion(cmd);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, int rw)
{
	struct iov_iter iter;
	struct req_iterator rq_iter;
	struct bio_vec *bvec;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct bio *bio = rq->bio;
	struct file *file = lo->lo_backing_file;
	struct bio_vec tmp;
	unsigned int offset;
	unsigned int nr_bvec;
	int ret;

	nr_bvec = blk_rq_nr_bvec(rq);

	if (rq->bio != rq->biotail) {
		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				     GFP_NOIO);
		if (!bvec)
			return -EIO;
		cmd->bvec = bvec;

		/*
		 * The bios of the request may be started from the middle of
		 * the 'bvec' because of bio splitting, so we can't directly
		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
		 * API will take care of all details for us.
		 */
		rq_for_each_bvec(tmp, rq, rq_iter) {
			*bvec = tmp;
			bvec++;
		}
		bvec = cmd->bvec;
		offset = 0;
	} else {
		/*
		 * Same here, this bio may be started from the middle of the
		 * 'bvec' because of bio splitting, so offset from the bvec
		 * must be passed to iov iterator
		 */
		offset = bio->bi_iter.bi_bvec_done;
		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	}
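	/*
	 * Two references: one is dropped at the end of submission below, the
	 * other by ->ki_complete (lo_rw_aio_complete()). The request is
	 * completed only once both lo_rw_aio_do_completion() calls have run.
	 */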
	atomic_set(&cmd->ref, 2);

	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
	iter.iov_offset = offset;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_ioprio = req_get_ioprio(rq);
	if (cmd->use_aio) {
		cmd->iocb.ki_complete = lo_rw_aio_complete;
		cmd->iocb.ki_flags = IOCB_DIRECT;
	} else {
		cmd->iocb.ki_complete = NULL;
		cmd->iocb.ki_flags = 0;
	}

	if (rw == ITER_SOURCE) {
		kiocb_start_write(&cmd->iocb);
		ret = file->f_op->write_iter(&cmd->iocb, &iter);
	} else {
		ret = file->f_op->read_iter(&cmd->iocb, &iter);
	}

	lo_rw_aio_do_completion(cmd);

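	/*
	 * ->read_iter()/->write_iter() return -EIOCBQUEUED when the aio was
	 * queued and ->ki_complete will be called later; any other return
	 * value is the synchronous result, so feed it through
	 * lo_rw_aio_complete() ourselves. Either way the caller sees
	 * -EIOCBQUEUED and the request finishes via lo_complete_rq().
	 */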
	if (ret != -EIOCBQUEUED)
		lo_rw_aio_complete(&cmd->iocb, ret);
	return -EIOCBQUEUED;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/*
		 * If the caller doesn't want deallocation, call zeroout to
		 * write zeroes to the range. Otherwise, punch them out.
		 */
		return lo_fallocate(lo, rq, pos,
			(rq->cmd_flags & REQ_NOUNMAP) ?
				FALLOC_FL_ZERO_RANGE :
				FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
	case REQ_OP_READ:
		return lo_rw_aio(lo, cmd, pos, ITER_DEST);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void loop_reread_partitions(struct loop_device *lo)
{
	int rc;

	mutex_lock(&lo->lo_disk->open_mutex);
	rc = bdev_disk_changed(lo->lo_disk, false);
	mutex_unlock(&lo->lo_disk->open_mutex);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

static unsigned int loop_query_min_dio_size(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
	struct kstat st;

	/*
	 * Use the minimal dio alignment of the file system if provided.
	 */
	if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
	    (st.result_mask & STATX_DIOALIGN))
		return st.dio_offset_align;

	/*
	 * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
	 * a handful of file systems support the STATX_DIOALIGN flag.
	 */
	if (sb_bdev)
		return bdev_logical_block_size(sb_bdev);
	return SECTOR_SIZE;
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}

static int loop_validate_file(struct file *file, struct block_device *bdev)
{
	struct inode *inode = file->f_mapping->host;
	struct file *f = file;

	/* Avoid recursion */
	while (is_loop_device(f)) {
		struct loop_device *l;

		lockdep_assert_held(&loop_validate_mutex);
		if (f->f_mapping->host->i_rdev == bdev->bd_dev)
			return -EBADF;

		l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
		if (l->lo_state != Lo_bound)
			return -EINVAL;
		/* Order wrt setting lo->lo_backing_file in loop_configure(). */
		rmb();
		f = l->lo_backing_file;
	}
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	return 0;
}

static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
{
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
	mapping_set_gfp_mask(file->f_mapping,
			     lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
	if (lo->lo_backing_file->f_flags & O_DIRECT)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}

static int loop_check_backing_file(struct file *file)
{
	if (!file->f_op->read_iter)
		return -EINVAL;

	if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
		return -EINVAL;

	return 0;
}

/*
 * loop_change_fd switches the backing store of a loopback device to a new
 * file. This is useful for operating system installers to free up the
 * original file and in High Availability environments to switch to an
 * alternative location for the content in case of server meltdown. This can
 * only work if the loop device is used read-only, and if the new backing
 * store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file = fget(arg);
	struct file *old_file;
	unsigned int memflags;
	int error;
	bool partscan;
	bool is_loop;

	if (!file)
		return -EBADF;

	error = loop_check_backing_file(file);
	if (error) {
		fput(file);
		return error;
	}

	/* suppress uevents while reconfiguring the device */
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);

	is_loop = is_loop_device(file);
	error = loop_global_lock_killable(lo, is_loop);
	if (error)
		goto out_putf;
	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out_err;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out_err;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_err;

	old_file = lo->lo_backing_file;

	error = -EINVAL;

	/* size of the new backing store needs to be the same */
	if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
		goto out_err;

	/*
	 * We might switch to direct I/O mode for the loop device; write back
	 * all dirty data in the page cache now so that the individual I/O
	 * operations don't have to do that.
	 */
	vfs_fsync(file, 0);

	/* and ... switch */
	disk_force_media_change(lo->lo_disk);
	memflags = blk_mq_freeze_queue(lo->lo_queue);
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	loop_assign_backing_file(lo, file);
	loop_update_dio(lo);
	blk_mq_unfreeze_queue(lo->lo_queue, memflags);
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	loop_global_unlock(lo, is_loop);

	/*
	 * Flush loop_validate_file() before fput(), for l->lo_backing_file
	 * might be pointing at old_file which might be the last reference.
	 */
	if (!is_loop) {
		mutex_lock(&loop_validate_mutex);
		mutex_unlock(&loop_validate_mutex);
	}
	/*
	 * We must drop file reference outside of lo_mutex as dropping
	 * the file ref can take open_mutex which creates circular locking
	 * dependency.
	 */
	fput(old_file);
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
	if (partscan)
		loop_reread_partitions(lo);

	error = 0;
done:
	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
	return error;

out_err:
	loop_global_unlock(lo, is_loop);
out_putf:
	fput(file);
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
	goto done;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs = loop_attrs,
};

static void loop_sysfs_init(struct loop_device *lo)
{
	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
					       &loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	if (lo->sysfs_inited)
		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
				   &loop_attribute_group);
}

static void loop_get_discard_config(struct loop_device *lo,
				    u32 *granularity, u32 *max_discard_sectors)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct kstatfs sbuf;

	/*
	 * If the backing device is a block device, mirror its zeroing
	 * capability. Set the discard sectors to the block device's zeroing
	 * capabilities because loop discards result in blkdev_issue_zeroout(),
	 * not blkdev_issue_discard(). This maintains consistent behavior with
	 * file-backed loop devices: discarded regions read back as zero.
	 */
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);

		*max_discard_sectors = bdev_write_zeroes_sectors(bdev);
		*granularity = bdev_discard_granularity(bdev);

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard.
	 */
	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
		*max_discard_sectors = UINT_MAX >> 9;
		*granularity = sbuf.f_bsize;
	}
}

struct loop_worker {
	struct rb_node rb_node;
	struct work_struct work;
	struct list_head cmd_list;
	struct list_head idle_list;
	struct loop_device *lo;
	struct cgroup_subsys_state *blkcg_css;
	unsigned long last_ran_at;
};
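
/*
 * One loop_worker is created per blkcg that issues I/O to the device. The
 * workers live in lo->worker_tree, keyed by the blkcg_css pointer, and idle
 * ones are reaped by loop_free_idle_workers() once they have gone unused
 * for LOOP_IDLE_WORKER_TIMEOUT.
 */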

static void loop_workfn(struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
	return !css || css == blkcg_root_css;
}
#else
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
	return !css;
}
#endif

static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
	struct rb_node **node, *parent = NULL;
	struct loop_worker *cur_worker, *worker = NULL;
	struct work_struct *work;
	struct list_head *cmd_list;

	spin_lock_irq(&lo->lo_work_lock);

	if (queue_on_root_worker(cmd->blkcg_css))
		goto queue_work;

	node = &lo->worker_tree.rb_node;

	while (*node) {
		parent = *node;
		cur_worker = container_of(*node, struct loop_worker, rb_node);
		if (cur_worker->blkcg_css == cmd->blkcg_css) {
			worker = cur_worker;
			break;
		} else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
			node = &(*node)->rb_left;
		} else {
			node = &(*node)->rb_right;
		}
	}
	if (worker)
		goto queue_work;

	worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
	/*
	 * In the event we cannot allocate a worker, just queue on the
	 * rootcg worker and issue the I/O as the rootcg
	 */
	if (!worker) {
		cmd->blkcg_css = NULL;
		if (cmd->memcg_css)
			css_put(cmd->memcg_css);
		cmd->memcg_css = NULL;
		goto queue_work;
	}

	worker->blkcg_css = cmd->blkcg_css;
	css_get(worker->blkcg_css);
	INIT_WORK(&worker->work, loop_workfn);
	INIT_LIST_HEAD(&worker->cmd_list);
	INIT_LIST_HEAD(&worker->idle_list);
	worker->lo = lo;
	rb_link_node(&worker->rb_node, parent, node);
	rb_insert_color(&worker->rb_node, &lo->worker_tree);
queue_work:
	if (worker) {
		/*
		 * We need to remove from the idle list here while
		 * holding the lock so that the idle timer doesn't
		 * free the worker
		 */
		if (!list_empty(&worker->idle_list))
			list_del_init(&worker->idle_list);
		work = &worker->work;
		cmd_list = &worker->cmd_list;
	} else {
		work = &lo->rootcg_work;
		cmd_list = &lo->rootcg_cmd_list;
	}
	list_add_tail(&cmd->list_entry, cmd_list);
	queue_work(lo->workqueue, work);
	spin_unlock_irq(&lo->lo_work_lock);
}

static void loop_set_timer(struct loop_device *lo)
{
	timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
}

static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
{
	struct loop_worker *pos, *worker;

	spin_lock_irq(&lo->lo_work_lock);
	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
				 idle_list) {
		if (!delete_all &&
		    time_is_after_jiffies(worker->last_ran_at +
					  LOOP_IDLE_WORKER_TIMEOUT))
			break;
		list_del(&worker->idle_list);
		rb_erase(&worker->rb_node, &lo->worker_tree);
		css_put(worker->blkcg_css);
		kfree(worker);
	}
	if (!list_empty(&lo->idle_worker_list))
		loop_set_timer(lo);
	spin_unlock_irq(&lo->lo_work_lock);
}

static void loop_free_idle_workers_timer(struct timer_list *timer)
{
	struct loop_device *lo = container_of(timer, struct loop_device, timer);

	return loop_free_idle_workers(lo, false);
}

/**
 * loop_set_status_from_info - configure device from loop_info
 * @lo: struct loop_device to configure
 * @info: struct loop_info64 to configure the device with
 *
 * Configures the loop device parameters according to the passed
 * in loop_info64 configuration.
 */
static int
loop_set_status_from_info(struct loop_device *lo,
			  const struct loop_info64 *info)
{
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	switch (info->lo_encrypt_type) {
	case LO_CRYPT_NONE:
		break;
	case LO_CRYPT_XOR:
		pr_warn("support for the xor transformation has been removed.\n");
		return -EINVAL;
	case LO_CRYPT_CRYPTOAPI:
		pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
		return -EINVAL;
	default:
		return -EINVAL;
	}

	/* Avoid assigning overflow values */
	if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
		return -EOVERFLOW;

	lo->lo_offset = info->lo_offset;
	lo->lo_sizelimit = info->lo_sizelimit;

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	return 0;
}

static unsigned int loop_default_blocksize(struct loop_device *lo)
{
	/* In case of direct I/O, match underlying minimum I/O size */
	if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
		return lo->lo_min_dio_size;
	return SECTOR_SIZE;
}

static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
			       unsigned int bsize)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct block_device *backing_bdev = NULL;
	u32 granularity = 0, max_discard_sectors = 0;

	if (S_ISBLK(inode->i_mode))
		backing_bdev = I_BDEV(inode);
	else if (inode->i_sb->s_bdev)
		backing_bdev = inode->i_sb->s_bdev;

	if (!bsize)
		bsize = loop_default_blocksize(lo);

	loop_get_discard_config(lo, &granularity, &max_discard_sectors);

	lim->logical_block_size = bsize;
	lim->physical_block_size = bsize;
	lim->io_min = bsize;
	lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
	if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
		lim->features |= BLK_FEAT_WRITE_CACHE;
	if (backing_bdev && !bdev_nonrot(backing_bdev))
		lim->features |= BLK_FEAT_ROTATIONAL;
	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_write_zeroes_sectors = max_discard_sectors;
	if (max_discard_sectors)
		lim->discard_granularity = granularity;
	else
		lim->discard_granularity = 0;
}

static int loop_configure(struct loop_device *lo, blk_mode_t mode,
			  struct block_device *bdev,
			  const struct loop_config *config)
{
	struct file *file = fget(config->fd);
	struct queue_limits lim;
	int error;
	loff_t size;
	bool partscan;
	bool is_loop;

	if (!file)
		return -EBADF;

	error = loop_check_backing_file(file);
	if (error) {
		fput(file);
		return error;
	}

	is_loop = is_loop_device(file);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * here to avoid changing device under exclusive owner.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		error = bd_prepare_to_claim(bdev, loop_configure, NULL);
		if (error)
			goto out_putf;
	}

	error = loop_global_lock_killable(lo, is_loop);
	if (error)
		goto out_bdev;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_unlock;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_unlock;

	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = loop_set_status_from_info(lo, &config->info);
	if (error)
		goto out_unlock;
	lo->lo_flags = config->info.lo_flags;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
	    !file->f_op->write_iter)
		lo->lo_flags |= LO_FLAGS_READ_ONLY;

	if (!lo->workqueue) {
		lo->workqueue = alloc_workqueue("loop%d",
						WQ_UNBOUND | WQ_FREEZABLE,
						0, lo->lo_number);
		if (!lo->workqueue) {
			error = -ENOMEM;
			goto out_unlock;
		}
	}

	/* suppress uevents while reconfiguring the device */
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);

	disk_force_media_change(lo->lo_disk);
	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_device = bdev;
	loop_assign_backing_file(lo, file);

	lim = queue_limits_start_update(lo->lo_queue);
	loop_update_limits(lo, &lim, config->block_size);
	/* No need to freeze the queue as the device isn't bound yet. */
	error = queue_limits_commit_update(lo->lo_queue, &lim);
	if (error)
		goto out_unlock;

	/*
	 * We might switch to direct I/O mode for the loop device; write back
	 * all dirty data in the page cache now so that the individual I/O
	 * operations don't have to do that.
	 */
	vfs_fsync(file, 0);

	loop_update_dio(lo);
	loop_sysfs_init(lo);

	size = lo_calculate_size(lo, file);
	loop_set_size(lo, size);

	/* Order wrt reading lo_state in loop_validate_file(). */
	wmb();

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	if (partscan)
		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);

	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);

	loop_global_unlock(lo, is_loop);
	if (partscan)
		loop_reread_partitions(lo);

	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, loop_configure);

	return 0;

out_unlock:
	loop_global_unlock(lo, is_loop);
out_bdev:
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, loop_configure);
out_putf:
	fput(file);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
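
/*
 * Userspace sketch (assumptions: /dev/loop0 exists and is unbound, and the
 * running kernel supports LOOP_CONFIGURE; error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int backing = open("disk.img", O_RDWR);
 *	int loopfd = open("/dev/loop0", O_RDWR);
 *	struct loop_config cfg = {
 *		.fd = backing,
 *		.block_size = 4096,
 *		.info.lo_flags = LO_FLAGS_DIRECT_IO,
 *	};
 *	ioctl(loopfd, LOOP_CONFIGURE, &cfg);
 *
 * LOOP_SET_FD is the legacy equivalent with all-default parameters (see
 * lo_ioctl() below).
 */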

static void __loop_clr_fd(struct loop_device *lo)
{
	struct queue_limits lim;
	struct file *filp;
	gfp_t gfp = lo->old_gfp_mask;

	spin_lock_irq(&lo->lo_lock);
	filp = lo->lo_backing_file;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	lo->lo_device = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);

	/*
	 * Reset the block size to the default.
	 *
	 * No queue freezing needed because this is called from the final
	 * ->release call only, so there can't be any outstanding I/O.
	 */
	lim = queue_limits_start_update(lo->lo_queue);
	lim.logical_block_size = SECTOR_SIZE;
	lim.physical_block_size = SECTOR_SIZE;
	lim.io_min = SECTOR_SIZE;
	queue_limits_commit_update(lo->lo_queue, &lim);

	invalidate_disk(lo->lo_disk);
	loop_sysfs_exit(lo);
	/* let user-space know about this change */
	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

	disk_force_media_change(lo->lo_disk);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
		int err;

		/*
		 * open_mutex has been held already in release path, so don't
		 * acquire it if this function is called in such case.
		 *
		 * If the reread partition isn't from release path, lo_refcnt
		 * must be at least one and it can only become zero when the
		 * current holder is released.
		 */
		err = bdev_disk_changed(lo->lo_disk, false);
		if (err)
			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
				__func__, lo->lo_number, err);
		/* Device is gone, no point in returning error */
	}

	/*
	 * lo->lo_state is set to Lo_unbound here after above partscan has
	 * finished. There cannot be anybody else entering __loop_clr_fd() as
	 * Lo_rundown state protects us from all the other places trying to
	 * change the 'lo' device.
	 */
	lo->lo_flags = 0;
	if (!part_shift)
		set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
	mutex_lock(&lo->lo_mutex);
	lo->lo_state = Lo_unbound;
	mutex_unlock(&lo->lo_mutex);

	/*
	 * Need not hold lo_mutex to fput backing file. Calling fput holding
	 * lo_mutex triggers a circular lock dependency possibility warning as
	 * fput can take open_mutex which is usually taken before lo_mutex.
	 */
	fput(filp);
}

static int loop_clr_fd(struct loop_device *lo)
{
	int err;

	/*
	 * Since lo_ioctl() is called without locks held, it is possible that
	 * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
	 *
	 * Therefore, use global lock when setting Lo_rundown state in order to
	 * make sure that loop_validate_file() will fail if the "struct file"
	 * which loop_configure()/loop_change_fd() found via fget() was this
	 * loop device.
	 */
	err = loop_global_lock_killable(lo, true);
	if (err)
		return err;
	if (lo->lo_state != Lo_bound) {
		loop_global_unlock(lo, true);
		return -ENXIO;
	}
	/*
	 * Mark the device for removing the backing device on last close.
	 * If we are the only opener, also switch the state to Lo_rundown here
	 * to prevent new openers from coming in.
	 */
	lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
	if (disk_openers(lo->lo_disk) == 1)
		lo->lo_state = Lo_rundown;
	loop_global_unlock(lo, true);

	return 0;
}

static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	bool partscan = false;
	bool size_changed = false;
	unsigned int memflags;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		return err;
	if (lo->lo_state != Lo_bound) {
		err = -ENXIO;
		goto out_unlock;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		size_changed = true;
		sync_blockdev(lo->lo_device);
		invalidate_bdev(lo->lo_device);
	}

	/* I/O needs to be drained before changing lo_offset or lo_sizelimit */
	memflags = blk_mq_freeze_queue(lo->lo_queue);

	err = loop_set_status_from_info(lo, info);
	if (err)
		goto out_unfreeze;

	partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
		(info->lo_flags & LO_FLAGS_PARTSCAN);

	lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
	lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);

	/* update the direct I/O flag if lo_offset changed */
	loop_update_dio(lo);

out_unfreeze:
	blk_mq_unfreeze_queue(lo->lo_queue, memflags);
	if (partscan)
		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
	if (!err && size_changed) {
		loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);

		loop_set_size(lo, new_size);
	}
out_unlock:
	mutex_unlock(&lo->lo_mutex);
	if (partscan)
		loop_reread_partitions(lo);

	return err;
}

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct path path;
	struct kstat stat;
	int ret;

	ret = mutex_lock_killable(&lo->lo_mutex);
	if (ret)
		return ret;
	if (lo->lo_state != Lo_bound) {
		mutex_unlock(&lo->lo_mutex);
		return -ENXIO;
	}

	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);

	/* Drop lo_mutex while we call into the filesystem. */
	path = lo->lo_backing_file->f_path;
	path_get(&path);
	mutex_unlock(&lo->lo_mutex);
	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (!ret) {
		info->lo_device = huge_encode_dev(stat.dev);
		info->lo_inode = stat.ino;
		info->lo_rdevice = huge_encode_dev(stat.rdev);
	}
	path_put(&path);
	return ret;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_flags = info->lo_flags;
	memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_flags = info64->lo_flags;
	memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof(struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof(struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg)
{
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo)
{
	loff_t size;

	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	size = lo_calculate_size(lo, lo->lo_backing_file);
	loop_set_size(lo, size);

	return 0;
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	bool use_dio = !!arg;
	unsigned int memflags;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
		return 0;

	if (use_dio) {
		if (!lo_can_use_dio(lo))
			return -EINVAL;
		/* flush dirty pages before starting to use direct I/O */
		vfs_fsync(lo->lo_backing_file, 0);
	}

	memflags = blk_mq_freeze_queue(lo->lo_queue);
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue, memflags);
	return 0;
}
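
/*
 * From userspace this is reachable via the LOOP_SET_DIRECT_IO ioctl, e.g.
 * (assuming util-linux) "losetup --direct-io=on /dev/loop0".
 */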
1445
loop_set_block_size(struct loop_device * lo,blk_mode_t mode,struct block_device * bdev,unsigned long arg)1446 static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
1447 struct block_device *bdev, unsigned long arg)
1448 {
1449 struct queue_limits lim;
1450 unsigned int memflags;
1451 int err = 0;
1452
1453 /*
1454 * If we don't hold exclusive handle for the device, upgrade to it
1455 * here to avoid changing device under exclusive owner.
1456 */
1457 if (!(mode & BLK_OPEN_EXCL)) {
1458 err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
1459 if (err)
1460 return err;
1461 }
1462
1463 err = mutex_lock_killable(&lo->lo_mutex);
1464 if (err)
1465 goto abort_claim;
1466
1467 if (lo->lo_state != Lo_bound) {
1468 err = -ENXIO;
1469 goto unlock;
1470 }
1471
1472 if (lo->lo_queue->limits.logical_block_size == arg)
1473 goto unlock;
1474
1475 sync_blockdev(lo->lo_device);
1476 invalidate_bdev(lo->lo_device);
1477
1478 lim = queue_limits_start_update(lo->lo_queue);
1479 loop_update_limits(lo, &lim, arg);
1480
1481 memflags = blk_mq_freeze_queue(lo->lo_queue);
1482 err = queue_limits_commit_update(lo->lo_queue, &lim);
1483 loop_update_dio(lo);
1484 blk_mq_unfreeze_queue(lo->lo_queue, memflags);
1485
1486 unlock:
1487 mutex_unlock(&lo->lo_mutex);
1488 abort_claim:
1489 if (!(mode & BLK_OPEN_EXCL))
1490 bd_abort_claiming(bdev, loop_set_block_size);
1491 return err;
1492 }
1493
lo_simple_ioctl(struct loop_device * lo,unsigned int cmd,unsigned long arg)1494 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1495 unsigned long arg)
1496 {
1497 int err;
1498
1499 err = mutex_lock_killable(&lo->lo_mutex);
1500 if (err)
1501 return err;
1502 switch (cmd) {
1503 case LOOP_SET_CAPACITY:
1504 err = loop_set_capacity(lo);
1505 break;
1506 case LOOP_SET_DIRECT_IO:
1507 err = loop_set_dio(lo, arg);
1508 break;
1509 default:
1510 err = -EINVAL;
1511 }
1512 mutex_unlock(&lo->lo_mutex);
1513 return err;
1514 }
1515
lo_ioctl(struct block_device * bdev,blk_mode_t mode,unsigned int cmd,unsigned long arg)1516 static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
1517 unsigned int cmd, unsigned long arg)
1518 {
1519 struct loop_device *lo = bdev->bd_disk->private_data;
1520 void __user *argp = (void __user *) arg;
1521 int err;
1522
1523 switch (cmd) {
1524 case LOOP_SET_FD: {
1525 /*
1526 * Legacy case - pass in a zeroed out struct loop_config with
1527 * only the file descriptor set , which corresponds with the
1528 * default parameters we'd have used otherwise.
1529 */
1530 struct loop_config config;
1531
1532 memset(&config, 0, sizeof(config));
1533 config.fd = arg;
1534
1535 return loop_configure(lo, mode, bdev, &config);
1536 }
1537 case LOOP_CONFIGURE: {
1538 struct loop_config config;
1539
1540 if (copy_from_user(&config, argp, sizeof(config)))
1541 return -EFAULT;
1542
1543 return loop_configure(lo, mode, bdev, &config);
1544 }
1545 case LOOP_CHANGE_FD:
1546 return loop_change_fd(lo, bdev, arg);
1547 case LOOP_CLR_FD:
1548 return loop_clr_fd(lo);
1549 case LOOP_SET_STATUS:
1550 err = -EPERM;
1551 if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1552 err = loop_set_status_old(lo, argp);
1553 break;
1554 case LOOP_GET_STATUS:
1555 return loop_get_status_old(lo, argp);
1556 case LOOP_SET_STATUS64:
1557 err = -EPERM;
1558 if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1559 err = loop_set_status64(lo, argp);
1560 break;
1561 case LOOP_GET_STATUS64:
1562 return loop_get_status64(lo, argp);
1563 case LOOP_SET_BLOCK_SIZE:
1564 if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
1565 return -EPERM;
1566 return loop_set_block_size(lo, mode, bdev, arg);
1567 case LOOP_SET_CAPACITY:
1568 case LOOP_SET_DIRECT_IO:
1569 if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
1570 return -EPERM;
1571 fallthrough;
1572 default:
1573 err = lo_simple_ioctl(lo, cmd, arg);
1574 break;
1575 }
1576
1577 return err;
1578 }
1579
1580 #ifdef CONFIG_COMPAT
1581 struct compat_loop_info {
1582 compat_int_t lo_number; /* ioctl r/o */
1583 compat_dev_t lo_device; /* ioctl r/o */
1584 compat_ulong_t lo_inode; /* ioctl r/o */
1585 compat_dev_t lo_rdevice; /* ioctl r/o */
1586 compat_int_t lo_offset;
1587 compat_int_t lo_encrypt_type; /* obsolete, ignored */
1588 compat_int_t lo_encrypt_key_size; /* ioctl w/o */
1589 compat_int_t lo_flags; /* ioctl r/o */
1590 char lo_name[LO_NAME_SIZE];
1591 unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1592 compat_ulong_t lo_init[2];
1593 char reserved[4];
1594 };
1595
1596 /*
1597 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1598 * - noinlined to reduce stack space usage in main part of driver
1599 */
1600 static noinline int
loop_info64_from_compat(const struct compat_loop_info __user * arg,struct loop_info64 * info64)1601 loop_info64_from_compat(const struct compat_loop_info __user *arg,
1602 struct loop_info64 *info64)
1603 {
1604 struct compat_loop_info info;
1605
1606 if (copy_from_user(&info, arg, sizeof(info)))
1607 return -EFAULT;
1608
1609 memset(info64, 0, sizeof(*info64));
1610 info64->lo_number = info.lo_number;
1611 info64->lo_device = info.lo_device;
1612 info64->lo_inode = info.lo_inode;
1613 info64->lo_rdevice = info.lo_rdevice;
1614 info64->lo_offset = info.lo_offset;
1615 info64->lo_sizelimit = 0;
1616 info64->lo_flags = info.lo_flags;
1617 memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1618 return 0;
1619 }
1620
1621 /*
1622 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1623 * - noinlined to reduce stack space usage in main part of driver
1624 */
1625 static noinline int
loop_info64_to_compat(const struct loop_info64 * info64,struct compat_loop_info __user * arg)1626 loop_info64_to_compat(const struct loop_info64 *info64,
1627 struct compat_loop_info __user *arg)
1628 {
1629 struct compat_loop_info info;
1630
1631 memset(&info, 0, sizeof(info));
1632 info.lo_number = info64->lo_number;
1633 info.lo_device = info64->lo_device;
1634 info.lo_inode = info64->lo_inode;
1635 info.lo_rdevice = info64->lo_rdevice;
1636 info.lo_offset = info64->lo_offset;
1637 info.lo_flags = info64->lo_flags;
1638 memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1639
1640 /* error in case values were truncated */
1641 if (info.lo_device != info64->lo_device ||
1642 info.lo_rdevice != info64->lo_rdevice ||
1643 info.lo_inode != info64->lo_inode ||
1644 info.lo_offset != info64->lo_offset)
1645 return -EOVERFLOW;
1646
1647 if (copy_to_user(arg, &info, sizeof(info)))
1648 return -EFAULT;
1649 return 0;
1650 }
1651
1652 static int
loop_set_status_compat(struct loop_device * lo,const struct compat_loop_info __user * arg)1653 loop_set_status_compat(struct loop_device *lo,
1654 const struct compat_loop_info __user *arg)
1655 {
1656 struct loop_info64 info64;
1657 int ret;
1658
1659 ret = loop_info64_from_compat(arg, &info64);
1660 if (ret < 0)
1661 return ret;
1662 return loop_set_status(lo, &info64);
1663 }
1664
1665 static int
loop_get_status_compat(struct loop_device * lo,struct compat_loop_info __user * arg)1666 loop_get_status_compat(struct loop_device *lo,
1667 struct compat_loop_info __user *arg)
1668 {
1669 struct loop_info64 info64;
1670 int err;
1671
1672 if (!arg)
1673 return -EINVAL;
1674 err = loop_get_status(lo, &info64);
1675 if (!err)
1676 err = loop_info64_to_compat(&info64, arg);
1677 return err;
1678 }
1679
lo_compat_ioctl(struct block_device * bdev,blk_mode_t mode,unsigned int cmd,unsigned long arg)1680 static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
1681 unsigned int cmd, unsigned long arg)
1682 {
1683 struct loop_device *lo = bdev->bd_disk->private_data;
1684 int err;
1685
1686 switch(cmd) {
1687 case LOOP_SET_STATUS:
1688 err = loop_set_status_compat(lo,
1689 (const struct compat_loop_info __user *)arg);
1690 break;
1691 case LOOP_GET_STATUS:
1692 err = loop_get_status_compat(lo,
1693 (struct compat_loop_info __user *)arg);
1694 break;
1695 case LOOP_SET_CAPACITY:
1696 case LOOP_CLR_FD:
1697 case LOOP_GET_STATUS64:
1698 case LOOP_SET_STATUS64:
1699 case LOOP_CONFIGURE:
1700 arg = (unsigned long) compat_ptr(arg);
1701 fallthrough;
1702 case LOOP_SET_FD:
1703 case LOOP_CHANGE_FD:
1704 case LOOP_SET_BLOCK_SIZE:
1705 case LOOP_SET_DIRECT_IO:
1706 err = lo_ioctl(bdev, mode, cmd, arg);
1707 break;
1708 default:
1709 err = -ENOIOCTLCMD;
1710 break;
1711 }
1712 return err;
1713 }
1714 #endif
1715
lo_open(struct gendisk * disk,blk_mode_t mode)1716 static int lo_open(struct gendisk *disk, blk_mode_t mode)
1717 {
1718 struct loop_device *lo = disk->private_data;
1719 int err;
1720
1721 err = mutex_lock_killable(&lo->lo_mutex);
1722 if (err)
1723 return err;
1724
1725 if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown)
1726 err = -ENXIO;
1727 mutex_unlock(&lo->lo_mutex);
1728 return err;
1729 }
1730
lo_release(struct gendisk * disk)1731 static void lo_release(struct gendisk *disk)
1732 {
1733 struct loop_device *lo = disk->private_data;
1734 bool need_clear = false;
1735
1736 if (disk_openers(disk) > 0)
1737 return;
1738 /*
1739 * Clear the backing device information if this is the last close of
1740 * a device that's been marked for auto clear, or on which LOOP_CLR_FD
1741 * has been called.
1742 */
1743
1744 mutex_lock(&lo->lo_mutex);
1745 if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR))
1746 lo->lo_state = Lo_rundown;
1747
1748 need_clear = (lo->lo_state == Lo_rundown);
1749 mutex_unlock(&lo->lo_mutex);
1750
1751 if (need_clear)
1752 __loop_clr_fd(lo);
1753 }
1754
lo_free_disk(struct gendisk * disk)1755 static void lo_free_disk(struct gendisk *disk)
1756 {
1757 struct loop_device *lo = disk->private_data;
1758
1759 if (lo->workqueue)
1760 destroy_workqueue(lo->workqueue);
1761 loop_free_idle_workers(lo, true);
1762 timer_shutdown_sync(&lo->timer);
1763 mutex_destroy(&lo->lo_mutex);
1764 kfree(lo);
1765 }
1766
1767 static const struct block_device_operations lo_fops = {
1768 .owner = THIS_MODULE,
1769 .open = lo_open,
1770 .release = lo_release,
1771 .ioctl = lo_ioctl,
1772 #ifdef CONFIG_COMPAT
1773 .compat_ioctl = lo_compat_ioctl,
1774 #endif
1775 .free_disk = lo_free_disk,
1776 };
1777
1778 /*
1779 * And now the modules code and kernel interface.
1780 */
1781
1782 /*
1783 * If max_loop is specified, create that many devices upfront.
1784 * This also becomes a hard limit. If max_loop is not specified,
1785 * the default isn't a hard limit (as before commit 85c50197716c
1786 * changed the default value from 0 for max_loop=0 reasons), just
1787 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1788 * init time. Loop devices can be requested on-demand with the
1789 * /dev/loop-control interface, or be instantiated by accessing
1790 * a 'dead' device node.
1791 */
1792 static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1793
1794 #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
1795 static bool max_loop_specified;
1796
static int max_loop_param_set_int(const char *val,
                                  const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (ret < 0)
                return ret;

        max_loop_specified = true;
        return 0;
}

static const struct kernel_param_ops max_loop_param_ops = {
        .set = max_loop_param_set_int,
        .get = param_get_int,
};

module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
#else
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
#endif

module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");

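/*
 * Illustrative usage (not part of the driver itself): pre-create eight
 * devices with room for 15 partitions each when loading the module:
 *
 *      modprobe loop max_loop=8 max_part=15
 */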
static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;

static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
{
        int qd, ret;

        ret = kstrtoint(s, 0, &qd);
        if (ret < 0)
                return ret;
        if (qd < 1)
                return -EINVAL;
        hw_queue_depth = qd;
        return 0;
}

static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
        .set = loop_set_hw_queue_depth,
        .get = param_get_int,
};

device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));

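/*
 * Illustrative usage: raise the per-hctx queue depth at load time. The
 * setter above accepts any value >= 1 and rejects everything else.
 *
 *      modprobe loop hw_queue_depth=256
 */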
MODULE_DESCRIPTION("Loopback device support");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

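/*
 * blk-mq ->queue_rq() entry point: decide whether the request may use the
 * AIO path, capture the first bio's blkcg/memcg css so the worker can be
 * charged to the submitter's cgroups, and hand the command to a worker.
 */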
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
        struct loop_device *lo = rq->q->queuedata;

        blk_mq_start_request(rq);

        if (lo->lo_state != Lo_bound)
                return BLK_STS_IOERR;

        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                cmd->use_aio = false;
                break;
        default:
                cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
                break;
        }

        /* always use the first bio's css */
        cmd->blkcg_css = NULL;
        cmd->memcg_css = NULL;
#ifdef CONFIG_BLK_CGROUP
        if (rq->bio) {
                cmd->blkcg_css = bio_blkcg_css(rq->bio);
#ifdef CONFIG_MEMCG
                if (cmd->blkcg_css) {
                        cmd->memcg_css =
                                cgroup_get_e_css(cmd->blkcg_css->cgroup,
                                                 &memory_cgrp_subsys);
                }
#endif
        }
#endif
        loop_queue_work(lo, cmd);

        return BLK_STS_OK;
}

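/*
 * Execute one command in worker context: refuse writes to a read-only
 * device, temporarily adopt the submitter's blkcg/memcg, perform the
 * file-backed I/O, and complete the request unless AIO will do so later.
 */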
static void loop_handle_cmd(struct loop_cmd *cmd)
{
        struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
        struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
        struct request *rq = blk_mq_rq_from_pdu(cmd);
        const bool write = op_is_write(req_op(rq));
        struct loop_device *lo = rq->q->queuedata;
        int ret = 0;
        struct mem_cgroup *old_memcg = NULL;

        if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
                ret = -EIO;
                goto failed;
        }

        /* We can block in this context, so ignore REQ_NOWAIT. */
        if (rq->cmd_flags & REQ_NOWAIT)
                rq->cmd_flags &= ~REQ_NOWAIT;

        if (cmd_blkcg_css)
                kthread_associate_blkcg(cmd_blkcg_css);
        if (cmd_memcg_css)
                old_memcg = set_active_memcg(
                        mem_cgroup_from_css(cmd_memcg_css));

        /*
         * do_req_filebacked() may call blk_mq_complete_request() synchronously
         * or asynchronously if using aio. Hence, do not touch 'cmd' after
         * do_req_filebacked() has returned unless we are sure that 'cmd' has
         * not yet been completed.
         */
        ret = do_req_filebacked(lo, rq);

        if (cmd_blkcg_css)
                kthread_associate_blkcg(NULL);

        if (cmd_memcg_css) {
                set_active_memcg(old_memcg);
                css_put(cmd_memcg_css);
        }
failed:
        /* complete non-aio request */
        if (ret != -EIOCBQUEUED) {
                if (ret == -EOPNOTSUPP)
                        cmd->ret = ret;
                else
                        cmd->ret = ret ? -EIO : 0;
                if (likely(!blk_should_fake_timeout(rq->q)))
                        blk_mq_complete_request(rq);
        }
}

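/*
 * Drain @cmd_list under lo_work_lock, dropping the lock around each command
 * so the I/O may sleep. PF_LOCAL_THROTTLE and PF_MEMALLOC_NOIO guard
 * against writeback throttling and reclaim recursing back into this device.
 */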
static void loop_process_work(struct loop_worker *worker,
                              struct list_head *cmd_list, struct loop_device *lo)
{
        int orig_flags = current->flags;
        struct loop_cmd *cmd;

        current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
        spin_lock_irq(&lo->lo_work_lock);
        while (!list_empty(cmd_list)) {
                cmd = container_of(
                        cmd_list->next, struct loop_cmd, list_entry);
                list_del(cmd_list->next);
                spin_unlock_irq(&lo->lo_work_lock);

                loop_handle_cmd(cmd);
                cond_resched();

                spin_lock_irq(&lo->lo_work_lock);
        }

        /*
         * We only add to the idle list if there are no pending cmds
         * *and* the worker will not run again, which ensures that it
         * is safe to free any worker on the idle list.
         */
        if (worker && !work_pending(&worker->work)) {
                worker->last_ran_at = jiffies;
                list_add_tail(&worker->idle_list, &lo->idle_worker_list);
                loop_set_timer(lo);
        }
        spin_unlock_irq(&lo->lo_work_lock);
        current->flags = orig_flags;
}

static void loop_workfn(struct work_struct *work)
{
        struct loop_worker *worker =
                container_of(work, struct loop_worker, work);
        loop_process_work(worker, &worker->cmd_list, worker->lo);
}

static void loop_rootcg_workfn(struct work_struct *work)
{
        struct loop_device *lo =
                container_of(work, struct loop_device, rootcg_work);
        loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
}

static const struct blk_mq_ops loop_mq_ops = {
        .queue_rq = loop_queue_rq,
        .complete = lo_complete_rq,
};

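/*
 * Allocate and register loop device number @i, or the first free number if
 * @i is negative. Returns the device number on success, negative errno on
 * failure.
 */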
static int loop_add(int i)
{
        struct queue_limits lim = {
                /*
                 * Random number picked from the historic block max_sectors cap.
                 */
                .max_hw_sectors = 2560u,
        };
        struct loop_device *lo;
        struct gendisk *disk;
        int err;

        err = -ENOMEM;
        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
        if (!lo)
                goto out;
        lo->worker_tree = RB_ROOT;
        INIT_LIST_HEAD(&lo->idle_worker_list);
        timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
        lo->lo_state = Lo_unbound;

        err = mutex_lock_killable(&loop_ctl_mutex);
        if (err)
                goto out_free_dev;

        /* allocate id, if @id >= 0, we're requesting that specific id */
        if (i >= 0) {
                err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
                if (err == -ENOSPC)
                        err = -EEXIST;
        } else {
                err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
        }
        mutex_unlock(&loop_ctl_mutex);
        if (err < 0)
                goto out_free_dev;
        i = err;

        lo->tag_set.ops = &loop_mq_ops;
        lo->tag_set.nr_hw_queues = 1;
        lo->tag_set.queue_depth = hw_queue_depth;
        lo->tag_set.numa_node = NUMA_NO_NODE;
        lo->tag_set.cmd_size = sizeof(struct loop_cmd);
        lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
        lo->tag_set.driver_data = lo;

        err = blk_mq_alloc_tag_set(&lo->tag_set);
        if (err)
                goto out_free_idr;

        disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo);
        if (IS_ERR(disk)) {
                err = PTR_ERR(disk);
                goto out_cleanup_tags;
        }
        lo->lo_queue = lo->lo_disk->queue;

        /*
         * Disable partition scanning by default. The in-kernel partition
         * scanning can be requested individually per-device during its
         * setup. Userspace can always add and remove partitions from all
         * devices. The needed partition minors are allocated from the
         * extended minor space; the main loop device numbers will continue
         * to match the loop minors, regardless of the number of partitions
         * used.
         *
         * If max_part is given, partition scanning is globally enabled for
         * all loop devices. The minors for the main loop devices will be
         * multiples of max_part.
         *
         * Note: Global-for-all-devices, set-only-at-init, read-only module
         * parameters like 'max_loop' and 'max_part' make things needlessly
         * complicated, are too static and inflexible, and may surprise
         * userspace tools. Parameters like this in general should be avoided.
         */
        if (!part_shift)
                set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
        mutex_init(&lo->lo_mutex);
        lo->lo_number = i;
        spin_lock_init(&lo->lo_lock);
        spin_lock_init(&lo->lo_work_lock);
        INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
        INIT_LIST_HEAD(&lo->rootcg_cmd_list);
        disk->major = LOOP_MAJOR;
        disk->first_minor = i << part_shift;
        disk->minors = 1 << part_shift;
        disk->fops = &lo_fops;
        disk->private_data = lo;
        disk->queue = lo->lo_queue;
        disk->events = DISK_EVENT_MEDIA_CHANGE;
        disk->event_flags = DISK_EVENT_FLAG_UEVENT;
        sprintf(disk->disk_name, "loop%d", i);
        /* Make this loop device reachable from pathname. */
        err = add_disk(disk);
        if (err)
                goto out_cleanup_disk;

        /* Show this loop device. */
        mutex_lock(&loop_ctl_mutex);
        lo->idr_visible = true;
        mutex_unlock(&loop_ctl_mutex);

        return i;

out_cleanup_disk:
        put_disk(disk);
out_cleanup_tags:
        blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
        mutex_lock(&loop_ctl_mutex);
        idr_remove(&loop_index_idr, i);
        mutex_unlock(&loop_ctl_mutex);
out_free_dev:
        kfree(lo);
out:
        return err;
}

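/*
 * Undo loop_add(): unregister the disk, free the tag set, drop the idr
 * entry, and put the final gendisk reference, which ends in lo_free_disk().
 */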
static void loop_remove(struct loop_device *lo)
{
        /* Make this loop device unreachable from pathname. */
        del_gendisk(lo->lo_disk);
        blk_mq_free_tag_set(&lo->tag_set);

        mutex_lock(&loop_ctl_mutex);
        idr_remove(&loop_index_idr, lo->lo_number);
        mutex_unlock(&loop_ctl_mutex);

        put_disk(lo->lo_disk);
}

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static void loop_probe(dev_t dev)
{
        int idx = MINOR(dev) >> part_shift;

        if (max_loop_specified && max_loop && idx >= max_loop)
                return;
        loop_add(idx);
}
#else
#define loop_probe NULL
#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */

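/*
 * Handle LOOP_CTL_REMOVE: hide the device from the idr so concurrent
 * lookups cannot find it, verify that it is unbound and has no openers,
 * then tear it down; on failure the device is made visible again.
 */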
static int loop_control_remove(int idx)
{
        struct loop_device *lo;
        int ret;

        if (idx < 0) {
                pr_warn_once("deleting an unspecified loop device is not supported.\n");
                return -EINVAL;
        }

        /* Hide this loop device for serialization. */
        ret = mutex_lock_killable(&loop_ctl_mutex);
        if (ret)
                return ret;
        lo = idr_find(&loop_index_idr, idx);
        if (!lo || !lo->idr_visible)
                ret = -ENODEV;
        else
                lo->idr_visible = false;
        mutex_unlock(&loop_ctl_mutex);
        if (ret)
                return ret;

        /* Check whether this loop device can be removed. */
        ret = mutex_lock_killable(&lo->lo_mutex);
        if (ret)
                goto mark_visible;
        if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
                mutex_unlock(&lo->lo_mutex);
                ret = -EBUSY;
                goto mark_visible;
        }
        /* Mark this loop device as no more bound, but not quite unbound yet */
        lo->lo_state = Lo_deleting;
        mutex_unlock(&lo->lo_mutex);

        loop_remove(lo);
        return 0;

mark_visible:
        /* Show this loop device again. */
        mutex_lock(&loop_ctl_mutex);
        lo->idr_visible = true;
        mutex_unlock(&loop_ctl_mutex);
        return ret;
}

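/*
 * Handle LOOP_CTL_GET_FREE: return the number of an existing unbound,
 * visible device if one exists, otherwise create and return a new one.
 */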
static int loop_control_get_free(int idx)
{
        struct loop_device *lo;
        int id, ret;

        ret = mutex_lock_killable(&loop_ctl_mutex);
        if (ret)
                return ret;
        idr_for_each_entry(&loop_index_idr, lo, id) {
                /* Hitting a race results in creating a new loop device which is harmless. */
                if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
                        goto found;
        }
        mutex_unlock(&loop_ctl_mutex);
        return loop_add(-1);
found:
        mutex_unlock(&loop_ctl_mutex);
        return id;
}

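/*
 * Dispatch for /dev/loop-control. A minimal userspace sketch of the
 * intended usage (illustrative only, error handling omitted):
 *
 *      int ctl = open("/dev/loop-control", O_RDWR);
 *      int nr = ioctl(ctl, LOOP_CTL_GET_FREE);  // e.g. 7 for /dev/loop7
 *      char path[32];
 *      snprintf(path, sizeof(path), "/dev/loop%d", nr);
 *      int lfd = open(path, O_RDWR);
 *      int bfd = open("backing.img", O_RDWR);
 *      ioctl(lfd, LOOP_SET_FD, bfd);            // bind the backing file
 */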
static long loop_control_ioctl(struct file *file, unsigned int cmd,
                               unsigned long parm)
{
        switch (cmd) {
        case LOOP_CTL_ADD:
                return loop_add(parm);
        case LOOP_CTL_REMOVE:
                return loop_control_remove(parm);
        case LOOP_CTL_GET_FREE:
                return loop_control_get_free(parm);
        default:
                return -ENOSYS;
        }
}

static const struct file_operations loop_ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = loop_control_ioctl,
        .compat_ioctl = loop_control_ioctl,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

static struct miscdevice loop_misc = {
        .minor = LOOP_CTRL_MINOR,
        .name = "loop-control",
        .fops = &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

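/*
 * Module init: validate max_part/max_loop against the minor-number space,
 * register the loop-control misc device and the block major, then
 * pre-create the initial set of loop devices.
 */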
static int __init loop_init(void)
{
        int i;
        int err;

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space, so that users can pick the correct minor
                 * number if they want to create more devices.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS) {
                err = -EINVAL;
                goto err_out;
        }

        if (max_loop > 1UL << (MINORBITS - part_shift)) {
                err = -EINVAL;
                goto err_out;
        }

        err = misc_register(&loop_misc);
        if (err < 0)
                goto err_out;

        if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
                err = -EIO;
                goto misc_out;
        }

        /* pre-create number of devices given by config or max_loop */
        for (i = 0; i < max_loop; i++)
                loop_add(i);

        printk(KERN_INFO "loop: module loaded\n");
        return 0;

misc_out:
        misc_deregister(&loop_misc);
err_out:
        return err;
}

static void __exit loop_exit(void)
{
        struct loop_device *lo;
        int id;

        unregister_blkdev(LOOP_MAJOR, "loop");
        misc_deregister(&loop_misc);

        /*
         * There is no need to use loop_ctl_mutex here, for nobody else can
         * access loop_index_idr when this module is unloading (unless forced
         * module unloading is requested). If this is not a clean unloading,
         * we have no means to avoid a kernel crash.
         */
        idr_for_each_entry(&loop_index_idr, lo, id)
                loop_remove(lo);

        idr_destroy(&loop_index_idr);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
        max_loop = simple_strtol(str, NULL, 0);
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
        max_loop_specified = true;
#endif
        return 1;
}

__setup("max_loop=", max_loop_setup);
#endif