// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

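/*
 * For O_DSYNC writes, set REQ_FUA so the data is on stable storage once the
 * bio completes; that way no separate flush is needed and completion does
 * not have to be deferred to a work item.
 */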
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

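/*
 * Direct I/O to a block device must be aligned to the logical block size:
 * reject any request whose offset or length is misaligned.
 */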
static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
		struct iov_iter *iter)
{
	return (iocb->ki_pos | iov_iter_count(iter)) &
		(bdev_logical_block_size(bdev) - 1);
}

static inline int blkdev_iov_iter_get_pages(struct bio *bio,
		struct iov_iter *iter, struct block_device *bdev)
{
	return bio_iov_iter_get_pages(bio, iter,
			bdev_logical_block_size(bdev) - 1);
}

#define DIO_INLINE_BIO_VECS 4

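/*
 * Synchronous single-bio fast path: build one bio on the stack (using the
 * inline bio_vecs when the request is small enough), submit it and wait for
 * completion without allocating a struct blkdev_dio.
 */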
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_write_stream = iocb->ki_write_stream;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

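/*
 * Per-call completion state for the async and multi-bio paths.  The
 * structure is embedded in front of the first bio allocated from
 * blkdev_dio_pool.
 */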
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set	blkdev_dio_pool;

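/*
 * Completion handler for the multi-bio path: record the first error, drop
 * the per-bio reference, and on the last completion either wake the
 * synchronous waiter or complete the iocb.
 */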
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
	bool is_sync = dio->flags & DIO_IS_SYNC;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	if (atomic_dec_and_test(&dio->ref)) {
		if (!is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

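/*
 * General direct I/O path: split the request into as many bios as needed,
 * tracking them with a reference count in struct blkdev_dio.  Synchronous
 * callers sleep until the last bio completes; asynchronous callers get
 * -EIOCBQUEUED and are completed from blkdev_bio_end_io().
 */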
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded into the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_write_stream = iocb->ki_write_stream;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub bios will not
			 * fail to get issued for NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				ret = -EAGAIN;
				goto fail;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		if (iocb->ki_flags & IOCB_HAS_METADATA) {
			ret = bio_integrity_map_iter(bio, iocb->private);
			if (unlikely(ret))
				goto fail;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
fail:
	bio_release_pages(bio, false);
	bio_clear_flag(bio, BIO_REFFED);
	bio_put(bio);
	blk_finish_plug(&plug);
	return ret;
}

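/*
 * Completion handler for the single-bio async path: complete the iocb
 * directly from bio completion; no reference counting is needed.
 */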
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

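/*
 * Asynchronous fast path for requests that fit into a single bio: no
 * reference counting, and polled (IOCB_HIPRI) submission is supported by
 * stashing the bio in iocb->private for iocb_bio_iopoll().
 */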
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_write_stream = iocb->ki_write_stream;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret))
			goto out_bio_put;
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA) {
		ret = bio_integrity_map_iter(bio, iocb->private);
		WRITE_ONCE(iocb->private, NULL);
		if (unlikely(ret))
			goto out_bio_put;
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;

out_bio_put:
	bio_put(bio);
	return ret;
}

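/*
 * Validate alignment and write streams, then dispatch to the simple, async
 * single-bio or general multi-bio implementation depending on the request
 * size and iocb flags.
 */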
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb, iter))
		return -EINVAL;

	if (iov_iter_rw(iter) == WRITE) {
		u16 max_write_streams = bdev_max_write_streams(bdev);

		if (iocb->ki_write_stream) {
			if (iocb->ki_write_stream > max_write_streams)
				return -EINVAL;
		} else if (max_write_streams) {
			enum rw_hint write_hint =
				file_inode(iocb->ki_filp)->i_write_hint;

			/*
			 * Just use the write hint as write stream for block
			 * device writes. This assumes no file system is
			 * mounted that would use the streams differently.
			 */
			if (write_hint <= max_write_streams)
				iocb->ki_write_stream = write_hint;
		}
	}

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS &&
		   !(iocb->ki_flags & IOCB_HAS_METADATA))) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (iocb->ki_flags & IOCB_ATOMIC) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

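/*
 * The block device is always mapped 1:1 onto itself, so the iomap simply
 * covers the range from the (block-aligned) offset to the end of the device.
 */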
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
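/*
 * Trivial get_block: every block of the device maps to itself, so just fill
 * in the buffer_head without any lookup.
 */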
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock. The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = block_write_full_folio(folio, wbc, blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(const struct kiocb *iocb,
			      struct address_space *mapping, loff_t pos,
			      unsigned len, struct folio **foliop,
			      void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned copied,
			    struct folio *folio, void *fsdata)
{
	int ret;
	ret = block_write_end(pos, len, copied, folio);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	iomap_bio_read_folio(folio, &blkdev_iomap_ops);
	return 0;
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_bio_readahead(rac, &blkdev_iomap_ops);
}

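/*
 * Map (part of) a dirty folio for writeback.  Refresh the cached iomap when
 * the offset falls outside of it, then add the range to the current ioend.
 */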
static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	loff_t isize = i_size_read(wpc->inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;

	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error;

		error = blkdev_iomap_begin(wpc->inode, offset, isize - offset,
					   IOMAP_WRITE, &wpc->iomap, NULL);
		if (error)
			return error;
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.writeback_range	= blkdev_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode		= mapping->host,
		.wbc		= wbc,
		.ops		= &blkdev_writeback_ops
	};

	return iomap_writepages(&wpc);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

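/*
 * Write back and wait on the page cache for the given range, then issue a
 * cache flush to the device; a flush not being supported (-EOPNOTSUPP) is
 * not treated as an error.
 */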
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex, and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at the file open flags and generate the corresponding block open flags
 * from them. The function works both for a file that is just being opened
 * (e.g. during the ->open callback) and for a file that is already open. This
 * is actually non-trivial (see the comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, so use
	 * file->private_data to determine whether the open was exclusive for
	 * already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

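/*
 * Open a block device: translate the file flags to a blk_mode_t, check
 * permissions, look up the block device by its device number and open it,
 * using the struct file as the exclusive holder where applicable.
 */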
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev, true);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
	if (blk_get_integrity(bdev->bd_disk))
		filp->f_mode |= FMODE_HAS_METADATA;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

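/*
 * O_DIRECT write helper: invalidate the page cache for the range about to be
 * written, issue the direct I/O and invalidate again afterwards so that
 * buffered readers do not see stale data.
 */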
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
					 NULL);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	if (atomic) {
		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		if (atomic)
			return -EINVAL;
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		/*
		 * Take i_rwsem and invalidate_lock to avoid racing with
		 * set_blocksize changing i_blkbits/folio order and punching
		 * out the pagecache.
		 */
		inode_lock_shared(bd_inode);
		ret = blkdev_buffered_write(iocb, from);
		inode_unlock_shared(bd_inode);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

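/*
 * Read from the block device, clamping the request to the device size.  Uses
 * direct I/O when IOCB_DIRECT is set and buffered reads through the page
 * cache otherwise (or for any remainder of a short direct read).
 */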
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret > 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret != -EIOCBQUEUED)
			iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	/*
	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
	 * changing i_blkbits/folio order and punching out the pagecache.
	 */
	inode_lock_shared(bd_inode);
	ret = filemap_read(iocb, to, ret);
	inode_unlock_shared(bd_inode);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_WRITE_ZEROES)

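/*
 * fallocate() on a block device: punch hole, zero range and write-zeroes
 * requests are all translated into blkdev_issue_zeroout() calls with the
 * appropriate flags, after invalidating the affected page cache range.
 */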
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	unsigned int flags;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;
	/*
	 * Don't allow writing zeroes if the device does not enable the
	 * unmap write zeroes operation.
	 */
	if ((mode & FALLOC_FL_WRITE_ZEROES) &&
	    !bdev_write_zeroes_unmap_sectors(bdev))
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	inode_lock(inode);
	filemap_invalidate_lock(inode->i_mapping);

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOUNMAP;
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOFALLBACK;
		break;
	case FALLOC_FL_WRITE_ZEROES:
		flags = 0;
		break;
	default:
		error = -EOPNOTSUPP;
		goto fail;
	}

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
	if (error)
		goto fail;

	error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
				     len >> SECTOR_SHIFT, GFP_KERNEL, flags);
fail:
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return error;
}

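/*
 * mmap of a read-only block device goes through the read-only helper so that
 * writable shared mappings are rejected; otherwise use the generic file mmap.
 */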
static int blkdev_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;

	if (bdev_read_only(I_BDEV(bdev_file_inode(file))))
		return generic_file_readonly_mmap_prepare(desc);

	return generic_file_mmap_prepare(desc);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap_prepare	= blkdev_mmap_prepare,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);