// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

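/*
 * Set up the opf flags for a direct write.  Marking O_DSYNC writes with
 * REQ_FUA makes the data durable by the time the write itself completes, so
 * the completion side never has to issue a separate cache flush and can run
 * entirely in interrupt context.
 */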
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

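/*
 * Direct I/O to a block device must be aligned to the logical block size in
 * both offset and length.  OR-ing ki_pos with the iterator count and masking
 * with (logical block size - 1) checks both at once: any low bit left set
 * means at least one of them is misaligned.  For example (illustrative
 * only), with 512-byte logical blocks a 1024-byte read at offset 4096
 * passes, while the same read at offset 100 makes the caller return -EINVAL.
 */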
static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
		struct iov_iter *iter)
{
	return (iocb->ki_pos | iov_iter_count(iter)) &
		(bdev_logical_block_size(bdev) - 1);
}

static inline int blkdev_iov_iter_get_pages(struct bio *bio,
		struct iov_iter *iter, struct block_device *bdev)
{
	return bio_iov_iter_get_pages(bio, iter,
			bdev_logical_block_size(bdev) - 1);
}

#define DIO_INLINE_BIO_VECS 4

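/*
 * Fast synchronous path for small direct I/O: build a single bio on the
 * stack and wait for it inline.  Requests covering up to DIO_INLINE_BIO_VECS
 * segments use the on-stack bio_vec array; larger requests fall back to a
 * kmalloc'ed array but still use just one bio.
 */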
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_write_stream = iocb->ki_write_stream;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY = 1,
	DIO_IS_SYNC = 2,
};

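/*
 * Per-request state for the multi-bio and async paths.  It is allocated
 * together with the first bio from blkdev_dio_pool (the bio is embedded at
 * the end of the structure), so it lives exactly as long as that bio.  For
 * synchronous requests the union holds the submitting task to wake; for
 * async requests it holds the iocb to complete.
 */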
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

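/*
 * Completion handler for the multi-bio path.  Errors from any sub-bio are
 * coalesced into the first bio's bi_status; the request as a whole finishes
 * when the last reference on the dio is dropped, at which point the iocb is
 * completed (async) or the waiting task is woken (sync).
 */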
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
	bool is_sync = dio->flags & DIO_IS_SYNC;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	if (atomic_dec_and_test(&dio->ref)) {
		if (!is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

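/*
 * General direct I/O path: the request may need more than one bio (or
 * carries integrity metadata), so bios of up to BIO_MAX_VECS segments are
 * built and submitted in a loop under a plug, with dio->ref counting the
 * bios in flight.  An extra reference on the first bio keeps the embedded
 * dio alive until the submitter is done with it.
 */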
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded into the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_write_stream = iocb->ki_write_stream;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued for NOWAIT, and as error
			 * results are coalesced across all of them, be safe
			 * and ask for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				ret = -EAGAIN;
				goto fail;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		if (iocb->ki_flags & IOCB_HAS_METADATA) {
			ret = bio_integrity_map_iter(bio, iocb->private);
			if (unlikely(ret))
				goto fail;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
fail:
	bio_release_pages(bio, false);
	bio_clear_flag(bio, BIO_REFFED);
	bio_put(bio);
	blk_finish_plug(&plug);
	return ret;
}

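/*
 * Completion handler for the single-bio async fast path, where the dio and
 * its only bio were allocated together: complete the iocb directly and
 * release the pages; no reference counting is needed.
 */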
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

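/*
 * Async fast path for requests that fit in a single bio: no plug and no
 * reference counting, and for bvec-backed iterators the pages are referenced
 * directly instead of being pinned one by one.  Polled (IOCB_HIPRI) requests
 * stash the bio in iocb->private so iocb_bio_iopoll() can find it.
 */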
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_write_stream = iocb->ki_write_stream;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret))
			goto out_bio_put;
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA) {
		ret = bio_integrity_map_iter(bio, iocb->private);
		WRITE_ONCE(iocb->private, NULL);
		if (unlikely(ret))
			goto out_bio_put;
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;

out_bio_put:
	bio_put(bio);
	return ret;
}

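/*
 * Entry point for O_DIRECT reads and writes on a block device node,
 * dispatching to one of the paths above: the simple synchronous path, the
 * single-bio async path, or the general multi-bio path for requests that do
 * not fit in BIO_MAX_VECS segments or that carry metadata.
 *
 * Illustrative only (device name and sizes are examples): userspace
 * typically reaches this via
 *
 *	fd = open("/dev/sdX", O_RDWR | O_DIRECT);
 *	pwrite(fd, buf, 4096, 0);
 *
 * with the offset, length and buffer suitably aligned for the device's
 * logical block size.
 */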
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb, iter))
		return -EINVAL;

	if (iov_iter_rw(iter) == WRITE) {
		u16 max_write_streams = bdev_max_write_streams(bdev);

		if (iocb->ki_write_stream) {
			if (iocb->ki_write_stream > max_write_streams)
				return -EINVAL;
		} else if (max_write_streams) {
			enum rw_hint write_hint =
				file_inode(iocb->ki_filp)->i_write_hint;

			/*
			 * Just use the write hint as write stream for block
			 * device writes. This assumes no file system is
			 * mounted that would use the streams differently.
			 */
			if (write_hint <= max_write_streams)
				iocb->ki_write_stream = write_hint;
		}
	}

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS &&
		   !(iocb->ki_flags & IOCB_HAS_METADATA))) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							 nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (iocb->ki_flags & IOCB_ATOMIC) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

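/*
 * A block device is one linear extent, so the iomap simply describes
 * everything from the (block-aligned) offset to the end of the device, with
 * the disk address equal to the file offset.  Accesses beyond the device
 * size fail with -EIO rather than returning a hole.
 */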
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = block_write_full_folio(folio, wbc, blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(const struct kiocb *iocb,
			      struct address_space *mapping, loff_t pos,
			      unsigned len, struct folio **foliop,
			      void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned copied,
			    struct folio *folio, void *fsdata)
{
	int ret;
	ret = block_write_end(pos, len, copied, folio);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.write_begin = blkdev_write_begin,
	.write_end = blkdev_write_end,
	.migrate_folio = buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	loff_t isize = i_size_read(wpc->inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;

	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error;

		error = blkdev_iomap_begin(wpc->inode, offset, isize - offset,
					   IOMAP_WRITE, &wpc->iomap, NULL);
		if (error)
			return error;
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.writeback_range = blkdev_writeback_range,
	.writeback_submit = iomap_ioend_writeback_submit,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode = mapping->host,
		.wbc = wbc,
		.ops = &blkdev_writeback_ops
	};

	return iomap_writepages(&wpc);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
	.migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file that is just being opened (e.g.
 * during the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, so use
	 * file->private_data to determine whether the open was exclusive for
	 * already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

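/*
 * ->open for the block device node: translate the file flags into a
 * blk_mode_t, use the struct file itself as the holder for exclusive
 * (O_EXCL) opens, and advertise atomic-write and metadata support on the
 * file before handing off to bdev_open().
 */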
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev, true);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
	if (blk_get_integrity(bdev->bd_disk))
		filp->f_mode |= FMODE_HAS_METADATA;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
					 NULL);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	if (atomic) {
		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		if (atomic)
			return -EINVAL;
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		/*
		 * Take i_rwsem and invalidate_lock to avoid racing with
		 * set_blocksize changing i_blkbits/folio order and punching
		 * out the pagecache.
		 */
		inode_lock_shared(bd_inode);
		ret = blkdev_buffered_write(iocb, from);
		inode_unlock_shared(bd_inode);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

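/*
 * Read from the block device node.  The iterator is clamped so it never
 * reads past the end of the device; O_DIRECT requests first flush any
 * overlapping dirty page cache and fall back to buffered reads for whatever
 * the direct path did not complete.
 */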
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret > 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret != -EIOCBQUEUED)
			iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	/*
	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
	 * changing i_blkbits/folio order and punching out the pagecache.
	 */
	inode_lock_shared(bd_inode);
	ret = filemap_read(iocb, to, ret);
	inode_unlock_shared(bd_inode);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_WRITE_ZEROES)

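/*
 * fallocate() on a block device zeroes (and possibly discards) the requested
 * range instead of allocating space: FALLOC_FL_ZERO_RANGE maps to
 * BLKDEV_ZERO_NOUNMAP, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE maps to
 * BLKDEV_ZERO_NOFALLBACK, and FALLOC_FL_WRITE_ZEROES uses the default
 * zeroout behaviour.  The affected page cache range is invalidated first.
 */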
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	unsigned int flags;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;
	/*
	 * Don't allow writing zeroes if the device does not enable the
	 * unmap write zeroes operation.
	 */
	if ((mode & FALLOC_FL_WRITE_ZEROES) &&
	    !bdev_write_zeroes_unmap_sectors(bdev))
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	inode_lock(inode);
	filemap_invalidate_lock(inode->i_mapping);

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOUNMAP;
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOFALLBACK;
		break;
	case FALLOC_FL_WRITE_ZEROES:
		flags = 0;
		break;
	default:
		error = -EOPNOTSUPP;
		goto fail;
	}

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
	if (error)
		goto fail;

	error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
				     len >> SECTOR_SHIFT, GFP_KERNEL, flags);
 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return error;
}

static int blkdev_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;

	if (bdev_read_only(I_BDEV(bdev_file_inode(file))))
		return generic_file_readonly_mmap_prepare(desc);

	return generic_file_mmap_prepare(desc);
}

const struct file_operations def_blk_fops = {
	.open = blkdev_open,
	.release = blkdev_release,
	.llseek = blkdev_llseek,
	.read_iter = blkdev_read_iter,
	.write_iter = blkdev_write_iter,
	.iopoll = iocb_bio_iopoll,
	.mmap_prepare = blkdev_mmap_prepare,
	.fsync = blkdev_fsync,
	.unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_blkdev_ioctl,
#endif
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.fallocate = blkdev_fallocate,
	.uring_cmd = blkdev_uring_cmd,
	.fop_flags = FOP_BUFFER_RASYNC,
};

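/*
 * blkdev_dio_pool hands out bios with a struct blkdev_dio allocated in front
 * of them (the front_pad is offsetof(struct blkdev_dio, bio)), which is what
 * lets container_of() recover the dio from the first bio above.  The per-CPU
 * cache backs the IOCB_ALLOC_CACHE fast path.
 */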
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);