// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

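/*
 * Build the request flags for a direct write.  O_DSYNC/O_SYNC writes are
 * issued with REQ_FUA so the data is stable when the bio completes and no
 * separate flush (and thus no completion work item) is needed.
 */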
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

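/*
 * Direct I/O on a block device must be aligned to the logical block size
 * in both offset and length.
 */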
static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
		struct iov_iter *iter)
{
	return (iocb->ki_pos | iov_iter_count(iter)) &
		(bdev_logical_block_size(bdev) - 1);
}

static inline int blkdev_iov_iter_get_pages(struct bio *bio,
		struct iov_iter *iter, struct block_device *bdev)
{
	return bio_iov_iter_get_pages(bio, iter,
			bdev_logical_block_size(bdev) - 1);
}

#define DIO_INLINE_BIO_VECS 4

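/*
 * Fast path for small synchronous requests: build a single bio on the
 * stack and wait for it with submit_bio_wait().
 */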
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_write_stream = iocb->ki_write_stream;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY = 1,
	DIO_IS_SYNC = 2,
};

struct blkdev_dio {
	union {
		struct kiocb *iocb;
		struct task_struct *waiter;
	};
	size_t size;
	atomic_t ref;
	unsigned int flags;
	struct bio bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

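/*
 * Completion handler for the multi-bio path.  Each bio drops a reference on
 * the shared blkdev_dio; the final completion either calls ->ki_complete on
 * the iocb (async) or wakes up the waiting submitter (sync).
 */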
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
	bool is_sync = dio->flags & DIO_IS_SYNC;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	if (atomic_dec_and_test(&dio->ref)) {
		if (!is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

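/*
 * General direct I/O path for requests that span more than one bio.  The
 * iterator is mapped into BIO_MAX_VECS-sized bios that all share the
 * refcounted blkdev_dio allocated along with the first bio.
 */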
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			&blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded into the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_write_stream = iocb->ki_write_stream;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub bios will not
			 * fail to get issued for NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				ret = -EAGAIN;
				goto fail;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		if (iocb->ki_flags & IOCB_HAS_METADATA) {
			ret = bio_integrity_map_iter(bio, iocb->private);
			if (unlikely(ret))
				goto fail;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
fail:
	bio_release_pages(bio, false);
	bio_clear_flag(bio, BIO_REFFED);
	bio_put(bio);
	blk_finish_plug(&plug);
	return ret;
}

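/* Completion handler for the single-bio async fast path below. */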
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	if (bio_integrity(bio))
		bio_integrity_unmap_user(bio);

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

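/*
 * Async fast path for requests that fit into a single bio: no reference
 * counting is needed as there is exactly one bio per blkdev_dio.
 */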
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			&blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_write_stream = iocb->ki_write_stream;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
		if (unlikely(ret))
			goto out_bio_put;
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA) {
		ret = bio_integrity_map_iter(bio, iocb->private);
		WRITE_ONCE(iocb->private, NULL);
		if (unlikely(ret))
			goto out_bio_put;
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;

out_bio_put:
	bio_put(bio);
	return ret;
}

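/*
 * Dispatch a direct I/O request to the cheapest implementation that can
 * handle it: the on-stack single-bio path for synchronous I/O, the async
 * single-bio path, or the general multi-bio path.
 */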
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb, iter))
		return -EINVAL;

	if (iov_iter_rw(iter) == WRITE) {
		u16 max_write_streams = bdev_max_write_streams(bdev);

		if (iocb->ki_write_stream) {
			if (iocb->ki_write_stream > max_write_streams)
				return -EINVAL;
		} else if (max_write_streams) {
			enum rw_hint write_hint =
				file_inode(iocb->ki_filp)->i_write_hint;

			/*
			 * Just use the write hint as write stream for block
			 * device writes. This assumes no file system is
			 * mounted that would use the streams differently.
			 */
			if (write_hint <= max_write_streams)
				iocb->ki_write_stream = write_hint;
		}
	}

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS &&
		   !(iocb->ki_flags & IOCB_HAS_METADATA))) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
					nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (iocb->ki_flags & IOCB_ATOMIC) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

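/*
 * A block device maps linearly onto itself: the mapping for any offset
 * starts at that offset, aligned down to the logical block size, and
 * extends to the end of the device.
 */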
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
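/*
 * Trivial get_block for the buffer_head based buffered I/O path: block N of
 * a block device file is simply block N of the device.
 */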
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock. The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = block_write_full_folio(folio, wbc, blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(const struct kiocb *iocb,
		struct address_space *mapping, loff_t pos,
		unsigned len, struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(const struct kiocb *iocb,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct folio *folio, void *fsdata)
{
	int ret;

	ret = block_write_end(pos, len, copied, folio);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.write_begin = blkdev_write_begin,
	.write_end = blkdev_write_end,
	.migrate_folio = buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	iomap_bio_read_folio(folio, &blkdev_iomap_ops);
	return 0;
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_bio_readahead(rac, &blkdev_iomap_ops);
}

static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	loff_t isize = i_size_read(wpc->inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;

	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error;

		error = blkdev_iomap_begin(wpc->inode, offset, isize - offset,
				IOMAP_WRITE, &wpc->iomap, NULL);
		if (error)
			return error;
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.writeback_range = blkdev_writeback_range,
	.writeback_submit = iomap_ioend_writeback_submit,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode = mapping->host,
		.wbc = wbc,
		.ops = &blkdev_writeback_ops
	};

	return iomap_writepages(&wpc);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
	.migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so seek
 * against the size of the underlying block device inode instead.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file that is just being opened (e.g.
 * from the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

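/*
 * Translate the file flags to a blk_mode_t and open the underlying block
 * device, using the file itself as the holder for exclusive (O_EXCL) opens.
 */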
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev, true);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
	if (blk_get_integrity(bdev->bd_disk))
		filp->f_mode |= FMODE_HAS_METADATA;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

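/*
 * Direct write helper: invalidate the page cache over the affected range,
 * issue the direct I/O, and invalidate again afterwards so stale cached
 * pages never shadow the written data.
 */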
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
			NULL);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	if (atomic) {
		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		if (atomic)
			return -EINVAL;
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		/*
		 * Take i_rwsem and invalidate_lock to avoid racing with
		 * set_blocksize changing i_blkbits/folio order and punching
		 * out the pagecache.
		 */
		inode_lock_shared(bd_inode);
		ret = blkdev_buffered_write(iocb, from);
		inode_unlock_shared(bd_inode);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret > 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret != -EIOCBQUEUED)
			iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	/*
	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
	 * changing i_blkbits/folio order and punching out the pagecache.
	 */
	inode_lock_shared(bd_inode);
	ret = filemap_read(iocb, to, ret);
	inode_unlock_shared(bd_inode);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_WRITE_ZEROES)

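/*
 * fallocate() on a block device zeroes (and possibly unmaps) the requested
 * range via blkdev_issue_zeroout(); it never changes the device size.
 */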
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
		loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	unsigned int flags;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;
	/*
	 * Don't allow writing zeroes if the device does not enable the
	 * unmap write zeroes operation.
	 */
	if ((mode & FALLOC_FL_WRITE_ZEROES) &&
	    !bdev_write_zeroes_unmap_sectors(bdev))
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	inode_lock(inode);
	filemap_invalidate_lock(inode->i_mapping);

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOUNMAP;
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOFALLBACK;
		break;
	case FALLOC_FL_WRITE_ZEROES:
		flags = 0;
		break;
	default:
		error = -EOPNOTSUPP;
		goto fail;
	}

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
	if (error)
		goto fail;

	error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
			len >> SECTOR_SHIFT, GFP_KERNEL, flags);
fail:
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return error;
}

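/* Refuse writable shared mappings of read-only block devices. */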
static int blkdev_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;

	if (bdev_read_only(I_BDEV(bdev_file_inode(file))))
		return generic_file_readonly_mmap_prepare(desc);

	return generic_file_mmap_prepare(desc);
}

const struct file_operations def_blk_fops = {
	.open = blkdev_open,
	.release = blkdev_release,
	.llseek = blkdev_llseek,
	.read_iter = blkdev_read_iter,
	.write_iter = blkdev_write_iter,
	.iopoll = iocb_bio_iopoll,
	.mmap_prepare = blkdev_mmap_prepare,
	.fsync = blkdev_fsync,
	.unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_blkdev_ioctl,
#endif
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.fallocate = blkdev_fallocate,
	.uring_cmd = blkdev_uring_cmd,
	.fop_flags = FOP_BUFFER_RASYNC,
};

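/*
 * Set up the bio_set used to allocate the blkdev_dio (with its embedded
 * bio) for the direct I/O paths above.
 */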
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);