// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

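/*
 * Direct I/O must be aligned for the device: the starting position has
 * to fall on a logical block boundary, and the iov_iter must satisfy
 * the queue's length/address alignment (see bdev_iter_is_aligned()).
 */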
static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
				struct iov_iter *iter)
{
	return iocb->ki_pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

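/*
 * Fast path for small synchronous direct I/O: issue a single bio with
 * an on-stack descriptor and wait for it inline.  Only used when the
 * whole request fits into one bio.
 */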
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	WARN_ON_ONCE(iocb->ki_flags & IOCB_HAS_METADATA);
	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_write_stream = iocb->ki_write_stream;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

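/*
 * State shared by all bios of one multi-bio direct I/O request.  The
 * union reflects the two completion models: async requests complete
 * the iocb, sync requests wake the waiting task.  The first bio is
 * embedded so the common case needs no extra allocation.
 */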
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

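/*
 * Completion handler for the multi-bio path.  Errors from individual
 * bios are coalesced into the embedded bio's bi_status, and the
 * request as a whole completes once the last reference is dropped.
 */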
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
	bool is_sync = dio->flags & DIO_IS_SYNC;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!is_sync && (dio->iocb->ki_flags & IOCB_HAS_METADATA))
		bio_integrity_unmap_user(bio);

	if (atomic_dec_and_test(&dio->ref)) {
		if (!is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

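/*
 * Slow path for direct I/O that spans more than one bio.  Bios are
 * allocated and submitted in a loop under a plug; dio->ref counts the
 * bios in flight, and an extra reference on the embedded bio keeps
 * the dio itself alive until the submitter has read the result.
 */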
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is
	 * embedded into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_write_stream = iocb->ki_write_stream;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking I/O, and we would need to
			 * allocate another bio if we have data left to map.
			 * As we cannot guarantee that one of the sub-bios
			 * will not fail to be issued for NOWAIT, and as
			 * error results are coalesced across all of them,
			 * be safe and ask for a retry of this from blocking
			 * context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				ret = -EAGAIN;
				goto fail;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		if (!is_sync && (iocb->ki_flags & IOCB_HAS_METADATA)) {
			ret = bio_integrity_map_iter(bio, iocb->private);
			if (unlikely(ret))
				goto fail;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
fail:
	bio_release_pages(bio, false);
	bio_clear_flag(bio, BIO_REFFED);
	bio_put(bio);
	blk_finish_plug(&plug);
	return ret;
}

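/*
 * Completion handler for the single-bio async path: no reference
 * counting is needed since exactly one bio was submitted.
 */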
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA)
		bio_integrity_unmap_user(bio);

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

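/*
 * Single-bio async path: the request fits in one bio, so completion
 * runs entirely from the bio end_io handler.  For IOCB_HIPRI the
 * submitted bio is stashed in iocb->private for iocb_bio_iopoll().
 */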
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_write_stream = iocb->ki_write_stream;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret))
			goto out_bio_put;
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA) {
		ret = bio_integrity_map_iter(bio, iocb->private);
		WRITE_ONCE(iocb->private, NULL);
		if (unlikely(ret))
			goto out_bio_put;
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;

out_bio_put:
	bio_put(bio);
	return ret;
}

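/*
 * Dispatch a direct I/O request to one of the three implementations:
 * the on-stack sync path, the single-bio async path, or the multi-bio
 * slow path.  Atomic writes must fit into a single bio.
 */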
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb, iter))
		return -EINVAL;

	if (iov_iter_rw(iter) == WRITE) {
		u16 max_write_streams = bdev_max_write_streams(bdev);

		if (iocb->ki_write_stream) {
			if (iocb->ki_write_stream > max_write_streams)
				return -EINVAL;
		} else if (max_write_streams) {
			enum rw_hint write_hint =
				file_inode(iocb->ki_filp)->i_write_hint;

			/*
			 * Just use the write hint as write stream for block
			 * device writes.  This assumes no file system is
			 * mounted that would use the streams differently.
			 */
			if (write_hint <= max_write_streams)
				iocb->ki_write_stream = write_hint;
		}
	}

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (iocb->ki_flags & IOCB_ATOMIC) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

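/*
 * The block device is trivially mapped: each in-file offset maps 1:1
 * to the same disk address, clamped to the device size.
 */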
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
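/*
 * With CONFIG_BUFFER_HEAD the block device's page cache uses the
 * classic buffer_head paths.  The get_block callback is trivial
 * because the device is linearly mapped.
 */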
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = block_write_full_folio(folio, wbc, blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(const struct kiocb *iocb,
			      struct address_space *mapping, loff_t pos,
			      unsigned len, struct folio **foliop,
			      void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned copied,
			    struct folio *folio, void *fsdata)
{
	int ret = block_write_end(pos, len, copied, folio);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
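/*
 * Without buffer heads the block device page cache goes through
 * iomap, reusing the trivial 1:1 mapping from blkdev_iomap_begin().
 */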
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	loff_t isize = i_size_read(wpc->inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;

	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error;

		error = blkdev_iomap_begin(wpc->inode, offset, isize - offset,
				IOMAP_WRITE, &wpc->iomap, NULL);
		if (error)
			return error;
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.writeback_range	= blkdev_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode		= mapping->host,
		.wbc		= wbc,
		.ops		= &blkdev_writeback_ops
	};

	return iomap_writepages(&wpc);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand instead.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex, and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file that is just being opened (e.g.
 * during the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see the comments in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

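/*
 * Open a block device special file.  For exclusive opens the struct
 * file itself is used as the holder, which also lets later calls tell
 * that this file was opened exclusively (see file_to_blk_mode()).
 */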
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev, true);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

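/*
 * Direct write helper: invalidate cached pages over the range before
 * and after the write so buffered readers do not see stale data.
 */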
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
			NULL);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	if (atomic) {
		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		if (atomic)
			return -EINVAL;
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		/*
		 * Take i_rwsem and invalidate_lock to avoid racing with
		 * set_blocksize changing i_blkbits/folio order and punching
		 * out the pagecache.
		 */
		inode_lock_shared(bd_inode);
		ret = blkdev_buffered_write(iocb, from);
		inode_unlock_shared(bd_inode);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

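/*
 * Reads are clamped to the device size; a read at or past the end of
 * the device returns 0 rather than an error, matching regular file
 * semantics.
 */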
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret > 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret != -EIOCBQUEUED)
			iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	/*
	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
	 * changing i_blkbits/folio order and punching out the pagecache.
	 */
	inode_lock_shared(bd_inode);
	ret = filemap_read(iocb, to, ret);
	inode_unlock_shared(bd_inode);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_WRITE_ZEROES)

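/*
 * fallocate() on a block device zeroes or deallocates a range of the
 * device.  Each supported mode maps to a different zeroout strategy,
 * and the page cache over the range is invalidated first.
 */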
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	unsigned int flags;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;
	/*
	 * Don't allow writing zeroes if the device does not enable the
	 * unmap write zeroes operation.
	 */
	if ((mode & FALLOC_FL_WRITE_ZEROES) &&
	    !bdev_write_zeroes_unmap_sectors(bdev))
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to the logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	inode_lock(inode);
	filemap_invalidate_lock(inode->i_mapping);

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOUNMAP;
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		flags = BLKDEV_ZERO_NOFALLBACK;
		break;
	case FALLOC_FL_WRITE_ZEROES:
		flags = 0;
		break;
	default:
		error = -EOPNOTSUPP;
		goto fail;
	}

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
	if (error)
		goto fail;

	error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
				     len >> SECTOR_SHIFT, GFP_KERNEL, flags);
 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return error;
}

static int blkdev_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;

	if (bdev_read_only(I_BDEV(bdev_file_inode(file))))
		return generic_file_readonly_mmap_prepare(desc);

	return generic_file_mmap_prepare(desc);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap_prepare	= blkdev_mmap_prepare,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

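/*
 * The dio pool allocates each blkdev_dio with the bio embedded at
 * offsetof(struct blkdev_dio, bio), so bio_alloc_bioset() returns a
 * bio that container_of() can turn back into its dio.
 */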
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);