// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

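/*
 * A block device maps 1:1 onto itself: block N of the "file" is block N
 * of the device. Point the buffer_head at the bdev and mark it mapped;
 * nothing is ever allocated, so the create flag can be ignored.
 */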
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

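/*
 * For O_DSYNC writes, REQ_FUA makes the device write the data through to
 * stable storage before completing the request, so no separate cache
 * flush (and thus no completion work item) is needed afterwards.
 */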
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}

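/*
 * Fast path for small synchronous direct I/O: the whole request fits in a
 * single bio that lives on the caller's stack, and the submitter sleeps
 * until the end_io handler clears bi_private and wakes it.
 */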
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS) {
		vecs = inline_vecs;
	} else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

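/*
 * Per-request state for the multi-bio and async paths. The union holds
 * either the waiting task (sync) or the iocb to complete (async); a
 * request is only ever one of the two. Embedding the first bio lets the
 * common case get by with a single allocation from blkdev_dio_pool.
 */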
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

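/*
 * Completion handler shared by all bios of a multi-bio request: the first
 * error seen is latched into the embedded bio's bi_status, and dropping
 * the final reference completes the iocb or wakes the sync waiter.
 */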
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

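/*
 * Slow path for requests needing more than BIO_MAX_VECS bio_vecs: carve
 * the iterator into a chain of bios submitted under a single plug, all
 * tracked by the refcounted blkdev_dio above.
 */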
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded in the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && iter_is_iovec(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

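/*
 * Completion handler for the single-bio async path below: with exactly
 * one bio per request, no reference counting is needed.
 */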
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

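/*
 * Async fast path for requests that fit in a single bio: the iocb is
 * completed directly from the bio's end_io handler.
 */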
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	bool is_read = iov_iter_rw(iter) == READ;
	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = iocb->ki_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (iter_is_iovec(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

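/*
 * Dispatch: requests that fit in one bio take the on-stack fast path if
 * synchronous, or the single-bio async path otherwise; anything larger
 * falls back to the multi-bio loop.
 */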
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags, struct page **pagep,
		void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}

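/*
 * Address space operations for the block device's page cache, mostly
 * wired up from the generic buffer_head helpers via blkdev_get_block().
 */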
const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so the
 * size has to be computed by hand from the block device itself.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

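/*
 * file_write_and_wait_range() only persists the page cache; an explicit
 * flush is still needed to push data out of a volatile device writeback
 * cache. -EOPNOTSUPP just means the device does not support flushes and
 * is safe to ignore.
 */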
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = filp->private_data;
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex, and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	/* O_ACCMODE == 3 is otherwise invalid; here it means ioctl-only access */
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filp->private_data = bdev;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = filp->private_data;

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	struct blk_plug plug;
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	blk_finish_plug(&plug);
	return ret;
}

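/*
 * Reads are clamped to the device size up front (the file inode's size
 * cannot be relied upon), and the iterator is re-expanded on the way out.
 * A short direct read falls through to buffered reading via
 * filemap_read() for the remainder.
 */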
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = iocb->ki_filp->f_mapping;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, pos,
							  pos + count - 1)) {
				ret = -EAGAIN;
				goto reexpand;
			}
		} else {
			ret = filemap_write_and_wait_range(mapping, pos,
							   pos + count - 1);
			if (ret < 0)
				goto reexpand;
		}

		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

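/*
 * fallocate() cannot change the size of a block device; ranges past the
 * end are only accepted with FALLOC_FL_KEEP_SIZE, which clamps them.
 * Zero-range maps to explicit zeroing, punch-hole to the device's
 * write-zeroes offload, and punch-hole plus NO_HIDE_STALE to a discard.
 */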
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else {
			return -EINVAL;
		}
	}

	/* Don't allow IO that isn't aligned to logical block size. */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		goto fail;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL, 0);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

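/*
 * Backing pool for bio_alloc_kiocb() above: front-pads each bio so it can
 * be embedded in a struct blkdev_dio, and keeps a per-cpu bio cache to
 * cheapen allocation for high-IOPS (e.g. polled) callers.
 */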
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);