xref: /linux/fs/ext4/file.c (revision 26b433d0da062d6e19d75350c0171d3cf8ff560d)
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
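/*
 * Read path for DAX inodes.  The shared inode lock is taken before the
 * IS_DAX() recheck and the call into dax_iomap_rw(); IOCB_NOWAIT callers
 * get -EAGAIN instead of blocking on a contended lock.
 */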
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
		        !EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

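/*
 * Wait until all in-flight conversions of unwritten extents on this inode
 * have completed, i.e. until i_unwritten drops to zero.
 */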
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

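/*
 * Common pre-write checks shared by the DAX and regular write paths:
 * run generic_write_checks() and enforce the smaller size limit of
 * bitmap-mapped (non-extent) files.
 */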
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
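/*
 * Write path for DAX inodes.  Like the DAX read path, IOCB_NOWAIT callers
 * get -EAGAIN if the exclusive inode lock cannot be taken immediately;
 * otherwise the write checks, privilege stripping and timestamp update all
 * run under the lock before the IO is handed off to dax_iomap_rw().
 */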
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

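/*
 * Main ->write_iter entry point.  DAX inodes are handed off early; all
 * other writes run under the exclusive inode lock, with unaligned direct
 * AIO serialized against any pending unwritten-extent conversion.
 */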
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

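	/*
	 * iocb->private points at 'overwrite' so that the direct IO path
	 * below can tell whether it is dealing with a pure overwrite of
	 * already-mapped, initialized blocks.
	 */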
	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
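/*
 * DAX page fault handling.  i_mmap_sem is taken shared to serialize faults
 * against truncate and hole punching.  Write faults may allocate blocks or
 * convert unwritten extents, so a journal handle is started for them before
 * calling into the DAX fault code.
 */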
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

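/*
 * At mmap time, pick the DAX or regular vm_ops.  DAX mappings are marked
 * VM_MIXEDMAP since they mix struct page and raw pfn mappings, and
 * VM_HUGEPAGE to indicate that PMD-sized faults are worth attempting.
 */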
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	/* Set the flags to support nowait AIO */
	filp->f_mode |= FMODE_AIO_NOWAIT;

	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because it lets us handle
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function.  Once the extent status tree is fully implemented, it will
 * track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * the extent as data or as a hole depending on whether the page cache has
 * data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

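		/*
		 * Look up at most a pagevec's worth of pages at a time,
		 * clamped so we never ask for pages beyond 'end'.
		 */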
		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If current offset is smaller than the page offset,
			 * there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

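			/*
			 * Walk the buffers in the page: uptodate or unwritten
			 * buffers count as data, anything else as a hole.
			 */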
			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (lastoff + bh->b_size <= startoff)
						goto next;
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
next:
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* Fewer pages were returned than we asked for; we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

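	/*
	 * Walk forward through the extent status information.  Written
	 * extents terminate the search immediately; unwritten extents only
	 * count as data if the page cache holds data for them.
	 */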
	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};