/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock_shared(inode);
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1) &&
            !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

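/*
 * Wait for any in-flight conversion of unwritten extents on this inode to
 * complete, i.e. for EXT4_I(inode)->i_unwritten to drop back to zero. Used
 * by the write path below to serialize unaligned AIO against pending
 * unwritten-extent IO.
 */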
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;
        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

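/*
 * Worked example for the checks used by ext4_file_write_iter() below,
 * assuming a 4096-byte block size (blockmask == 0xfff): an async DIO write
 * of 8192 bytes at offset 4096 from a block-aligned buffer touches only
 * whole blocks and is not considered unaligned, whereas the same write at
 * offset 6144 (6144 & 0xfff == 0x800) partially covers its first and last
 * blocks, so ext4_unaligned_aio() reports it as unaligned and the write path
 * waits in ext4_unwritten_wait() before proceeding. The check only applies
 * to writes below i_size; appends are never treated as unaligned here.
 */
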
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO requests must be serialized against each
         * other, as zeroing of partial blocks by two competing unaligned
         * AIOs can result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
            ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
                overwrite = 1;

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int result;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        down_read(&EXT4_I(inode)->i_mmap_sem);
        result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        if (write)
                sb_end_pagefault(sb);

        return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                              pmd_t *pmd, unsigned int flags)
{
        int result;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        down_read(&EXT4_I(inode)->i_mmap_sem);
        result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
                                     &ext4_iomap_ops);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        if (write)
                sb_end_pagefault(sb);

        return result;
}

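/*
 * Locking pattern shared by the DAX fault handlers in this file: write
 * faults first enter sb_start_pagefault() (so they cannot race with
 * filesystem freezing) and update the file times; every fault then takes
 * i_mmap_sem shared, which keeps truncate and hole punching (which take it
 * exclusively) from changing the block mapping underneath the fault.
 */
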
/*
 * Handle write fault for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, so if it included part of
 * the file we are working on, our pte will get unmapped and the check for
 * pte_same() in wp_pfn_shared() will fail. The fault is then retried and
 * things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        loff_t size;
        int ret;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vma, vmf);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (ext4_encrypted_inode(inode)) {
                int err = fscrypt_get_encryption_info(inode);
                if (err)
                        return 0;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

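/*
 * On the first file open after a read-write mount, the open handler below
 * also records the mount point (as resolved by d_path()) in the
 * superblock's s_last_mounted field; tools such as dumpe2fs -h report it as
 * "Last mounted on:", which helps identify filesystem images and block
 * devices after the fact.
 */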
static int ext4_file_open(struct inode * inode, struct file * filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct dentry *dir;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        if (ext4_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }

        dir = dget_parent(file_dentry(filp));
        if (ext4_encrypted_inode(d_inode(dir)) &&
            !fscrypt_has_permitted_context(d_inode(dir), inode)) {
                ext4_warning(inode->i_sb,
                             "Inconsistent encryption contexts: %lu/%lu",
                             (unsigned long) d_inode(dir)->i_ino,
                             (unsigned long) inode->i_ino);
                dput(dir);
                return -EPERM;
        }
        dput(dir);
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

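/*
 * The SEEK_DATA/SEEK_HOLE support implemented below is reachable from
 * userspace through plain lseek(2). A minimal (userspace) sketch, assuming
 * an open file descriptor fd on an ext4 file:
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);    // first data byte at/after 0
 *      off_t hole = lseek(fd, data, SEEK_HOLE); // next hole at/after data
 *
 * Both return -1 with errno set to ENXIO once the requested offset is at or
 * beyond the end of the file, matching the -ENXIO returns below.
 */
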
/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), so that SEEK_DATA/SEEK_HOLE can be
 * handled for block-mapped and extent-mapped files by the same function.
 * Once the extent status tree fully tracks the extent status of a file, we
 * will be able to use it directly to retrieve the offset for
 * SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we need to look up the
 * page cache to check whether there is data in the range [startoff, endoff],
 * because if this range contains an unwritten extent, the extent counts as
 * data or as a hole depending on whether the page cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     ext4_lblk_t end_blk,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;

        index = startoff >> PAGE_SHIFT;
        end = endoff >> PAGE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop and the
                         * offset is not beyond the end offset, there is a
                         * hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the offset
                 * is smaller than the first page offset, there is a hole at
                 * this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end of the
                         * given range, it is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (page_has_buffers(page)) {
                                lastoff = page_offset(page);
                                bh = head = page_buffers(page);
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
                                                if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
                                                if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                                                *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                                unlock_page(page);
                                                goto out;
                                        }
                                        lastoff += bh->b_size;
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were returned than we asked for, so the rest
                 * of the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret <= 0) {
                        /* No extent found -> no data */
                        if (ret == 0)
                                ret = -ENXIO;
                        inode_unlock(inode);
                        return ret;
                }

                last = es.es_lblk;
                if (last != start)
                        dataoff = (loff_t)last << blkbits;
                if (!ext4_es_is_unwritten(&es))
                        break;

                /*
                 * If there is an unwritten extent at this offset, it counts
                 * as data or as a hole depending on whether the page cache
                 * has data for it.
                 */
                if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                              es.es_lblk + es.es_len, &dataoff))
                        break;
                last += es.es_len;
                dataoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret < 0) {
                        inode_unlock(inode);
                        return ret;
                }
                /* Found a hole? */
                if (ret == 0 || es.es_lblk > last) {
                        if (last != start)
                                holeoff = (loff_t)last << blkbits;
                        break;
                }
                /*
                 * If there is an unwritten extent at this offset, it counts
                 * as data or as a hole depending on whether the page cache
                 * has data for it.
                 */
                if (ext4_es_is_unwritten(&es) &&
                    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                              last + es.es_len, &holeoff))
                        break;

                last += es.es_len;
                holeoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};