/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

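/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed, i.e. EXT4_I(inode)->i_unwritten has dropped to zero.
 */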
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized
	 * or not.  To exclude unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
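/*
 * Write path for DAX files: writers are serialized by the exclusive inode
 * lock (honouring IOCB_NOWAIT), pass through the usual write checks and
 * privilege/timestamp updates, and then go through dax_iomap_rw(), with
 * generic_write_sync() handling O_SYNC/O_DSYNC semantics afterwards.
 */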
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO writes must be serialized with each other,
	 * as zeroing of partial blocks by two competing unaligned AIOs can
	 * result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
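/*
 * Handle a DAX fault of the given size (PTE or huge page).  i_mmap_sem is
 * taken to serialize against truncate and hole punching.  For write faults
 * we additionally start a journal handle, since the fault path may need to
 * allocate blocks, and account the fault with sb_start_pagefault().
 */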
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

/*
 * Handle write fault for VM_MIXEDMAP mappings.  Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate.  Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has also finished by now, and so if it included
 * part of the file we are working on, our pte will get unmapped and the check
 * for pte_same() in wp_pfn_shared() fails.  Thus the fault gets retried and
 * things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vmf->vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

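/*
 * Install the right vm_ops for this mapping: DAX inodes get ext4_dax_vm_ops
 * (and set VM_MIXEDMAP | VM_HUGEPAGE so huge faults can be used); all other
 * inodes use the regular page-cache based ext4_file_vm_ops.
 */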
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

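/*
 * Called on every open of a regular file.  On the first open of a writable
 * filesystem we record the mount point in the superblock's s_last_mounted
 * field for the sysadmin's benefit.  We also make sure the encryption key
 * and context are usable, attach the jbd2 inode for writers, and flag the
 * file as supporting nowait AIO before handing off to dquot_file_open().
 */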
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	/* Set the flags to support nowait AIO */
	filp->f_mode |= FMODE_AIO_NOWAIT;

	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because it lets us handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  Once the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can use it directly to retrieve
 * the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we also need to look
 * at the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * the extent as data or as a hole depending on whether the page cache has
 * data for it or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If current offset is smaller than the page offset,
			 * there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* Fewer pages than we asked for means we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};
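
/*
 * For reference, the SEEK_DATA/SEEK_HOLE path implemented above is driven
 * from userspace through lseek(2).  A minimal userspace sketch (not kernel
 * code; "sparse.img" is a hypothetical file and error handling is trimmed):
 *
 *	#define _GNU_SOURCE		// exposes SEEK_DATA/SEEK_HOLE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("sparse.img", O_RDONLY);
 *		off_t data = 0, hole;
 *
 *		// Walk the file, printing each data extent as [data, hole).
 *		while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *			hole = lseek(fd, data, SEEK_HOLE);
 *			printf("data: %lld..%lld\n",
 *			       (long long)data, (long long)hole);
 *			data = hole;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 *
 * lseek() fails with errno == ENXIO once there is no further data past the
 * requested offset (see ext4_seek_data() above), which ends the loop.
 */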