// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

/*
 * Returns %true if the given DIO request should be attempted with DIO, or
 * %false if it should fall back to buffered I/O.
 *
 * DIO isn't well specified; when it's unsupported (either due to the request
 * being misaligned, or due to the file not supporting DIO at all), filesystems
 * either fall back to buffered I/O or return EINVAL.  For files that don't use
 * any special features like encryption or verity, ext4 has traditionally
 * returned EINVAL for misaligned DIO.  iomap_dio_rw() uses this convention too.
 * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
 *
 * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
 * traditionally falls back to buffered I/O.
 *
 * This function implements the traditional ext4 behavior in all these cases.
 */
static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	u32 dio_align = ext4_dio_alignment(inode);

	if (dio_align == 0)
		return false;

	if (dio_align == 1)
		return true;

	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}
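/*
 * Worked example (hypothetical values): for an encrypted inode with 4096-byte
 * blocks, ext4_dio_alignment() may return 4096.  A DIO at ki_pos 8192 backed
 * by page-aligned iovecs then passes IS_ALIGNED(8192 | 0, 4096) and is
 * attempted as DIO, while one at ki_pos 512 fails the check and falls back to
 * buffered I/O.
 */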
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_should_use_dio(iocb, to)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed
		 * on the inode is not supported by direct I/O.  The
		 * IOCB_DIRECT flag needs to be cleared here in order to
		 * ensure that the direct I/O path within
		 * generic_file_read_iter() is not taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under the inode lock - at this point we are sure it cannot
	 * change anymore.
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered I/O in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the I/O in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct I/O, and
 * they are converted to written only after the I/O is complete.  Until they
 * are mapped, these blocks appear as holes, so dio_zero_block() will assume
 * that it needs to zero out portions of the start and/or end block.  If two
 * AIO threads are at work on the same unwritten block, they must be
 * synchronized or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}
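/*
 * Worked example (hypothetical values): with a 4096-byte block size,
 * blockmask is 0xfff.  An 8192-byte write at pos 4096 from a page-aligned
 * buffer gives (4096 | 0) & 0xfff == 0, i.e. block-aligned, whereas a write
 * at pos 6144 gives 6144 & 0xfff == 2048 and is treated as unaligned.
 */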
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized or
	 * not.  To exclude unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb, from);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}
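/*
 * Worked example (hypothetical values): on a bitmap-mapped (non-extent) file
 * with an s_bitmap_maxbytes of 2 GiB, a 1 MiB write at ki_pos 2 GiB - 4 KiB
 * is truncated by ext4_generic_write_checks() to the 4 KiB that still fits,
 * while a write starting at or beyond 2 GiB fails with -EFBIG.
 */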
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks.  But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list.  In that case, we need to try
		 * to remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}
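/*
 * Worked example (hypothetical values): with 4096-byte blocks, a 16 KiB
 * extending write at offset 0 that only manages to write 6000 bytes gives
 * written_blk = ALIGN(6000, 4096) = 8192 and end_blk = ALIGN(16384, 4096) =
 * 16384, so the allocated-but-unwritten tail beyond 8192 is truncated away.
 */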
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN) {
		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
		if (error < 0)
			return error;
	}
	/*
	 * If we are extending the file, we have to update i_size here before
	 * the page cache gets invalidated in iomap_dio_rw().  Otherwise racing
	 * buffered reads could zero out too much from page cache pages.
	 * Update of the on-disk size will happen later in
	 * ext4_dio_write_iter(), where we have enough information to also
	 * perform orphan list handling etc.  Note that we perform all
	 * extending writes synchronously under i_rwsem held exclusively, so
	 * the i_size update is safe here in that case.  If the write was not
	 * extending, we cannot see pos > i_size here, because operations
	 * reducing i_size, like truncate, wait for all outstanding DIO before
	 * updating i_size.
	 */
	pos += size;
	if (pos > i_size_read(inode))
		i_size_write(inode, pos);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired, then see if
 * any condition requires an exclusive inode lock.  If yes, then we restart
 * the whole operation by releasing the shared lock and acquiring the
 * exclusive lock.
 *
 * - For unaligned I/O we never take the shared lock, as it may cause data
 *   corruption when two unaligned I/Os try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating the inode's i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - In practice, shared locking is kept only for overwrites; otherwise we
 *   switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the I/O operation will overwrite allocated
	 * and initialized blocks.
	 * We need the exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}
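/*
 * For example (hypothetical case): a DIO overwrite of already-mapped,
 * initialized blocks on an inode with no security attributes to clear
 * (IS_NOSEC) keeps the shared lock for the whole write, while an appending
 * write drops it, retakes i_rwsem exclusively, and restarts the checks.
 */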
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with the shared inode lock, unless it is
	 * unaligned I/O, which needs the exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is an
	 * extending I/O.  A more reliable check is done in
	 * ext4_dio_write_checks() with proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_should_use_dio(iocb, from)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct I/O writes must be serialized with each other, as
	 * zeroing of partial blocks by two competing unaligned I/Os can
	 * result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned I/O in flight.
	 * For I/Os where we don't need to wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may become a no-op anyway, since we
	 * start with the exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0,
			   NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated.  This is in an attempt to preserve the
		 * expected direct I/O semantics in case we fall back to
		 * buffered I/O to complete the rest of the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}
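/*
 * Worked example (hypothetical values): if the DIO part stopped at offset
 * 1 MiB and the buffered fallback then wrote another 8192 bytes, the flush
 * and invalidation at the end of ext4_dio_write_iter() cover bytes
 * 1048576..1056767, i.e. page indexes 256..257 with 4 KiB pages.
 */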
#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif
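/*
 * Note the ordering used by the extending-write paths above: the inode is
 * added to the orphan list in its own transaction before any blocks are
 * written, so that if we crash mid-write, recovery can trim blocks allocated
 * beyond the recorded on-disk size; ext4_handle_inode_extension() then
 * updates the size and drops the orphan entry once the write has finished.
 */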
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in
	 * a COW page; COW writes should *not* poke the journal (the file will
	 * not be changed).  Doing so would cause unintended failures when
	 * mounted read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so
	 * that we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		filemap_invalidate_lock_shared(mapping);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			filemap_invalidate_unlock_shared(mapping);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		filemap_invalidate_lock_shared(mapping);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		filemap_invalidate_unlock_shared(mapping);
		sb_end_pagefault(sb);
	} else {
		filemap_invalidate_unlock_shared(mapping);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
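/*
 * For example (from userspace): mmap(NULL, len, PROT_READ | PROT_WRITE,
 * MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0) fails with EOPNOTSUPP unless the
 * file is DAX-capable on a synchronous dax_device, while an ordinary
 * MAP_SHARED mapping of the same file succeeds.
 */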
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_journal;
	lock_buffer(sbi->s_sbh);
	strncpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes value
 * for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}
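/*
 * For example (hypothetical layout): in a sparse file with data only in
 * blocks 0-3 and an i_size of 1 MiB, lseek(fd, 0, SEEK_HOLE) returns 16384
 * (with 4 KiB blocks), and lseek(fd, 16384, SEEK_DATA) fails with ENXIO,
 * since no data exists between that offset and EOF.
 */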
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
	.fileattr_get	= ext4_fileattr_get,
	.fileattr_set	= ext4_fileattr_set,
};