// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/filelock.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

/*
 * Returns %true if the given DIO request should be attempted with DIO, or
 * %false if it should fall back to buffered I/O.
 *
 * DIO isn't well specified; when it's unsupported (either due to the request
 * being misaligned, or due to the file not supporting DIO at all), filesystems
 * either fall back to buffered I/O or return EINVAL. For files that don't use
 * any special features like encryption or verity, ext4 has traditionally
 * returned EINVAL for misaligned DIO. iomap_dio_rw() uses this convention too.
 * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
 *
 * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
 * traditionally falls back to buffered I/O.
 *
 * This function implements the traditional ext4 behavior in all these cases.
 */
static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	u32 dio_align = ext4_dio_alignment(inode);

	if (dio_align == 0)
		return false;

	if (dio_align == 1)
		return true;

	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}
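/*
 * Worked example (illustrative, not part of the original source): with
 * dio_align == 4096, a request at ki_pos == 8192 whose iovecs are only
 * 512-byte aligned gives IS_ALIGNED(8192 | 512, 4096) == false, so the
 * request is rejected per the convention above, while the same request
 * from a 4096-byte-aligned buffer passes and is attempted as DIO. OR-ing
 * the position and the iter alignment lets one IS_ALIGNED() test cover
 * both the file offset and every iovec base/length.
 */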
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_should_use_dio(iocb, to)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed
		 * on the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered I/O in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe,
				     size_t len, unsigned int flags)
{
	struct inode *inode = file_inode(in);

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;
	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}
/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated or initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode,
			      loff_t pos, loff_t len, bool *unwritten)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	if (err != blklen)
		return false;
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized or
	 * not. We need to check m_flags to distinguish the unwritten extents.
	 */
	*unwritten = !(map.m_flags & EXT4_MAP_MAPPED);
	return true;
}

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}
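/*
 * Illustrative note (not from the original source): with a 4096-byte block
 * size, blockmask is 0xfff, so a 512-byte write at pos 4096 is unaligned
 * ((4096 | 512) & 0xfff != 0), while a 4096-byte write at pos 8192 from a
 * page-aligned buffer is block-aligned. Similarly, a bitmap-format
 * (non-extent) file whose write crosses s_bitmap_maxbytes has its iov_iter
 * truncated above rather than failing outright; only a write starting at or
 * past the limit returns -EFBIG.
 */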
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t old_size = i_size_read(inode);
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;

	/*
	 * If the position is beyond the EOF, it is necessary to zero out the
	 * partial block beyond the existing EOF, as it may contain stale data
	 * written through mmap.
	 */
	if (iocb->ki_pos > old_size && !ext4_verity_in_progress(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		ret = ext4_block_zero_eof(inode, old_size, iocb->ki_pos);
		if (ret)
			return ret;
	}

	return count;
}

static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = generic_perform_write(iocb, from);

out:
	inode_unlock(inode);
	if (unlikely(ret <= 0))
		return ret;
	return generic_write_sync(iocb, ret);
}

static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, ssize_t count)
{
	handle_t *handle;

	lockdep_assert_held_write(&inode->i_rwsem);
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (ext4_update_inode_size(inode, offset + written)) {
		int ret = ext4_mark_inode_dirty(handle, inode);

		if (unlikely(ret)) {
			ext4_journal_stop(handle);
			return ret;
		}
	}

	if ((written == count) && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	return written;
}

/*
 * Clean up the inode after a DIO or DAX extending write has completed and the
 * inode size has been updated using ext4_handle_inode_extension().
 */
static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
{
	lockdep_assert_held_write(&inode->i_rwsem);
	if (need_trunc) {
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try
		 * to remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
		return;
	}
	/*
	 * If i_disksize got extended either due to writeback of delalloc
	 * blocks or an extending truncate while the DIO was running, we could
	 * fail to clean up the orphan list in ext4_handle_inode_extension().
	 * Do it now.
	 */
	if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

		if (IS_ERR(handle)) {
			/*
			 * The write has successfully completed. Not much to
			 * do with the error here so just clean up the orphan
			 * list and hope for the best.
			 */
			ext4_orphan_del(NULL, inode);
			return;
		}
		ext4_orphan_del(handle, inode);
		ext4_journal_stop(handle);
	}
}
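/*
 * Illustrative summary (not from the original source): an extending DIO/DAX
 * write brackets the I/O with orphan-list handling. Before the I/O is issued
 * the inode is put on the orphan list (ext4_orphan_add()), so a crash
 * mid-write exposes no blocks past i_size. Once the I/O completes,
 * ext4_handle_inode_extension() updates i_size/i_disksize and, for a fully
 * successful write, drops the orphan entry; ext4_inode_extension_cleanup()
 * then catches the short-write, error, and race-with-writeback cases.
 */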
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (!error && size && (flags & IOMAP_DIO_UNWRITTEN) &&
	    (iocb->ki_flags & IOCB_ATOMIC))
		error = ext4_convert_unwritten_extents_atomic(NULL, inode, pos,
							      size);
	else if (!error && size && (flags & IOMAP_DIO_UNWRITTEN))
		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
	if (error)
		return error;
	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended. Also we can race with truncate or write
	 * expanding the file so we have to be a bit careful here.
	 */
	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
	    pos + size <= i_size_read(inode))
		return 0;
	error = ext4_handle_inode_extension(inode, pos, size, size);
	return error < 0 ? error : 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};
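/*
 * Illustrative note (not from the original source): the end_io callback runs
 * once per completed DIO write. Writes into holes go through unwritten
 * extents (see the comment above ext4_unaligned_io()), so IOMAP_DIO_UNWRITTEN
 * is set and the extents are converted to written here, only after the data
 * has reached the disk. The i_disksize/i_size check above then skips the
 * journal start entirely for the common non-extending overwrite.
 */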
/*
 * The intention here is to start with a shared lock acquired, then see if any
 * condition requires an exclusive inode lock. If yes, then we restart the
 * whole operation by releasing the shared lock and acquiring the exclusive
 * lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since extension
 *   requires updating inode i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - Shared locking is used mostly for overwrites, of both initialized and
 *   unwritten blocks.
 *
 * - Otherwise we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend,
				     int *dio_flags)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;
	bool overwrite, unaligned_io, unwritten;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;

	unaligned_io = ext4_unaligned_io(inode, from, offset);
	*extend = ext4_extending_io(inode, offset, count);
	overwrite = ext4_overwrite_io(inode, offset, count, &unwritten);

	/*
	 * Determine whether we need to upgrade to an exclusive lock. This is
	 * required to change security info in file_modified(), for extending
	 * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten
	 * extents (as partial block zeroing may be required).
	 *
	 * Note that unaligned writes are allowed under shared lock so long as
	 * they are pure overwrites. Otherwise, concurrent unaligned writes
	 * risk data corruption due to partial block zeroing in the dio layer,
	 * and so the I/O must occur exclusively.
	 */
	if (*ilock_shared &&
	    (!IS_NOSEC(inode) || *extend || !overwrite ||
	     (unaligned_io && unwritten))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	/*
	 * Now that locking is settled, determine dio flags and exclusivity
	 * requirements. We don't use DIO_OVERWRITE_ONLY because we already
	 * enforce that behavior. The inode lock is already held exclusive if
	 * the write is non-overwrite or extending, so drain all outstanding
	 * dio and set the force wait dio flag.
	 */
	if (!*ilock_shared && (unaligned_io || *extend)) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		if (unaligned_io && (!overwrite || unwritten))
			inode_dio_wait(inode);
		*dio_flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}
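/*
 * Illustrative decision matrix (not from the original source), derived from
 * the checks above and assuming IS_NOSEC (file_modified() does not need to
 * change security info): an aligned overwrite of already-written blocks keeps
 * the shared lock and can run concurrently with other DIO; an unaligned pure
 * overwrite of written blocks also stays shared; extending writes, writes
 * into holes, and unaligned writes over unwritten extents all restart under
 * the exclusive lock. There, unaligned or extending I/O is forced synchronous
 * via IOMAP_DIO_FORCE_WAIT, and unaligned I/O that is not a pure overwrite of
 * written blocks first drains in-flight DIO with inode_dio_wait().
 */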
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	bool extend = false;
	bool ilock_shared = true;
	int dio_flags = 0;

	/*
	 * Quick check here without any i_rwsem lock to see if it is an
	 * extending IO. A more reliable check is done in
	 * ext4_dio_write_checks() with proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_should_use_dio(iocb, from)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	/*
	 * Prevent inline data from being created since we are going to
	 * allocate blocks for DIO. We know the inode does not currently have
	 * inline data because ext4_should_use_dio() checked for it, but we
	 * have to clear the state flag before the write checks because a lock
	 * cycle could introduce races with other writers.
	 */
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
				    &dio_flags);
	if (ret <= 0)
		return ret;

	offset = iocb->ki_pos;
	count = ret;

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		ext4_journal_stop(handle);
		if (ret)
			goto out;
	}

	ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
			   dio_flags, NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;
	if (extend) {
		/*
		 * We always perform extending DIO writes synchronously, so by
		 * now the IO is completed and ext4_handle_inode_extension()
		 * was called. Clean up the inode in case of an error or a
		 * race with writeback of delalloc blocks.
		 */
		WARN_ON_ONCE(ret == -EIOCBQUEUED);
		ext4_inode_extension_cleanup(inode, ret < 0);
	}

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		/*
		 * There is no support for atomic writes on buffered I/O yet;
		 * we should never fall back to buffered I/O for DIO atomic
		 * writes.
		 */
		WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC);

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend) {
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
		ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
	}
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	ret = ext4_emergency_state(inode->i_sb);
	if (unlikely(ret))
		return ret;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	if (iocb->ki_flags & IOCB_ATOMIC) {
		size_t len = iov_iter_count(from);

		if (len < EXT4_SB(inode->i_sb)->s_awu_min ||
		    len > EXT4_SB(inode->i_sb)->s_awu_max)
			return -EINVAL;

		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}
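/*
 * Illustrative note (not from the original source; the awu values below are
 * made-up examples): for IOCB_ATOMIC the total write length must fall within
 * the filesystem's atomic write unit bounds. E.g. with s_awu_min == 4096 and
 * s_awu_max == 65536, a 2048-byte or a 128 KiB atomic write is rejected with
 * -EINVAL before any locking; generic_atomic_write_valid() then enforces the
 * generic alignment rules for whatever length survives this check.
 */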
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in
	 * a COW page; COW writes should *not* poke the journal (the file will
	 * not be changed). Doing so would cause unintended failures when
	 * mounted read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for order != 0 (i.e. it is set only in do_cow_fault); for
	 * other sizes, dax_iomap_fault() will handle splitting / fallback so
	 * that we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		filemap_invalidate_lock_shared(mapping);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			filemap_invalidate_unlock_shared(mapping);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		filemap_invalidate_lock_shared(mapping);
	}
	result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, order, pfn);
		filemap_invalidate_unlock_shared(mapping);
		sb_end_pagefault(sb);
	} else {
		filemap_invalidate_unlock_shared(mapping);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
{
	int ret;
	struct file *file = desc->file;
	struct inode *inode = file->f_mapping->host;
	struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;

	if (file->f_mode & FMODE_WRITE)
		ret = ext4_emergency_state(inode->i_sb);
	else
		ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
	if (unlikely(ret))
		return ret;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(desc, file_inode(file), dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		desc->vm_ops = &ext4_dax_vm_ops;
		desc->vm_flags |= VM_HUGEPAGE;
	} else {
		desc->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
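/*
 * Illustrative note (not from the original source): a write fault that fails
 * with -ENOSPC may simply have lost a race with concurrent block allocation
 * and freeing, so the journal handle is restarted and the fault retried for
 * as long as ext4_should_retry_alloc() considers it worthwhile; only a
 * persistent failure is propagated to the faulting task. The read-side path
 * takes no journal handle at all, since a read fault never modifies the file.
 */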
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
	    !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_journal;
	lock_buffer(sbi->s_sbh);
	strtomem_pad(sbi->s_es->s_last_mounted, cp, 0);
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (filp->f_mode & FMODE_WRITE)
		ret = ext4_emergency_state(inode->i_sb);
	else
		ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
	if (unlikely(ret))
		return ret;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	if (ext4_inode_can_atomic_write(inode))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
	return dquot_file_open(inode, filp);
}
/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes value
 * for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = ext4_get_maxbytes(inode);

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap_prepare	= ext4_file_mmap_prepare,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= ext4_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
			  FOP_DIO_PARALLEL_WRITE | FOP_DONTCACHE,
	.setlease	= generic_setlease,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_inode_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
	.fileattr_get	= ext4_fileattr_get,
	.fileattr_set	= ext4_fileattr_set,
};