1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * 4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. 5 * 6 * Regular file handling primitives for NTFS-based filesystems. 7 * 8 */ 9 10 #include <linux/backing-dev.h> 11 #include <linux/blkdev.h> 12 #include <linux/buffer_head.h> 13 #include <linux/compat.h> 14 #include <linux/falloc.h> 15 #include <linux/fiemap.h> 16 #include <linux/fileattr.h> 17 18 #include "debug.h" 19 #include "ntfs.h" 20 #include "ntfs_fs.h" 21 22 static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg) 23 { 24 struct fstrim_range __user *user_range; 25 struct fstrim_range range; 26 struct block_device *dev; 27 int err; 28 29 if (!capable(CAP_SYS_ADMIN)) 30 return -EPERM; 31 32 dev = sbi->sb->s_bdev; 33 if (!bdev_max_discard_sectors(dev)) 34 return -EOPNOTSUPP; 35 36 user_range = (struct fstrim_range __user *)arg; 37 if (copy_from_user(&range, user_range, sizeof(range))) 38 return -EFAULT; 39 40 range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev)); 41 42 err = ntfs_trim_fs(sbi, &range); 43 if (err < 0) 44 return err; 45 46 if (copy_to_user(user_range, &range, sizeof(range))) 47 return -EFAULT; 48 49 return 0; 50 } 51 52 /* 53 * ntfs_ioctl - file_operations::unlocked_ioctl 54 */ 55 long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg) 56 { 57 struct inode *inode = file_inode(filp); 58 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; 59 60 /* Avoid any operation if inode is bad. */ 61 if (unlikely(is_bad_ni(ntfs_i(inode)))) 62 return -EINVAL; 63 64 switch (cmd) { 65 case FITRIM: 66 return ntfs_ioctl_fitrim(sbi, arg); 67 } 68 return -ENOTTY; /* Inappropriate ioctl for device. */ 69 } 70 71 #ifdef CONFIG_COMPAT 72 long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg) 73 74 { 75 return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 76 } 77 #endif 78 79 /* 80 * ntfs_getattr - inode_operations::getattr 81 */ 82 int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path, 83 struct kstat *stat, u32 request_mask, u32 flags) 84 { 85 struct inode *inode = d_inode(path->dentry); 86 struct ntfs_inode *ni = ntfs_i(inode); 87 88 /* Avoid any operation if inode is bad. */ 89 if (unlikely(is_bad_ni(ni))) 90 return -EINVAL; 91 92 stat->result_mask |= STATX_BTIME; 93 stat->btime = ni->i_crtime; 94 stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */ 95 96 if (inode->i_flags & S_IMMUTABLE) 97 stat->attributes |= STATX_ATTR_IMMUTABLE; 98 99 if (inode->i_flags & S_APPEND) 100 stat->attributes |= STATX_ATTR_APPEND; 101 102 if (is_compressed(ni)) 103 stat->attributes |= STATX_ATTR_COMPRESSED; 104 105 if (is_encrypted(ni)) 106 stat->attributes |= STATX_ATTR_ENCRYPTED; 107 108 stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | 109 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND; 110 111 generic_fillattr(idmap, request_mask, inode, stat); 112 113 return 0; 114 } 115 116 static int ntfs_extend_initialized_size(struct file *file, 117 struct ntfs_inode *ni, 118 const loff_t valid, 119 const loff_t new_valid) 120 { 121 struct inode *inode = &ni->vfs_inode; 122 struct address_space *mapping = inode->i_mapping; 123 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; 124 loff_t pos = valid; 125 int err; 126 127 if (valid >= new_valid) 128 return 0; 129 130 if (is_resident(ni)) { 131 ni->i_valid = new_valid; 132 return 0; 133 } 134 135 WARN_ON(is_compressed(ni)); 136 137 for (;;) { 138 u32 zerofrom, len; 139 struct folio *folio; 140 u8 bits; 141 CLST vcn, lcn, clen; 142 143 if (is_sparsed(ni)) { 144 bits = sbi->cluster_bits; 145 vcn = pos >> bits; 146 147 err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, 148 false); 149 if (err) 150 goto out; 151 152 if (lcn == SPARSE_LCN) { 153 pos = ((loff_t)clen + vcn) << bits; 154 ni->i_valid = pos; 155 goto next; 156 } 157 } 158 159 zerofrom = pos & (PAGE_SIZE - 1); 160 len = PAGE_SIZE - zerofrom; 161 162 if (pos + len > new_valid) 163 len = new_valid - pos; 164 165 err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL); 166 if (err) 167 goto out; 168 169 folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom); 170 171 err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL); 172 if (err < 0) 173 goto out; 174 pos += len; 175 176 next: 177 if (pos >= new_valid) 178 break; 179 180 balance_dirty_pages_ratelimited(mapping); 181 cond_resched(); 182 } 183 184 return 0; 185 186 out: 187 ni->i_valid = valid; 188 ntfs_inode_warn(inode, "failed to extend initialized size to %llx.", 189 new_valid); 190 return err; 191 } 192 193 /* 194 * ntfs_zero_range - Helper function for punch_hole. 195 * 196 * It zeroes a range [vbo, vbo_to). 197 */ 198 static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) 199 { 200 int err = 0; 201 struct address_space *mapping = inode->i_mapping; 202 u32 blocksize = i_blocksize(inode); 203 pgoff_t idx = vbo >> PAGE_SHIFT; 204 u32 from = vbo & (PAGE_SIZE - 1); 205 pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT; 206 loff_t page_off; 207 struct buffer_head *head, *bh; 208 u32 bh_next, bh_off, to; 209 sector_t iblock; 210 struct folio *folio; 211 bool dirty = false; 212 213 for (; idx < idx_end; idx += 1, from = 0) { 214 page_off = (loff_t)idx << PAGE_SHIFT; 215 to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) : 216 PAGE_SIZE; 217 iblock = page_off >> inode->i_blkbits; 218 219 folio = __filemap_get_folio( 220 mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 221 mapping_gfp_constraint(mapping, ~__GFP_FS)); 222 if (IS_ERR(folio)) 223 return PTR_ERR(folio); 224 225 head = folio_buffers(folio); 226 if (!head) 227 head = create_empty_buffers(folio, blocksize, 0); 228 229 bh = head; 230 bh_off = 0; 231 do { 232 bh_next = bh_off + blocksize; 233 234 if (bh_next <= from || bh_off >= to) 235 continue; 236 237 if (!buffer_mapped(bh)) { 238 ntfs_get_block(inode, iblock, bh, 0); 239 /* Unmapped? It's a hole - nothing to do. */ 240 if (!buffer_mapped(bh)) 241 continue; 242 } 243 244 /* Ok, it's mapped. Make sure it's up-to-date. */ 245 if (folio_test_uptodate(folio)) 246 set_buffer_uptodate(bh); 247 else if (bh_read(bh, 0) < 0) { 248 err = -EIO; 249 folio_unlock(folio); 250 folio_put(folio); 251 goto out; 252 } 253 254 mark_buffer_dirty(bh); 255 } while (bh_off = bh_next, iblock += 1, 256 head != (bh = bh->b_this_page)); 257 258 folio_zero_segment(folio, from, to); 259 dirty = true; 260 261 folio_unlock(folio); 262 folio_put(folio); 263 cond_resched(); 264 } 265 out: 266 if (dirty) 267 mark_inode_dirty(inode); 268 return err; 269 } 270 271 /* 272 * ntfs_file_mmap_prepare - file_operations::mmap_prepare 273 */ 274 static int ntfs_file_mmap_prepare(struct vm_area_desc *desc) 275 { 276 struct file *file = desc->file; 277 struct inode *inode = file_inode(file); 278 struct ntfs_inode *ni = ntfs_i(inode); 279 u64 from = ((u64)desc->pgoff << PAGE_SHIFT); 280 bool rw = desc->vm_flags & VM_WRITE; 281 int err; 282 283 /* Avoid any operation if inode is bad. */ 284 if (unlikely(is_bad_ni(ni))) 285 return -EINVAL; 286 287 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 288 return -EIO; 289 290 if (is_encrypted(ni)) { 291 ntfs_inode_warn(inode, "mmap encrypted not supported"); 292 return -EOPNOTSUPP; 293 } 294 295 if (is_dedup(ni)) { 296 ntfs_inode_warn(inode, "mmap deduplicated not supported"); 297 return -EOPNOTSUPP; 298 } 299 300 if (is_compressed(ni) && rw) { 301 ntfs_inode_warn(inode, "mmap(write) compressed not supported"); 302 return -EOPNOTSUPP; 303 } 304 305 if (rw) { 306 u64 to = min_t(loff_t, i_size_read(inode), 307 from + desc->end - desc->start); 308 309 if (is_sparsed(ni)) { 310 /* Allocate clusters for rw map. */ 311 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; 312 CLST lcn, len; 313 CLST vcn = from >> sbi->cluster_bits; 314 CLST end = bytes_to_cluster(sbi, to); 315 bool new; 316 317 for (; vcn < end; vcn += len) { 318 err = attr_data_get_block(ni, vcn, 1, &lcn, 319 &len, &new, true); 320 if (err) 321 goto out; 322 } 323 } 324 325 if (ni->i_valid < to) { 326 if (!inode_trylock(inode)) { 327 err = -EAGAIN; 328 goto out; 329 } 330 err = ntfs_extend_initialized_size(file, ni, 331 ni->i_valid, to); 332 inode_unlock(inode); 333 if (err) 334 goto out; 335 } 336 } 337 338 err = generic_file_mmap_prepare(desc); 339 out: 340 return err; 341 } 342 343 static int ntfs_extend(struct inode *inode, loff_t pos, size_t count, 344 struct file *file) 345 { 346 struct ntfs_inode *ni = ntfs_i(inode); 347 struct address_space *mapping = inode->i_mapping; 348 loff_t end = pos + count; 349 bool extend_init = file && pos > ni->i_valid; 350 int err; 351 352 if (end <= inode->i_size && !extend_init) 353 return 0; 354 355 /* Mark rw ntfs as dirty. It will be cleared at umount. */ 356 ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY); 357 358 if (end > inode->i_size) { 359 err = ntfs_set_size(inode, end); 360 if (err) 361 goto out; 362 } 363 364 if (extend_init && !is_compressed(ni)) { 365 err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos); 366 if (err) 367 goto out; 368 } else { 369 err = 0; 370 } 371 372 if (file && is_sparsed(ni)) { 373 /* 374 * This code optimizes large writes to sparse file. 375 * TODO: merge this fragment with fallocate fragment. 376 */ 377 struct ntfs_sb_info *sbi = ni->mi.sbi; 378 CLST vcn = pos >> sbi->cluster_bits; 379 CLST cend = bytes_to_cluster(sbi, end); 380 CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); 381 CLST lcn, clen; 382 bool new; 383 384 if (cend_v > cend) 385 cend_v = cend; 386 387 /* 388 * Allocate and zero new clusters. 389 * Zeroing these clusters may be too long. 390 */ 391 for (; vcn < cend_v; vcn += clen) { 392 err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn, 393 &clen, &new, true); 394 if (err) 395 goto out; 396 } 397 /* 398 * Allocate but not zero new clusters. 399 */ 400 for (; vcn < cend; vcn += clen) { 401 err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, 402 &clen, &new, false); 403 if (err) 404 goto out; 405 } 406 } 407 408 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 409 mark_inode_dirty(inode); 410 411 if (IS_SYNC(inode)) { 412 int err2; 413 414 err = filemap_fdatawrite_range(mapping, pos, end - 1); 415 err2 = sync_mapping_buffers(mapping); 416 if (!err) 417 err = err2; 418 err2 = write_inode_now(inode, 1); 419 if (!err) 420 err = err2; 421 if (!err) 422 err = filemap_fdatawait_range(mapping, pos, end - 1); 423 } 424 425 out: 426 return err; 427 } 428 429 static int ntfs_truncate(struct inode *inode, loff_t new_size) 430 { 431 struct super_block *sb = inode->i_sb; 432 struct ntfs_inode *ni = ntfs_i(inode); 433 int err, dirty = 0; 434 u64 new_valid; 435 436 if (!S_ISREG(inode->i_mode)) 437 return 0; 438 439 if (is_compressed(ni)) { 440 if (ni->i_valid > new_size) 441 ni->i_valid = new_size; 442 } else { 443 err = block_truncate_page(inode->i_mapping, new_size, 444 ntfs_get_block); 445 if (err) 446 return err; 447 } 448 449 new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size)); 450 451 truncate_setsize(inode, new_size); 452 453 ni_lock(ni); 454 455 down_write(&ni->file.run_lock); 456 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size, 457 &new_valid, ni->mi.sbi->options->prealloc, NULL); 458 up_write(&ni->file.run_lock); 459 460 if (new_valid < ni->i_valid) 461 ni->i_valid = new_valid; 462 463 ni_unlock(ni); 464 465 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE; 466 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 467 if (!IS_DIRSYNC(inode)) { 468 dirty = 1; 469 } else { 470 err = ntfs_sync_inode(inode); 471 if (err) 472 return err; 473 } 474 475 if (dirty) 476 mark_inode_dirty(inode); 477 478 /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/ 479 480 return 0; 481 } 482 483 /* 484 * ntfs_fallocate - file_operations::ntfs_fallocate 485 * 486 * Preallocate space for a file. This implements ntfs's fallocate file 487 * operation, which gets called from sys_fallocate system call. User 488 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set 489 * we just allocate clusters without zeroing them out. Otherwise we 490 * allocate and zero out clusters via an expanding truncate. 491 */ 492 static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) 493 { 494 struct inode *inode = file_inode(file); 495 struct address_space *mapping = inode->i_mapping; 496 struct super_block *sb = inode->i_sb; 497 struct ntfs_sb_info *sbi = sb->s_fs_info; 498 struct ntfs_inode *ni = ntfs_i(inode); 499 loff_t end = vbo + len; 500 loff_t vbo_down = round_down(vbo, max_t(unsigned long, 501 sbi->cluster_size, PAGE_SIZE)); 502 bool is_supported_holes = is_sparsed(ni) || is_compressed(ni); 503 loff_t i_size, new_size; 504 bool map_locked; 505 int err; 506 507 /* No support for dir. */ 508 if (!S_ISREG(inode->i_mode)) 509 return -EOPNOTSUPP; 510 511 /* 512 * vfs_fallocate checks all possible combinations of mode. 513 * Do additional checks here before ntfs_set_state(dirty). 514 */ 515 if (mode & FALLOC_FL_PUNCH_HOLE) { 516 if (!is_supported_holes) 517 return -EOPNOTSUPP; 518 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { 519 } else if (mode & FALLOC_FL_INSERT_RANGE) { 520 if (!is_supported_holes) 521 return -EOPNOTSUPP; 522 } else if (mode & 523 ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 524 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) { 525 ntfs_inode_warn(inode, "fallocate(0x%x) is not supported", 526 mode); 527 return -EOPNOTSUPP; 528 } 529 530 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); 531 532 inode_lock(inode); 533 i_size = inode->i_size; 534 new_size = max(end, i_size); 535 map_locked = false; 536 537 if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { 538 /* Should never be here, see ntfs_file_open. */ 539 err = -EOPNOTSUPP; 540 goto out; 541 } 542 543 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | 544 FALLOC_FL_INSERT_RANGE)) { 545 inode_dio_wait(inode); 546 filemap_invalidate_lock(mapping); 547 map_locked = true; 548 } 549 550 if (mode & FALLOC_FL_PUNCH_HOLE) { 551 u32 frame_size; 552 loff_t mask, vbo_a, end_a, tmp; 553 554 err = filemap_write_and_wait_range(mapping, vbo_down, 555 LLONG_MAX); 556 if (err) 557 goto out; 558 559 truncate_pagecache(inode, vbo_down); 560 561 ni_lock(ni); 562 err = attr_punch_hole(ni, vbo, len, &frame_size); 563 ni_unlock(ni); 564 if (!err) 565 goto ok; 566 567 if (err != E_NTFS_NOTALIGNED) 568 goto out; 569 570 /* Process not aligned punch. */ 571 err = 0; 572 mask = frame_size - 1; 573 vbo_a = (vbo + mask) & ~mask; 574 end_a = end & ~mask; 575 576 tmp = min(vbo_a, end); 577 if (tmp > vbo) { 578 err = ntfs_zero_range(inode, vbo, tmp); 579 if (err) 580 goto out; 581 } 582 583 if (vbo < end_a && end_a < end) { 584 err = ntfs_zero_range(inode, end_a, end); 585 if (err) 586 goto out; 587 } 588 589 /* Aligned punch_hole */ 590 if (end_a > vbo_a) { 591 ni_lock(ni); 592 err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL); 593 ni_unlock(ni); 594 if (err) 595 goto out; 596 } 597 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { 598 /* 599 * Write tail of the last page before removed range since 600 * it will get removed from the page cache below. 601 */ 602 err = filemap_write_and_wait_range(mapping, vbo_down, vbo); 603 if (err) 604 goto out; 605 606 /* 607 * Write data that will be shifted to preserve them 608 * when discarding page cache below. 609 */ 610 err = filemap_write_and_wait_range(mapping, end, LLONG_MAX); 611 if (err) 612 goto out; 613 614 truncate_pagecache(inode, vbo_down); 615 616 ni_lock(ni); 617 err = attr_collapse_range(ni, vbo, len); 618 ni_unlock(ni); 619 if (err) 620 goto out; 621 } else if (mode & FALLOC_FL_INSERT_RANGE) { 622 /* Check new size. */ 623 err = inode_newsize_ok(inode, new_size); 624 if (err) 625 goto out; 626 627 /* Write out all dirty pages. */ 628 err = filemap_write_and_wait_range(mapping, vbo_down, 629 LLONG_MAX); 630 if (err) 631 goto out; 632 truncate_pagecache(inode, vbo_down); 633 634 ni_lock(ni); 635 err = attr_insert_range(ni, vbo, len); 636 ni_unlock(ni); 637 if (err) 638 goto out; 639 } else { 640 /* Check new size. */ 641 u8 cluster_bits = sbi->cluster_bits; 642 643 /* Be sure file is non resident. */ 644 if (is_resident(ni)) { 645 ni_lock(ni); 646 err = attr_force_nonresident(ni); 647 ni_unlock(ni); 648 if (err) 649 goto out; 650 } 651 652 /* generic/213: expected -ENOSPC instead of -EFBIG. */ 653 if (!is_supported_holes) { 654 loff_t to_alloc = new_size - inode_get_bytes(inode); 655 656 if (to_alloc > 0 && 657 (to_alloc >> cluster_bits) > 658 wnd_zeroes(&sbi->used.bitmap)) { 659 err = -ENOSPC; 660 goto out; 661 } 662 } 663 664 err = inode_newsize_ok(inode, new_size); 665 if (err) 666 goto out; 667 668 if (new_size > i_size) { 669 /* 670 * Allocate clusters, do not change 'valid' size. 671 */ 672 err = ntfs_set_size(inode, new_size); 673 if (err) 674 goto out; 675 } 676 677 if (is_supported_holes) { 678 CLST vcn = vbo >> cluster_bits; 679 CLST cend = bytes_to_cluster(sbi, end); 680 CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); 681 CLST lcn, clen; 682 bool new; 683 684 if (cend_v > cend) 685 cend_v = cend; 686 687 /* 688 * Allocate and zero new clusters. 689 * Zeroing these clusters may be too long. 690 */ 691 for (; vcn < cend_v; vcn += clen) { 692 err = attr_data_get_block(ni, vcn, cend_v - vcn, 693 &lcn, &clen, &new, 694 true); 695 if (err) 696 goto out; 697 } 698 /* 699 * Allocate but not zero new clusters. 700 */ 701 for (; vcn < cend; vcn += clen) { 702 err = attr_data_get_block(ni, vcn, cend - vcn, 703 &lcn, &clen, &new, 704 false); 705 if (err) 706 goto out; 707 } 708 } 709 710 if (mode & FALLOC_FL_KEEP_SIZE) { 711 ni_lock(ni); 712 /* True - Keep preallocated. */ 713 err = attr_set_size(ni, ATTR_DATA, NULL, 0, 714 &ni->file.run, i_size, &ni->i_valid, 715 true, NULL); 716 ni_unlock(ni); 717 if (err) 718 goto out; 719 } else if (new_size > i_size) { 720 i_size_write(inode, new_size); 721 } 722 } 723 724 ok: 725 err = file_modified(file); 726 if (err) 727 goto out; 728 729 out: 730 if (map_locked) 731 filemap_invalidate_unlock(mapping); 732 733 if (!err) { 734 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 735 mark_inode_dirty(inode); 736 } 737 738 inode_unlock(inode); 739 return err; 740 } 741 742 /* 743 * ntfs_setattr - inode_operations::setattr 744 */ 745 int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 746 struct iattr *attr) 747 { 748 struct inode *inode = d_inode(dentry); 749 struct ntfs_inode *ni = ntfs_i(inode); 750 u32 ia_valid = attr->ia_valid; 751 umode_t mode = inode->i_mode; 752 int err; 753 754 /* Avoid any operation if inode is bad. */ 755 if (unlikely(is_bad_ni(ni))) 756 return -EINVAL; 757 758 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 759 return -EIO; 760 761 err = setattr_prepare(idmap, dentry, attr); 762 if (err) 763 goto out; 764 765 if (ia_valid & ATTR_SIZE) { 766 loff_t newsize, oldsize; 767 768 if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { 769 /* Should never be here, see ntfs_file_open(). */ 770 err = -EOPNOTSUPP; 771 goto out; 772 } 773 inode_dio_wait(inode); 774 oldsize = i_size_read(inode); 775 newsize = attr->ia_size; 776 777 if (newsize <= oldsize) 778 err = ntfs_truncate(inode, newsize); 779 else 780 err = ntfs_extend(inode, newsize, 0, NULL); 781 782 if (err) 783 goto out; 784 785 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 786 i_size_write(inode, newsize); 787 } 788 789 setattr_copy(idmap, inode, attr); 790 791 if (mode != inode->i_mode) { 792 err = ntfs_acl_chmod(idmap, dentry); 793 if (err) 794 goto out; 795 796 /* Linux 'w' -> Windows 'ro'. */ 797 if (0222 & inode->i_mode) 798 ni->std_fa &= ~FILE_ATTRIBUTE_READONLY; 799 else 800 ni->std_fa |= FILE_ATTRIBUTE_READONLY; 801 } 802 803 if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) 804 ntfs_save_wsl_perm(inode, NULL); 805 mark_inode_dirty(inode); 806 out: 807 return err; 808 } 809 810 /* 811 * check_read_restriction: 812 * common code for ntfs_file_read_iter and ntfs_file_splice_read 813 */ 814 static int check_read_restriction(struct inode *inode) 815 { 816 struct ntfs_inode *ni = ntfs_i(inode); 817 818 /* Avoid any operation if inode is bad. */ 819 if (unlikely(is_bad_ni(ni))) 820 return -EINVAL; 821 822 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 823 return -EIO; 824 825 if (is_encrypted(ni)) { 826 ntfs_inode_warn(inode, "encrypted i/o not supported"); 827 return -EOPNOTSUPP; 828 } 829 830 #ifndef CONFIG_NTFS3_LZX_XPRESS 831 if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) { 832 ntfs_inode_warn( 833 inode, 834 "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files"); 835 return -EOPNOTSUPP; 836 } 837 #endif 838 839 if (is_dedup(ni)) { 840 ntfs_inode_warn(inode, "read deduplicated not supported"); 841 return -EOPNOTSUPP; 842 } 843 844 return 0; 845 } 846 847 /* 848 * ntfs_file_read_iter - file_operations::read_iter 849 */ 850 static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 851 { 852 struct file *file = iocb->ki_filp; 853 struct inode *inode = file_inode(file); 854 struct ntfs_inode *ni = ntfs_i(inode); 855 ssize_t err; 856 857 err = check_read_restriction(inode); 858 if (err) 859 return err; 860 861 if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) { 862 ntfs_inode_warn(inode, "direct i/o + compressed not supported"); 863 return -EOPNOTSUPP; 864 } 865 866 return generic_file_read_iter(iocb, iter); 867 } 868 869 /* 870 * ntfs_file_splice_read - file_operations::splice_read 871 */ 872 static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos, 873 struct pipe_inode_info *pipe, size_t len, 874 unsigned int flags) 875 { 876 struct inode *inode = file_inode(in); 877 ssize_t err; 878 879 err = check_read_restriction(inode); 880 if (err) 881 return err; 882 883 return filemap_splice_read(in, ppos, pipe, len, flags); 884 } 885 886 /* 887 * ntfs_get_frame_pages 888 * 889 * Return: Array of locked pages. 890 */ 891 static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index, 892 struct page **pages, u32 pages_per_frame, 893 bool *frame_uptodate) 894 { 895 gfp_t gfp_mask = mapping_gfp_mask(mapping); 896 u32 npages; 897 898 *frame_uptodate = true; 899 900 for (npages = 0; npages < pages_per_frame; npages++, index++) { 901 struct folio *folio; 902 903 folio = __filemap_get_folio(mapping, index, 904 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 905 gfp_mask); 906 if (IS_ERR(folio)) { 907 while (npages--) { 908 folio = page_folio(pages[npages]); 909 folio_unlock(folio); 910 folio_put(folio); 911 } 912 913 return -ENOMEM; 914 } 915 916 if (!folio_test_uptodate(folio)) 917 *frame_uptodate = false; 918 919 pages[npages] = &folio->page; 920 } 921 922 return 0; 923 } 924 925 /* 926 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files). 927 */ 928 static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) 929 { 930 int err; 931 struct file *file = iocb->ki_filp; 932 size_t count = iov_iter_count(from); 933 loff_t pos = iocb->ki_pos; 934 struct inode *inode = file_inode(file); 935 loff_t i_size = i_size_read(inode); 936 struct address_space *mapping = inode->i_mapping; 937 struct ntfs_inode *ni = ntfs_i(inode); 938 u64 valid = ni->i_valid; 939 struct ntfs_sb_info *sbi = ni->mi.sbi; 940 struct page **pages = NULL; 941 struct folio *folio; 942 size_t written = 0; 943 u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; 944 u32 frame_size = 1u << frame_bits; 945 u32 pages_per_frame = frame_size >> PAGE_SHIFT; 946 u32 ip, off; 947 CLST frame; 948 u64 frame_vbo; 949 pgoff_t index; 950 bool frame_uptodate; 951 952 if (frame_size < PAGE_SIZE) { 953 /* 954 * frame_size == 8K if cluster 512 955 * frame_size == 64K if cluster 4096 956 */ 957 ntfs_inode_warn(inode, "page size is bigger than frame size"); 958 return -EOPNOTSUPP; 959 } 960 961 pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS); 962 if (!pages) 963 return -ENOMEM; 964 965 err = file_remove_privs(file); 966 if (err) 967 goto out; 968 969 err = file_update_time(file); 970 if (err) 971 goto out; 972 973 /* Zero range [valid : pos). */ 974 while (valid < pos) { 975 CLST lcn, clen; 976 977 frame = valid >> frame_bits; 978 frame_vbo = valid & ~(frame_size - 1); 979 off = valid & (frame_size - 1); 980 981 err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn, 982 &clen, NULL, false); 983 if (err) 984 goto out; 985 986 if (lcn == SPARSE_LCN) { 987 ni->i_valid = valid = 988 frame_vbo + ((u64)clen << sbi->cluster_bits); 989 continue; 990 } 991 992 /* Load full frame. */ 993 err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT, 994 pages, pages_per_frame, 995 &frame_uptodate); 996 if (err) 997 goto out; 998 999 if (!frame_uptodate && off) { 1000 err = ni_read_frame(ni, frame_vbo, pages, 1001 pages_per_frame); 1002 if (err) { 1003 for (ip = 0; ip < pages_per_frame; ip++) { 1004 folio = page_folio(pages[ip]); 1005 folio_unlock(folio); 1006 folio_put(folio); 1007 } 1008 goto out; 1009 } 1010 } 1011 1012 ip = off >> PAGE_SHIFT; 1013 off = offset_in_page(valid); 1014 for (; ip < pages_per_frame; ip++, off = 0) { 1015 folio = page_folio(pages[ip]); 1016 folio_zero_segment(folio, off, PAGE_SIZE); 1017 flush_dcache_folio(folio); 1018 folio_mark_uptodate(folio); 1019 } 1020 1021 ni_lock(ni); 1022 err = ni_write_frame(ni, pages, pages_per_frame); 1023 ni_unlock(ni); 1024 1025 for (ip = 0; ip < pages_per_frame; ip++) { 1026 folio = page_folio(pages[ip]); 1027 folio_mark_uptodate(folio); 1028 folio_unlock(folio); 1029 folio_put(folio); 1030 } 1031 1032 if (err) 1033 goto out; 1034 1035 ni->i_valid = valid = frame_vbo + frame_size; 1036 } 1037 1038 /* Copy user data [pos : pos + count). */ 1039 while (count) { 1040 size_t copied, bytes; 1041 1042 off = pos & (frame_size - 1); 1043 bytes = frame_size - off; 1044 if (bytes > count) 1045 bytes = count; 1046 1047 frame_vbo = pos & ~(frame_size - 1); 1048 index = frame_vbo >> PAGE_SHIFT; 1049 1050 if (unlikely(fault_in_iov_iter_readable(from, bytes))) { 1051 err = -EFAULT; 1052 goto out; 1053 } 1054 1055 /* Load full frame. */ 1056 err = ntfs_get_frame_pages(mapping, index, pages, 1057 pages_per_frame, &frame_uptodate); 1058 if (err) 1059 goto out; 1060 1061 if (!frame_uptodate) { 1062 loff_t to = pos + bytes; 1063 1064 if (off || (to < i_size && (to & (frame_size - 1)))) { 1065 err = ni_read_frame(ni, frame_vbo, pages, 1066 pages_per_frame); 1067 if (err) { 1068 for (ip = 0; ip < pages_per_frame; 1069 ip++) { 1070 folio = page_folio(pages[ip]); 1071 folio_unlock(folio); 1072 folio_put(folio); 1073 } 1074 goto out; 1075 } 1076 } 1077 } 1078 1079 WARN_ON(!bytes); 1080 copied = 0; 1081 ip = off >> PAGE_SHIFT; 1082 off = offset_in_page(pos); 1083 1084 /* Copy user data to pages. */ 1085 for (;;) { 1086 size_t cp, tail = PAGE_SIZE - off; 1087 1088 folio = page_folio(pages[ip]); 1089 cp = copy_folio_from_iter_atomic(folio, off, 1090 min(tail, bytes), from); 1091 flush_dcache_folio(folio); 1092 1093 copied += cp; 1094 bytes -= cp; 1095 if (!bytes || !cp) 1096 break; 1097 1098 if (cp < tail) { 1099 off += cp; 1100 } else { 1101 ip++; 1102 off = 0; 1103 } 1104 } 1105 1106 ni_lock(ni); 1107 err = ni_write_frame(ni, pages, pages_per_frame); 1108 ni_unlock(ni); 1109 1110 for (ip = 0; ip < pages_per_frame; ip++) { 1111 folio = page_folio(pages[ip]); 1112 folio_clear_dirty(folio); 1113 folio_mark_uptodate(folio); 1114 folio_unlock(folio); 1115 folio_put(folio); 1116 } 1117 1118 if (err) 1119 goto out; 1120 1121 /* 1122 * We can loop for a long time in here. Be nice and allow 1123 * us to schedule out to avoid softlocking if preempt 1124 * is disabled. 1125 */ 1126 cond_resched(); 1127 1128 pos += copied; 1129 written += copied; 1130 1131 count = iov_iter_count(from); 1132 } 1133 1134 out: 1135 kfree(pages); 1136 1137 if (err < 0) 1138 return err; 1139 1140 iocb->ki_pos += written; 1141 if (iocb->ki_pos > ni->i_valid) 1142 ni->i_valid = iocb->ki_pos; 1143 if (iocb->ki_pos > i_size) 1144 i_size_write(inode, iocb->ki_pos); 1145 1146 return written; 1147 } 1148 1149 /* 1150 * check_write_restriction: 1151 * common code for ntfs_file_write_iter and ntfs_file_splice_write 1152 */ 1153 static int check_write_restriction(struct inode *inode) 1154 { 1155 struct ntfs_inode *ni = ntfs_i(inode); 1156 1157 /* Avoid any operation if inode is bad. */ 1158 if (unlikely(is_bad_ni(ni))) 1159 return -EINVAL; 1160 1161 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 1162 return -EIO; 1163 1164 if (is_encrypted(ni)) { 1165 ntfs_inode_warn(inode, "encrypted i/o not supported"); 1166 return -EOPNOTSUPP; 1167 } 1168 1169 if (is_dedup(ni)) { 1170 ntfs_inode_warn(inode, "write into deduplicated not supported"); 1171 return -EOPNOTSUPP; 1172 } 1173 1174 return 0; 1175 } 1176 1177 /* 1178 * ntfs_file_write_iter - file_operations::write_iter 1179 */ 1180 static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 1181 { 1182 struct file *file = iocb->ki_filp; 1183 struct inode *inode = file_inode(file); 1184 struct ntfs_inode *ni = ntfs_i(inode); 1185 ssize_t ret; 1186 int err; 1187 1188 if (!inode_trylock(inode)) { 1189 if (iocb->ki_flags & IOCB_NOWAIT) 1190 return -EAGAIN; 1191 inode_lock(inode); 1192 } 1193 1194 ret = check_write_restriction(inode); 1195 if (ret) 1196 goto out; 1197 1198 if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) { 1199 ntfs_inode_warn(inode, "direct i/o + compressed not supported"); 1200 ret = -EOPNOTSUPP; 1201 goto out; 1202 } 1203 1204 ret = generic_write_checks(iocb, from); 1205 if (ret <= 0) 1206 goto out; 1207 1208 err = file_modified(iocb->ki_filp); 1209 if (err) { 1210 ret = err; 1211 goto out; 1212 } 1213 1214 if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { 1215 /* Should never be here, see ntfs_file_open(). */ 1216 ret = -EOPNOTSUPP; 1217 goto out; 1218 } 1219 1220 ret = ntfs_extend(inode, iocb->ki_pos, ret, file); 1221 if (ret) 1222 goto out; 1223 1224 ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) : 1225 __generic_file_write_iter(iocb, from); 1226 1227 out: 1228 inode_unlock(inode); 1229 1230 if (ret > 0) 1231 ret = generic_write_sync(iocb, ret); 1232 1233 return ret; 1234 } 1235 1236 /* 1237 * ntfs_file_open - file_operations::open 1238 */ 1239 int ntfs_file_open(struct inode *inode, struct file *file) 1240 { 1241 struct ntfs_inode *ni = ntfs_i(inode); 1242 1243 /* Avoid any operation if inode is bad. */ 1244 if (unlikely(is_bad_ni(ni))) 1245 return -EINVAL; 1246 1247 if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 1248 return -EIO; 1249 1250 if (unlikely((is_compressed(ni) || is_encrypted(ni)) && 1251 (file->f_flags & O_DIRECT))) { 1252 return -EOPNOTSUPP; 1253 } 1254 1255 /* Decompress "external compressed" file if opened for rw. */ 1256 if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) && 1257 (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) { 1258 #ifdef CONFIG_NTFS3_LZX_XPRESS 1259 int err = ni_decompress_file(ni); 1260 1261 if (err) 1262 return err; 1263 #else 1264 ntfs_inode_warn( 1265 inode, 1266 "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files"); 1267 return -EOPNOTSUPP; 1268 #endif 1269 } 1270 1271 return generic_file_open(inode, file); 1272 } 1273 1274 /* 1275 * ntfs_file_release - file_operations::release 1276 */ 1277 static int ntfs_file_release(struct inode *inode, struct file *file) 1278 { 1279 struct ntfs_inode *ni = ntfs_i(inode); 1280 struct ntfs_sb_info *sbi = ni->mi.sbi; 1281 int err = 0; 1282 1283 /* If we are last writer on the inode, drop the block reservation. */ 1284 if (sbi->options->prealloc && 1285 ((file->f_mode & FMODE_WRITE) && 1286 atomic_read(&inode->i_writecount) == 1) 1287 /* 1288 * The only file when inode->i_fop = &ntfs_file_operations and 1289 * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT. 1290 * 1291 * Add additional check here. 1292 */ 1293 && inode->i_ino != MFT_REC_MFT) { 1294 ni_lock(ni); 1295 down_write(&ni->file.run_lock); 1296 1297 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, 1298 i_size_read(inode), &ni->i_valid, false, 1299 NULL); 1300 1301 up_write(&ni->file.run_lock); 1302 ni_unlock(ni); 1303 } 1304 return err; 1305 } 1306 1307 /* 1308 * ntfs_fiemap - inode_operations::fiemap 1309 */ 1310 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1311 __u64 start, __u64 len) 1312 { 1313 int err; 1314 struct ntfs_inode *ni = ntfs_i(inode); 1315 1316 /* Avoid any operation if inode is bad. */ 1317 if (unlikely(is_bad_ni(ni))) 1318 return -EINVAL; 1319 1320 err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); 1321 if (err) 1322 return err; 1323 1324 ni_lock(ni); 1325 1326 err = ni_fiemap(ni, fieinfo, start, len); 1327 1328 ni_unlock(ni); 1329 1330 return err; 1331 } 1332 1333 /* 1334 * ntfs_file_splice_write - file_operations::splice_write 1335 */ 1336 static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe, 1337 struct file *file, loff_t *ppos, 1338 size_t len, unsigned int flags) 1339 { 1340 ssize_t err; 1341 struct inode *inode = file_inode(file); 1342 1343 err = check_write_restriction(inode); 1344 if (err) 1345 return err; 1346 1347 return iter_file_splice_write(pipe, file, ppos, len, flags); 1348 } 1349 1350 // clang-format off 1351 const struct inode_operations ntfs_file_inode_operations = { 1352 .getattr = ntfs_getattr, 1353 .setattr = ntfs_setattr, 1354 .listxattr = ntfs_listxattr, 1355 .get_acl = ntfs_get_acl, 1356 .set_acl = ntfs_set_acl, 1357 .fiemap = ntfs_fiemap, 1358 }; 1359 1360 const struct file_operations ntfs_file_operations = { 1361 .llseek = generic_file_llseek, 1362 .read_iter = ntfs_file_read_iter, 1363 .write_iter = ntfs_file_write_iter, 1364 .unlocked_ioctl = ntfs_ioctl, 1365 #ifdef CONFIG_COMPAT 1366 .compat_ioctl = ntfs_compat_ioctl, 1367 #endif 1368 .splice_read = ntfs_file_splice_read, 1369 .splice_write = ntfs_file_splice_write, 1370 .mmap_prepare = ntfs_file_mmap_prepare, 1371 .open = ntfs_file_open, 1372 .fsync = generic_file_fsync, 1373 .fallocate = ntfs_fallocate, 1374 .release = ntfs_file_release, 1375 }; 1376 1377 #if IS_ENABLED(CONFIG_NTFS_FS) 1378 const struct file_operations ntfs_legacy_file_operations = { 1379 .llseek = generic_file_llseek, 1380 .read_iter = ntfs_file_read_iter, 1381 .splice_read = ntfs_file_splice_read, 1382 .open = ntfs_file_open, 1383 .release = ntfs_file_release, 1384 }; 1385 #endif 1386 // clang-format on 1387