// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/*
 * ntfs_fileattr_get - inode_operations::fileattr_get
 */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = 0;

	if (inode->i_flags & S_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;

	if (inode->i_flags & S_APPEND)
		flags |= FS_APPEND_FL;

	if (is_compressed(ni))
		flags |= FS_COMPR_FL;

	if (is_encrypted(ni))
		flags |= FS_ENCRYPT_FL;

	fileattr_fill_flags(fa, flags);

	return 0;
}

/*
 * ntfs_fileattr_set - inode_operations::fileattr_set
 */
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		      struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 flags = fa->flags;
	unsigned int new_fl = 0;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL))
		return -EOPNOTSUPP;

	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;

	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;

	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);

	return 0;
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}
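
/*
 * Userspace sketch (not part of the driver): FITRIM reaches the handler
 * above via ioctl(2) with a struct fstrim_range, e.g.
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);
 *
 * On success the range is copied back to userspace, with r.len updated to
 * reflect what was actually trimmed (see ntfs_trim_fs()).
 */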

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
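
/*
 * Note on "valid" (initialized) size: NTFS tracks, per data attribute, how
 * many bytes have actually been initialized; everything in [i_valid, i_size)
 * must read back as zeros. For example, truncating an empty file up to 1M
 * and then writing 4K at offset 64K leaves i_size == 1M, and the write path
 * first zeroes the gap [i_valid, 64K) via ntfs_extend_initialized_size()
 * above (skipping over sparse runs).
 */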

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
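		/*
		 * Worked example (4K pages, hypothetical numbers): zeroing
		 * [vbo = 0x1100, vbo_to = 0x2300) visits pages 1 and 2;
		 * page 1 gets from = 0x100, to = PAGE_SIZE, and page 2
		 * gets from = 0, to = 0x300.
		 */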
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements the ntfs fallocate file
 * operation, which is called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process not aligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;
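
		/*
		 * Example (hypothetical numbers, frame_size = 0x10000):
		 * punching [vbo = 0x12345, end = 0x54321) becomes a zeroed
		 * head [0x12345, 0x20000), a deallocated frame-aligned
		 * middle [0x20000, 0x50000) and a zeroed tail
		 * [0x50000, 0x54321).
		 */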

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve it
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
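
/*
 * Userspace view of ntfs_fallocate() above (sketch):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, vbo, len)
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, vbo, len)
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, vbo, len)
 *	fallocate(fd, 0, vbo, len)
 *
 * Invalid mode combinations (e.g. PUNCH_HOLE without KEEP_SIZE) are
 * rejected by vfs_fallocate() before this handler runs.
 */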

/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}
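
/*
 * An LZNT compression frame covers 2^NTFS_LZNT_CUNIT (i.e. 16) clusters,
 * so e.g. a volume with 4K clusters uses 64K frames, which is 16 pages per
 * frame on a 4K-page machine (these sizes assume NTFS_LZNT_CUNIT == 4).
 */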

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}
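
	/*
	 * Compression is frame-granular, so a store that does not cover a
	 * whole frame is a read-modify-write cycle: ni_read_frame()
	 * decompresses the old frame into 'pages', the user data is copied
	 * over it below, and ni_write_frame() recompresses the result.
	 */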

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
	.fileattr_get	= ntfs_fileattr_get,
	.fileattr_set	= ntfs_fileattr_set,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on