// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
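
/*
 * Illustrative only (editor's sketch, not part of the driver): FITRIM as
 * wired up above is the standard VFS ioctl, so a userspace caller would
 * look roughly like this. The mount point is hypothetical; on success the
 * kernel updates range.len to the number of bytes (potentially) trimmed.
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// trim the whole filesystem
 *		.minlen = 0,		// rounded up to discard granularity
 *	};
 *	int fd = open("/mnt/ntfs", O_RDONLY);	// any file/dir on the fs
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 */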

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
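
/*
 * Editor's note on the "valid" (initialized) size maintained by the helper
 * above: NTFS records, per data attribute, how many leading bytes have
 * actually been written. Reads past ni->i_valid but below i_size must
 * return zeros, so growing the initialized size means writing explicit
 * zeros through the page cache (or skipping sparse runs, as done above).
 * Schematically:
 *
 *	0              ni->i_valid                i_size
 *	|-- initialized data --|--- reads as zeros ---|
 */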

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
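
/*
 * Editor's note, a worked example of the mmap preallocation above (numbers
 * illustrative): with 4K clusters (cluster_bits == 12), a writable shared
 * mapping of bytes [0x10000, 0x30000) of the file gives vcn == 16 and
 * end == bytes_to_cluster(sbi, 0x30000) == 48, so clusters [16, 48) are
 * allocated up front. A plausible rationale (not stated in the code) is
 * that a later page fault on the mapping has no clean way to report
 * -ENOSPC, so allocation failures must surface here at mmap() time.
 */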

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse files.
		 * TODO: merge this fragment with the fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may take too long.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but do not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}
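
/*
 * Illustrative only (editor's sketch): the operation below is reached
 * through the regular fallocate(2) syscall. A typical hole punch from
 * userspace (path and offsets hypothetical) keeps the file size and
 * deallocates the underlying clusters:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/ntfs/big.img", O_RDWR);
 *	// PUNCH_HOLE must be combined with KEEP_SIZE (see vfs_fallocate).
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  1 << 20, 4 << 20);
 */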

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}
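
	/*
	 * Editor's note, a worked example for the unaligned-punch fallback
	 * below (illustrative numbers): with frame_size == 64K the mask is
	 * 0xFFFF. For vbo == 0x11000 and len == 0x23000 (end == 0x34000):
	 * vbo_a == 0x20000 (rounded up), end_a == 0x30000 (rounded down),
	 * so [0x11000, 0x20000) and [0x30000, 0x34000) are zeroed through
	 * the page cache via ntfs_zero_range(), and only the aligned middle
	 * [0x20000, 0x30000) is actually deallocated by attr_punch_hole().
	 */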

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process not aligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted, to preserve it
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure the file is non-resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take too long.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
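
/*
 * Editor's note on the mode handling in ntfs_setattr() below: the driver
 * mirrors the Linux write bits onto the per-file FILE_ATTRIBUTE_READONLY
 * flag; if any 'w' bit (0222) survives the chmod the flag is cleared,
 * otherwise it is set. Illustrative shell view (path hypothetical):
 *
 *	chmod a-w /mnt/ntfs/a.txt    -> std_fa |=  FILE_ATTRIBUTE_READONLY
 *	chmod u+w /mnt/ntfs/a.txt    -> std_fa &= ~FILE_ATTRIBUTE_READONLY
 */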

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction:
 * Common code for ntfs_file_read_iter and ntfs_file_splice_read.
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}
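
/*
 * Editor's note on frame geometry in ntfs_compress_write() below: an LZNT
 * compression unit spans 2^NTFS_LZNT_CUNIT == 16 clusters, so with 4K
 * clusters frame_bits == 4 + 12 == 16 and one frame covers 64K, i.e. 16
 * PAGE_SIZE pages on a 4K-page machine; with 512-byte clusters a frame is
 * 8K. Writes are staged through a whole frame of pages at a time, since a
 * frame is the smallest unit that can be (re)compressed.
 */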

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;
	struct folio *folio;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					folio = page_folio(page);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			folio = page_folio(page);
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}
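
	/*
	 * Editor's note, illustrative numbers for the copy loop below: with
	 * a 64K frame, a write of count == 100 bytes at pos == 0x1FFF0 has
	 * off == 0xFFF0, so bytes == min(frame_size - off, count) == 16 on
	 * the first pass (the tail of one frame), and the remaining 84
	 * bytes land at the start of the next frame on the next pass.
	 */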

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						folio = page_folio(page);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}
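
/*
 * Editor's note: the write path below rejects, in order, writes after a
 * forced shutdown (-EIO), writes to encrypted or deduplicated files
 * (-EOPNOTSUPP), and O_DIRECT writes to compressed files (-EOPNOTSUPP);
 * everything else funnels into either ntfs_compress_write() or
 * __generic_file_write_iter().
 */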

/*
 * check_write_restriction:
 * Common code for ntfs_file_write_iter and ntfs_file_splice_write.
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}
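
/*
 * Editor's note on the release path below: when the "prealloc" mount
 * option is active, writes may leave clusters allocated beyond i_size.
 * The last writer to close the file trims that reservation back to
 * i_size via attr_set_size(); $MFT itself is skipped because its
 * run_lock is never initialized through this file's open path.
 */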
1265 */ 1266 && inode->i_ino != MFT_REC_MFT) { 1267 ni_lock(ni); 1268 down_write(&ni->file.run_lock); 1269 1270 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, 1271 i_size_read(inode), &ni->i_valid, false, 1272 NULL); 1273 1274 up_write(&ni->file.run_lock); 1275 ni_unlock(ni); 1276 } 1277 return err; 1278 } 1279 1280 /* 1281 * ntfs_fiemap - inode_operations::fiemap 1282 */ 1283 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1284 __u64 start, __u64 len) 1285 { 1286 int err; 1287 struct ntfs_inode *ni = ntfs_i(inode); 1288 1289 err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); 1290 if (err) 1291 return err; 1292 1293 ni_lock(ni); 1294 1295 err = ni_fiemap(ni, fieinfo, start, len); 1296 1297 ni_unlock(ni); 1298 1299 return err; 1300 } 1301 1302 /* 1303 * ntfs_file_splice_write - file_operations::splice_write 1304 */ 1305 static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe, 1306 struct file *file, loff_t *ppos, 1307 size_t len, unsigned int flags) 1308 { 1309 ssize_t err; 1310 struct inode *inode = file_inode(file); 1311 1312 err = check_write_restriction(inode); 1313 if (err) 1314 return err; 1315 1316 return iter_file_splice_write(pipe, file, ppos, len, flags); 1317 } 1318 1319 // clang-format off 1320 const struct inode_operations ntfs_file_inode_operations = { 1321 .getattr = ntfs_getattr, 1322 .setattr = ntfs_setattr, 1323 .listxattr = ntfs_listxattr, 1324 .get_acl = ntfs_get_acl, 1325 .set_acl = ntfs_set_acl, 1326 .fiemap = ntfs_fiemap, 1327 }; 1328 1329 const struct file_operations ntfs_file_operations = { 1330 .llseek = generic_file_llseek, 1331 .read_iter = ntfs_file_read_iter, 1332 .write_iter = ntfs_file_write_iter, 1333 .unlocked_ioctl = ntfs_ioctl, 1334 #ifdef CONFIG_COMPAT 1335 .compat_ioctl = ntfs_compat_ioctl, 1336 #endif 1337 .splice_read = ntfs_file_splice_read, 1338 .splice_write = ntfs_file_splice_write, 1339 .mmap = ntfs_file_mmap, 1340 .open = ntfs_file_open, 1341 .fsync = generic_file_fsync, 1342 .fallocate = ntfs_fallocate, 1343 .release = ntfs_file_release, 1344 }; 1345 1346 #if IS_ENABLED(CONFIG_NTFS_FS) 1347 const struct file_operations ntfs_legacy_file_operations = { 1348 .llseek = generic_file_llseek, 1349 .read_iter = ntfs_file_read_iter, 1350 .splice_read = ntfs_file_splice_read, 1351 .open = ntfs_file_open, 1352 .release = ntfs_file_release, 1353 }; 1354 #endif 1355 // clang-format on 1356