// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
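
/*
 * Example (illustrative only, not part of the driver): FITRIM as issued
 * from user space. A minimal sketch assuming an NTFS3 mount at "/mnt";
 * the handler above clamps minlen to the device discard granularity and
 * returns the number of trimmed bytes in range.len. CAP_SYS_ADMIN is
 * required:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		struct fstrim_range range = {
 *			.start = 0,
 *			.len = (__u64)-1,	// whole filesystem
 *			.minlen = 0,		// clamped by the driver
 *		};
 *		int fd = open("/mnt", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, FITRIM, &range))
 *			perror("fstrim");
 *		else
 *			printf("trimmed %llu bytes\n",
 *			       (unsigned long long)range.len);
 *		return 0;
 *	}
 */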

/*
 * ntfs_fileattr_get - inode_operations::fileattr_get
 */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = 0;

	if (inode->i_flags & S_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;

	if (inode->i_flags & S_APPEND)
		flags |= FS_APPEND_FL;

	if (is_compressed(ni))
		flags |= FS_COMPR_FL;

	if (is_encrypted(ni))
		flags |= FS_ENCRYPT_FL;

	fileattr_fill_flags(fa, flags);

	return 0;
}

/*
 * ntfs_fileattr_set - inode_operations::fileattr_set
 */
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		      struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = fa->flags;
	unsigned int new_fl = 0;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_COMPR_FL))
		return -EOPNOTSUPP;

	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;

	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;

	/* Allowed to change compression for empty files and for directories only. */
	if (!is_dedup(ni) && !is_encrypted(ni) &&
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		int err = 0;
		struct address_space *mapping = inode->i_mapping;

		/* Write out all data and wait. */
		filemap_invalidate_lock(mapping);
		err = filemap_write_and_wait(mapping);

		if (err >= 0) {
			/* Change compress state. */
			bool compr = flags & FS_COMPR_FL;
			err = ni_set_compress(inode, compr);

			/* For files change a_ops too. */
			if (!err)
				mapping->a_ops = compr ? &ntfs_aops_cmpr :
							 &ntfs_aops;
		}

		filemap_invalidate_unlock(mapping);

		if (err)
			return err;
	}

	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);

	return 0;
}
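
/*
 * Example (illustrative only): the fileattr handlers above back the
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls used by chattr(1). A sketch of
 * toggling compression from user space; per the driver comment above,
 * compression can be changed for empty files and directories only:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int set_compressed(int fd)
 *	{
 *		int attr;
 *
 *		if (ioctl(fd, FS_IOC_GETFLAGS, &attr))
 *			return -1;
 *		attr |= FS_COMPR_FL;		// like "chattr +c"
 *		return ioctl(fd, FS_IOC_SETFLAGS, &attr);
 *	}
 */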

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}
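
/*
 * Example (illustrative only): the STATX_BTIME and STATX_ATTR_* bits
 * filled in above are visible through statx(2). A minimal user-space
 * sketch that prints the creation time of "f" and reports whether the
 * file is NTFS-compressed:
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *
 *		if (statx(AT_FDCWD, "f", 0, STATX_BTIME, &stx))
 *			return 1;
 *		if (stx.stx_mask & STATX_BTIME)
 *			printf("btime: %lld\n",
 *			       (long long)stx.stx_btime.tv_sec);
 *		if (stx.stx_attributes & STATX_ATTR_COMPRESSED)
 *			printf("compressed\n");
 *		return 0;
 *	}
 */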

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
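
/*
 * Worked example of the "initialized size" logic above (numbers are
 * illustrative): with i_size = 1M and i_valid = 4K, a write at offset
 * 64K must not expose stale disk contents in [4K, 64K). The loop above
 * therefore walks that range page by page, zeroing each page through
 * ntfs_write_begin()/ntfs_write_end(); for sparse files it skips whole
 * unallocated runs by advancing i_valid past SPARSE_LCN fragments
 * instead of writing zeros. Reads in [i_valid, i_size) are satisfied
 * with zeros without touching the disk.
 */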

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
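
/*
 * Example (illustrative only): a shared writable mapping as served by
 * ntfs_file_mmap() above. For sparse files the driver allocates clusters
 * for the whole mapped range up front, so page faults never have to
 * allocate. Minimal sketch, assuming "f" already has size >= 4096:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int touch_first_page(void)
 *	{
 *		int fd = open("f", O_RDWR);
 *		char *p;
 *
 *		if (fd < 0)
 *			return -1;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)
 *			return -1;
 *		memcpy(p, "hello", 5);	// written back via the page cache
 *		return munmap(p, 4096);
 *	}
 */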

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		WARN_ON(ni->i_valid >= pos);
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse files.
		 * TODO: merge this fragment with fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may take a long time.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but do not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}
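
/*
 * Example (illustrative only): both functions above are reached via
 * truncate(2)/ftruncate(2) through ntfs_setattr(): shrinking goes
 * through ntfs_truncate(), which also trims the valid size, while
 * growing goes through ntfs_extend():
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int resize(const char *path, off_t shrink_to, off_t grow_to)
 *	{
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ftruncate(fd, shrink_to))	// ntfs_truncate()
 *			return -1;
 *		if (ftruncate(fd, grow_to))	// ntfs_extend()
 *			return -1;
 *		return close(fd);
 *	}
 */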

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements the fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process unaligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write out data that will be shifted, so it is preserved
		 * when the page cache is discarded below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn,
							  cend_v - vcn, &lcn,
							  &clen, &new, true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
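
/*
 * Example (illustrative only): exercising the branches above from user
 * space. vfs_fallocate() already requires FALLOC_FL_KEEP_SIZE together
 * with FALLOC_FL_PUNCH_HOLE, and hole punching is only supported on
 * sparse or compressed files:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int example(int fd)
 *	{
 *		// Preallocate 1M without changing i_size.
 *		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
 *			return -1;
 *		// Punch a hole; unaligned edges are zeroed in place and
 *		// only whole frames are actually deallocated.
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *				     FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *	}
 */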

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}
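
/*
 * Example (illustrative only): the mode -> FILE_ATTRIBUTE_READONLY
 * mapping above means a plain chmod(2) is what toggles the Windows
 * "Read-only" attribute:
 *
 *	chmod("f", 0444);	// no 'w' bits -> READONLY set on disk
 *	chmod("f", 0644);	// any 'w' bit -> READONLY cleared
 */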

/*
 * check_read_restriction:
 * Common code for ntfs_file_read_iter and ntfs_file_splice_read.
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}
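
/*
 * Frame geometry used by the compressed write path below, worked through
 * with concrete numbers (NTFS_LZNT_CUNIT is the log2 of clusters per
 * compression unit, 4 in this driver, so a frame spans 16 clusters):
 *
 *	cluster 512  -> frame_bits = 4 + 9  -> frame_size = 8K
 *	cluster 4096 -> frame_bits = 4 + 12 -> frame_size = 64K
 *
 * With 4K pages that gives pages_per_frame = 2 and 16 respectively; a
 * frame smaller than a page is rejected with -EOPNOTSUPP.
 */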

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;
	struct folio *folio;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					folio = page_folio(page);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			folio = page_folio(page);
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						folio = page_folio(page);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction:
 * Common code for ntfs_file_write_iter and ntfs_file_splice_write.
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}
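
/*
 * Example (illustrative only): per the O_DIRECT check above, direct I/O
 * is refused at open(2) time for compressed and encrypted files:
 *
 *	int fd = open("compressed_file", O_RDWR | O_DIRECT);
 *	// fd == -1, errno == EOPNOTSUPP on NTFS3
 */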

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	    /*
	     * The only file with inode->i_fop == &ntfs_file_operations for
	     * which init_rwsem(&ni->file.run_lock) is not called explicitly
	     * is the MFT, so check for it here as well.
	     */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}
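
/*
 * Example (illustrative only): querying extents through the fiemap
 * handler above. A minimal sketch that maps the first 16 extents of an
 * open file descriptor:
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int dump_extents(int fd)
 *	{
 *		struct fiemap *fm;
 *		int err;
 *
 *		fm = calloc(1, sizeof(*fm) +
 *				16 * sizeof(struct fiemap_extent));
 *		if (!fm)
 *			return -1;
 *		fm->fm_start = 0;
 *		fm->fm_length = ~0ULL;		// whole file
 *		fm->fm_extent_count = 16;
 *		err = ioctl(fd, FS_IOC_FIEMAP, fm);
 *		free(fm);
 *		return err;
 *	}
 */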
1350 */ 1351 && inode->i_ino != MFT_REC_MFT) { 1352 ni_lock(ni); 1353 down_write(&ni->file.run_lock); 1354 1355 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, 1356 i_size_read(inode), &ni->i_valid, false, 1357 NULL); 1358 1359 up_write(&ni->file.run_lock); 1360 ni_unlock(ni); 1361 } 1362 return err; 1363 } 1364 1365 /* 1366 * ntfs_fiemap - inode_operations::fiemap 1367 */ 1368 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1369 __u64 start, __u64 len) 1370 { 1371 int err; 1372 struct ntfs_inode *ni = ntfs_i(inode); 1373 1374 err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); 1375 if (err) 1376 return err; 1377 1378 ni_lock(ni); 1379 1380 err = ni_fiemap(ni, fieinfo, start, len); 1381 1382 ni_unlock(ni); 1383 1384 return err; 1385 } 1386 1387 /* 1388 * ntfs_file_splice_write - file_operations::splice_write 1389 */ 1390 static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe, 1391 struct file *file, loff_t *ppos, 1392 size_t len, unsigned int flags) 1393 { 1394 ssize_t err; 1395 struct inode *inode = file_inode(file); 1396 1397 err = check_write_restriction(inode); 1398 if (err) 1399 return err; 1400 1401 return iter_file_splice_write(pipe, file, ppos, len, flags); 1402 } 1403 1404 // clang-format off 1405 const struct inode_operations ntfs_file_inode_operations = { 1406 .getattr = ntfs_getattr, 1407 .setattr = ntfs_setattr, 1408 .listxattr = ntfs_listxattr, 1409 .get_acl = ntfs_get_acl, 1410 .set_acl = ntfs_set_acl, 1411 .fiemap = ntfs_fiemap, 1412 .fileattr_get = ntfs_fileattr_get, 1413 .fileattr_set = ntfs_fileattr_set, 1414 }; 1415 1416 const struct file_operations ntfs_file_operations = { 1417 .llseek = generic_file_llseek, 1418 .read_iter = ntfs_file_read_iter, 1419 .write_iter = ntfs_file_write_iter, 1420 .unlocked_ioctl = ntfs_ioctl, 1421 #ifdef CONFIG_COMPAT 1422 .compat_ioctl = ntfs_compat_ioctl, 1423 #endif 1424 .splice_read = ntfs_file_splice_read, 1425 .splice_write = ntfs_file_splice_write, 1426 .mmap = ntfs_file_mmap, 1427 .open = ntfs_file_open, 1428 .fsync = generic_file_fsync, 1429 .fallocate = ntfs_fallocate, 1430 .release = ntfs_file_release, 1431 }; 1432 1433 #if IS_ENABLED(CONFIG_NTFS_FS) 1434 const struct file_operations ntfs_legacy_file_operations = { 1435 .llseek = generic_file_llseek, 1436 .read_iter = ntfs_file_read_iter, 1437 .splice_read = ntfs_file_splice_read, 1438 .open = ntfs_file_open, 1439 .release = ntfs_file_release, 1440 }; 1441 #endif 1442 // clang-format on 1443