// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * cifs, btrfs, exfat, ext4, f2fs use this constant.
 * Hope this value will become common to all fs.
 */
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	if (copy_to_user(buf, sbi->volume.label, FSLABEL_MAX))
		return -EFAULT;

	return 0;
}

static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	u8 user[FSLABEL_MAX] = { 0 };
	int len;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(user, buf, FSLABEL_MAX))
		return -EFAULT;

	len = strnlen(user, FSLABEL_MAX);

	return ntfs_set_label(sbi, user, len);
}

/*
 * ntfs_force_shutdown - helper function. Called from ioctl.
 */
static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
{
	int err;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	if (unlikely(ntfs3_forced_shutdown(sb)))
		return 0;

	/* No additional options yet (flags). */
	err = bdev_freeze(sb->s_bdev);
	if (err)
		return err;
	set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
	bdev_thaw(sb->s_bdev);
	return 0;
}

static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
	u32 flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(flags, (__u32 __user *)arg))
		return -EFAULT;

	return ntfs_force_shutdown(sb, flags);
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ntfs_i(inode))))
		return -EINVAL;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	case FS_IOC_GETFSLABEL:
		return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
	case FS_IOC_SETFSLABEL:
		return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
	case NTFS3_IOC_SHUTDOWN:
		return ntfs_ioctl_shutdown(sb, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}
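
/*
 * Example (user space): request an emergency shutdown of the volume,
 * e.g. from a test helper. 'fd' is any open file on the mount; the
 * flags word is currently read but otherwise ignored (see
 * ntfs_force_shutdown() above):
 *
 *	__u32 flags = 0;
 *	ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags);
 */

/*
 * ntfs_compat_ioctl - file_operations::compat_ioctl
 *
 * 32-bit user space passes ioctl pointer arguments as 32-bit values;
 * compat_ptr() widens them back into proper user pointers so the
 * native handler above can be reused as-is.
 */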
#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}
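
/*
 * ntfs_extend_initialized_size - Extend the valid size up to @new_valid.
 *
 * NTFS keeps an "initialized" (valid) size that may lag behind i_size:
 * reads past ni->i_valid return zeros without touching the disk.
 * Before i_valid may move forward, the gap [valid, new_valid) must be
 * filled with real zeros, which is done here page by page; sparse
 * fragments are skipped, since they read back as zeros anyway.
 */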
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen,
						  NULL, false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap_prepare - file_operations::mmap_prepare
 */
static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
	bool rw = desc->vm_flags & VM_WRITE;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni)) {
		if (rw) {
			ntfs_inode_warn(inode,
					"mmap(write) compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma_desc_size(desc));

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
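			/*
			 * Back every cluster of the mapped range now, so
			 * that a later write fault does not have to
			 * allocate space.
			 */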
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap_prepare(desc);
out:
	return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse files.
		 * TODO: merge this fragment with the fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may take too long.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}

		/* Allocate but do not zero new clusters. */
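		/* Everything past i_valid reads back as zeros anyway. */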
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process an unaligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;
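
		/*
		 * Example: frame_size = 64K, punch [10K, 100K):
		 * vbo_a = 64K, end_a = 64K, so [10K, 64K) and [64K, 100K)
		 * are zeroed by hand below and no whole frame is freed.
		 */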
		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve it
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
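		/*
		 * Compare the required allocation with the number of free
		 * clusters first, so that an oversized request fails with
		 * -ENOSPC here rather than with -EFBIG from
		 * inode_newsize_ok() below.
		 */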
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take too long.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
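		/*
		 * 0222 covers the owner/group/other write bits: if any of
		 * them survives the mode change, the file must not carry
		 * FILE_ATTRIBUTE_READONLY on the NTFS side, and vice versa.
		 */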
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction - Common code for ntfs_file_read_iter and
 *			    ntfs_file_splice_read.
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni)) {
		if (iocb->ki_flags & IOCB_DIRECT) {
			ntfs_inode_warn(
				inode, "direct i/o + compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	/* Check minimum alignment for dio. */
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct super_block *sb = inode->i_sb;
		struct ntfs_sb_info *sbi = sb->s_fs_info;

		if ((iocb->ki_pos | iov_iter_alignment(iter)) &
		    sbi->bdev_blocksize_mask) {
			iocb->ki_flags &= ~IOCB_DIRECT;
		}
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ntfs_i(inode))) {
		/* Turn off readahead for compressed files. */
		in->f_ra.ra_pages = 0;
	}

	return filemap_splice_read(in, ppos, pipe, len, flags);
}
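
/*
 * Compressed (LZNT) files are handled below in units of "frames":
 * frame_size = cluster_size << NTFS_LZNT_CUNIT, i.e. 16 clusters
 * (8K frames for 512-byte clusters, 64K frames for 4K clusters).
 * A frame is compressed as a whole, so it has to be read, modified
 * and written back as a whole as well.
 */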

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page **pages = NULL;
	struct folio *folio;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
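		/*
		 * A compressed frame can only be rewritten as a unit, so
		 * bring the complete frame into the page cache before
		 * zeroing its tail.
		 */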
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame, 0);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					folio = page_folio(pages[ip]);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			folio = page_folio(pages[ip]);
			folio_zero_segment(folio, off, PAGE_SIZE);
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame, 0);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						folio = page_folio(pages[ip]);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
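		/*
		 * copy_folio_from_iter_atomic() may copy less than asked
		 * for if the user buffer faults; a zero-length copy breaks
		 * the loop below and whatever was copied so far is still
		 * compressed and written out via ni_write_frame().
		 */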
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			folio = page_folio(pages[ip]);
			cp = copy_folio_from_iter_atomic(folio, off,
							 min(tail, bytes), from);
			flush_dcache_folio(folio);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_clear_dirty(folio);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction - Common code for ntfs_file_write_iter and
 *			     ntfs_file_splice_write.
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
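	/*
	 * "External" (LZX/Xpress) compression is created by Windows
	 * tools such as 'compact /exe'; ntfs3 can only read it, so the
	 * file must be fully decompressed before write access is granted.
	 */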
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	    /*
	     * $MFT is the only file that uses &ntfs_file_operations
	     * without init_rwsem(&ni->file.run_lock) having been called
	     * explicitly, so check for it here as well.
	     */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

/*
 * ntfs_file_fsync - file_operations::fsync
 */
static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file_inode(file);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	return generic_file_fsync(file, start, end, datasync);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap_prepare	= ntfs_file_mmap_prepare,
	.open		= ntfs_file_open,
	.fsync		= ntfs_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
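
/*
 * Reduced set of operations exposed when the volume is mounted through
 * the legacy 'ntfs' filesystem name (CONFIG_NTFS_FS): no write_iter,
 * ioctl, mmap or fallocate, i.e. effectively read-only file access.
 */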
#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on