// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 *  Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>
#include <linux/filelock.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * cifs, btrfs, exfat, ext4 and f2fs use this constant.
 * Hopefully this value will become common to all filesystems.
 */
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
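
/*
 * A minimal userspace sketch of triggering a forced shutdown (illustrative
 * only, not part of the kernel build; userspace has to define
 * NTFS3_IOC_SHUTDOWN itself with the value above, and CAP_SYS_ADMIN is
 * required):
 *
 *	__u32 flags = 0;	// no flags are defined yet
 *	if (ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags))
 *		perror("NTFS3_IOC_SHUTDOWN");
 */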

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
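
/*
 * A minimal userspace sketch of FITRIM (illustrative only; FITRIM and
 * struct fstrim_range come from <linux/fs.h>, CAP_SYS_ADMIN is required,
 * and, as on other filesystems, the result is copied back to the caller):
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	if (ioctl(fd, FITRIM, &range))
 *		perror("FITRIM");
 */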

static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	if (copy_to_user(buf, sbi->volume.label, FSLABEL_MAX))
		return -EFAULT;

	return 0;
}

static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	u8 user[FSLABEL_MAX] = { 0 };
	int len;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(user, buf, FSLABEL_MAX))
		return -EFAULT;

	len = strnlen(user, FSLABEL_MAX);

	return ntfs_set_label(sbi, user, len);
}
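
/*
 * A minimal userspace sketch of the label ioctls (illustrative only;
 * FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL and FSLABEL_MAX come from
 * <linux/fs.h> and work on any fd of the mounted volume):
 *
 *	char label[FSLABEL_MAX] = { 0 };
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
 *		printf("label: %s\n", label);
 *
 *	char new_label[FSLABEL_MAX] = "backup";
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, new_label))
 *		perror("FS_IOC_SETFSLABEL");	// needs CAP_SYS_ADMIN
 */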

/*
 * ntfs_force_shutdown - Helper for the NTFS3_IOC_SHUTDOWN ioctl.
 */
static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
{
	int err;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	if (unlikely(ntfs3_forced_shutdown(sb)))
		return 0;

	/* No additional options yet (flags). */
	err = bdev_freeze(sb->s_bdev);
	if (err)
		return err;
	set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
	bdev_thaw(sb->s_bdev);
	return 0;
}

static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
	u32 flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(flags, (__u32 __user *)arg))
		return -EFAULT;

	return ntfs_force_shutdown(sb, flags);
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ntfs_i(inode))))
		return -EINVAL;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	case FS_IOC_GETFSLABEL:
		return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
	case FS_IOC_SETFSLABEL:
		return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
	case NTFS3_IOC_SHUTDOWN:
		return ntfs_ioctl_shutdown(sb, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}

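/*
 * ntfs_extend_initialized_size - Extend the initialized ("valid") size.
 *
 * Zeroes the byte range [valid, new_valid) page by page, skipping holes
 * of sparse files, and advances ni->i_valid as it goes. Resident files
 * only update ni->i_valid.
 */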
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap_prepare - file_operations::mmap_prepare
 */
static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
	bool rw = desc->vm_flags & VM_WRITE;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni)) {
		if (rw) {
			ntfs_inode_warn(inode,
					"mmap(write) compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma_desc_size(desc));

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap_prepare(desc);
out:
	return err;
}

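/*
 * ntfs_extend - Extend i_size and/or the initialized size.
 *
 * Called with a non-NULL 'file' from the write path and with NULL from
 * setattr. Grows i_size to 'pos + count' if needed, zeroes the gap
 * between the old valid size and 'pos', preallocates clusters for
 * sparse files and honors O_SYNC.
 */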
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse files.
		 * TODO: merge this fragment with the fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may take a long time.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but do not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

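/*
 * ntfs_truncate - Shrink a regular file to 'new_size'.
 *
 * Zeroes the tail of the last block, truncates the page cache and the
 * data attribute (attr_set_size) and clamps the valid size.
 */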
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs3's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process an unaligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve it
		 * when discarding the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Make sure the file is non-resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

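/*
 * A minimal userspace sketch of punching a hole (illustrative only; on
 * ntfs3 this requires a sparse or compressed file, see is_supported_holes
 * above, and the VFS demands FALLOC_FL_KEEP_SIZE together with
 * FALLOC_FL_PUNCH_HOLE):
 *
 *	// free 1 MiB starting at offset 4096, i_size is unchanged
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      4096, 1024 * 1024))
 *		perror("fallocate");
 */
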
/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction - Common code for ntfs_file_read_iter and
 * ntfs_file_splice_read.
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni)) {
		if (iocb->ki_flags & IOCB_DIRECT) {
			ntfs_inode_warn(
				inode, "direct i/o + compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	/* Check minimum alignment for dio. */
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct super_block *sb = inode->i_sb;
		struct ntfs_sb_info *sbi = sb->s_fs_info;

		if ((iocb->ki_pos | iov_iter_alignment(iter)) &
		    sbi->bdev_blocksize_mask) {
			iocb->ki_flags &= ~IOCB_DIRECT;
		}
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ntfs_i(inode))) {
		/* Turn off readahead for compressed files. */
		in->f_ra.ra_pages = 0;
	}

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: 0 on success, with 'pages' filled with 'pages_per_frame'
 * locked pages; -ENOMEM on failure.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page **pages = NULL;
	struct folio *folio;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame, 0);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					folio = page_folio(pages[ip]);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			folio = page_folio(pages[ip]);
			folio_zero_segment(folio, off, PAGE_SIZE);
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame, 0);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						folio = page_folio(pages[ip]);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			folio = page_folio(pages[ip]);
			cp = copy_folio_from_iter_atomic(
				folio, off, min(tail, bytes), from);
			flush_dcache_folio(folio);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_clear_dirty(folio);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction - Common code for ntfs_file_write_iter and
 * ntfs_file_splice_write.
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	    /*
	     * The MFT is the only file that uses ntfs_file_operations
	     * without init_rwsem(&ni->file.run_lock) being called
	     * explicitly, so filter it out here.
	     */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

/*
 * ntfs_file_fsync - file_operations::fsync
 */
static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file_inode(file);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	return generic_file_fsync(file, start, end, datasync);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap_prepare	= ntfs_file_mmap_prepare,
	.open		= ntfs_file_open,
	.fsync		= ntfs_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
	.setlease	= generic_setlease,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
	.setlease	= generic_setlease,
};
#endif
// clang-format on