// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 *  Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!bdev_max_discard_sectors(sbi->sb->s_bdev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen,
			     bdev_discard_granularity(sbi->sb->s_bdev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

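/*
 * Illustrative userspace sketch (not part of this driver; the mount point
 * and field values are assumptions): how the FITRIM ioctl handled above is
 * typically issued against a mounted ntfs3 volume.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,	// whole volume
 *		.minlen = 0,		// raised to the discard granularity above
 *	};
 *	int fd = open("/mnt/ntfs", O_RDONLY);
 *
 *	if (fd >= 0 && !ioctl(fd, FITRIM, &range))
 *		printf("trimmed %llu bytes\n", range.len);
 */
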
static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

	generic_fillattr(mnt_userns, inode, stat);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	return 0;
}

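/*
 * Illustrative userspace sketch (path and output format are assumptions):
 * the birth time filled in by ntfs_getattr() above is visible via statx(2).
 *
 *	struct statx stx;
 *
 *	if (!statx(AT_FDCWD, "/mnt/ntfs/file.txt", 0, STATX_BTIME, &stx) &&
 *	    (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */

/*
 * ntfs_extend_initialized_size - Grow the initialized ("valid") size.
 *
 * NTFS keeps a valid size per data attribute: bytes below it are
 * initialized on disk, bytes between it and the file size read back as
 * zeros. This helper zeroes [valid, new_valid) page by page, skipping
 * sparse regions, so that i_valid can safely be raised to @new_valid.
 */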
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
						  NULL);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				loff_t vbo = (loff_t)vcn << bits;
				loff_t to = vbo + ((loff_t)clen << bits);

				if (to <= new_valid) {
					ni->i_valid = to;
					pos = to;
					goto next;
				}

				if (vbo < pos) {
					pos = vbo;
				} else {
					to = (new_valid >> bits) << bits;
					if (pos < to) {
						ni->i_valid = to;
						pos = to;
						goto next;
					}
				}
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
		if (err)
			goto out;

		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* ntfs_write_end() always puts the page, even on error. */
		err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 z_start = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, z_end;
	sector_t iblock;
	struct page *page;

	for (; idx < idx_end; idx += 1, z_start = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
							: PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		page = find_or_create_page(mapping, idx,
					   mapping_gfp_constraint(mapping,
								  ~__GFP_FS));
		if (!page)
			return -ENOMEM;

		if (!page_has_buffers(page))
			create_empty_buffers(page, blocksize, 0);

		bh = head = page_buffers(page);
		bh_off = 0;
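		/*
		 * Walk every buffer in the page: map and dirty the ones that
		 * intersect [z_start, z_end); unmapped holes are left alone.
		 */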
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= z_start || bh_off >= z_end)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (PageUptodate(page))
				set_buffer_uptodate(bh);

			if (!buffer_uptodate(bh)) {
				lock_buffer(bh);
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(REQ_OP_READ, bh);

				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					unlock_page(page);
					put_page(page);
					err = -EIO;
					goto out;
				}
			}

			mark_buffer_dirty(bh);

		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		zero_user_segment(page, z_start, z_end);

		unlock_page(page);
		put_page(page);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_sparse_cluster - Helper function to zero newly allocated clusters.
 *
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
			 CLST len)
{
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u64 vbo = (u64)vcn << sbi->cluster_bits;
	u64 bytes = (u64)len << sbi->cluster_bits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx0 = page0 ? page0->index : -1;
	loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
	loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
	pgoff_t idx = vbo_clst >> PAGE_SHIFT;
	u32 from = vbo_clst & (PAGE_SIZE - 1);
	pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	u32 to;
	bool partial;
	struct page *page;

	for (; idx < idx_end; idx += 1, from = 0) {
		page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

		if (!page)
			continue;

		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > end ? (end - page_off)
						  : PAGE_SIZE;
		partial = false;

		if ((from || PAGE_SIZE != to) &&
		    likely(!page_has_buffers(page))) {
			create_empty_buffers(page, blocksize, 0);
		}

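		/*
		 * Buffers fully inside [from, to) become uptodate and dirty;
		 * an edge buffer that is not yet uptodate keeps the page
		 * partial, so it must not be marked fully uptodate below.
		 */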
		if (page_has_buffers(page)) {
			struct buffer_head *head, *bh;
			u32 bh_off = 0;

			bh = head = page_buffers(page);
			do {
				u32 bh_next = bh_off + blocksize;

				if (from <= bh_off && bh_next <= to) {
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
				} else if (!buffer_uptodate(bh)) {
					partial = true;
				}
				bh_off = bh_next;
			} while (head != (bh = bh->b_this_page));
		}

		zero_user_segment(page, from, to);

		if (!partial) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
			set_page_dirty(page);
		}

		if (idx != idx0) {
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
	mark_inode_dirty(inode);
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new);
				if (err)
					goto out;

				if (!new)
					continue;
				ntfs_sparse_cluster(inode, NULL, vcn, 1);
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

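/*
 * ntfs_extend - Extend the file and/or its initialized size.
 *
 * Called before data is written at @pos. Grows i_size when the write
 * reaches past the end of file and, for non-compressed files, zeroes
 * the gap [i_valid, pos) so that reads of it return zeros. For O_SYNC
 * files, the extended range is written back before returning.
 */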
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
		inode->i_size = end;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

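/*
 * ntfs_truncate - Shrink a regular file to @new_size.
 *
 * Zeroes the tail of the last block, shrinks the page cache and the
 * on-disk allocation, and clamps the valid size so it never exceeds
 * the new file size.
 */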
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	ni_lock(ni);

	truncate_setsize(inode, new_size);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode->i_ctime = inode->i_mtime = current_time(inode);
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, PAGE_SIZE);
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo, end - 1);
		if (err)
			goto out;

		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process a non-aligned punch. */
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write the tail of the last page before the removed range,
		 * since it will be dropped from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write the data that will be shifted so it is preserved
		 * when the page cache is discarded below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
	} else {
		/* Check new size. */

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> sbi->cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/*
		 * Allocate clusters, do not change 'valid' size.
		 */
		err = ntfs_set_size(inode, new_size);
		if (err)
			goto out;

		if (is_supported_holes) {
			CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST lcn, clen;
			bool new;

			/*
			 * Allocate but do not zero new clusters (see the comments below).
			 * This weakens security: stale on-disk data becomes readable.
			 * Zeroing these clusters may take too long.
			 * Maybe we should check here for root rights?
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new);
				if (err)
					goto out;
				if (!new || vcn >= vcn_v)
					continue;

				/*
				 * Unwritten area.
				 * NTFS is not able to store several unwritten areas.
				 * Activate 'ntfs_sparse_cluster' to zero newly allocated clusters.
				 *
				 * Dangerous case:
				 * 1G of sparsed clusters + 1 cluster of data =>
				 * valid_size == 1G + 1 cluster
				 * fallocate(1G) will zero 1G and this can take very long.
				 * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
				 */
				ntfs_sparse_cluster(inode, NULL, vcn,
						    min(vcn_v - vcn, clen));
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* 'true' means keep the preallocated clusters. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
		}
	}

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode->i_ctime = inode->i_mtime = current_time(inode);
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

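/*
 * Illustrative userspace sketch (file name and sizes are assumptions):
 * exercising two of the fallocate modes handled above.
 *
 *	int fd = open("/mnt/ntfs/file.bin", O_RDWR);
 *
 *	// Reserve 1 MiB without changing the file size.
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *	// Punch a 64 KiB hole; only sparse/compressed files support this.
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 64 << 10);
 */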
/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	struct super_block *sb = dentry->d_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (sbi->options->noacsrules) {
		/* "No access rules" - force any changes of time, etc. */
		attr->ia_valid |= ATTR_FORCE;
		/* ...and disallow changing uid/gid/mode. */
		attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
		ia_valid = attr->ia_valid;
	}

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);

		if (attr->ia_size <= oldsize)
			err = ntfs_truncate(inode, attr->ia_size);
		else if (attr->ia_size > oldsize)
			err = ntfs_extend(inode, attr->ia_size, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	}

	setattr_copy(mnt_userns, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(mnt_userns, inode);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode);
	mark_inode_dirty(inode);
out:
	return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct page *page;

		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page) {
			while (npages--) {
				page = pages[npages];
				unlock_page(page);
				put_page(page);
			}

			return -ENOMEM;
		}

		if (!PageUptodate(page))
			*frame_uptodate = false;

		pages[npages] = page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

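	/*
	 * A compression frame covers NTFS_LZNT_CUNIT clusters; it is read,
	 * modified and rewritten as a whole, so keep one page pointer per
	 * page of the frame.
	 */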
	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
					  &clen, NULL);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	current->backing_dev_info = NULL;

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;

	return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
				: __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
				      atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false, NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - file_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

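/*
 * Illustrative userspace sketch (extent count and file descriptor are
 * assumptions): querying the extents reported by ntfs_fiemap() above
 * through the FS_IOC_FIEMAP ioctl.
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   16 * sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *	fm->fm_extent_count = 16;
 *	if (!ioctl(fd, FS_IOC_FIEMAP, fm))
 *		printf("%u extents\n", fm->fm_mapped_extents);
 */
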
// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.permission	= ntfs_permission,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
// clang-format on