xref: /linux/fs/ntfs3/attrib.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * External NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined to
19  * tune the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = ((size + clump - 1) >> align_shift) << align_shift;
53 
54 	return ret;
55 }
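/*
 * Worked example (illustrative, assuming the default clump bounds above):
 * size == 100 MiB + 1 gives size >> 24 == 6, __ffs(6) == 1, so
 * align_shift == 15 + 1 == 16 and clump == 64 KiB; the result is the
 * size rounded up to 100 MiB + 64 KiB. Sizes <= 16 MiB round up to
 * 64 KiB multiples, sizes >= 16 GiB to 64 MiB multiples.
 */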
56 
57 /*
58  * attr_load_runs - Load all runs stored in @attr.
59  */
60 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61 			  struct runs_tree *run, const CLST *vcn)
62 {
63 	int err;
64 	CLST svcn = le64_to_cpu(attr->nres.svcn);
65 	CLST evcn = le64_to_cpu(attr->nres.evcn);
66 	u32 asize;
67 	u16 run_off;
68 
69 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
70 		return 0;
71 
72 	if (vcn && (evcn < *vcn || *vcn < svcn))
73 		return -EINVAL;
74 
75 	asize = le32_to_cpu(attr->size);
76 	run_off = le16_to_cpu(attr->nres.run_off);
77 
78 	if (run_off > asize)
79 		return -EINVAL;
80 
81 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
82 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
83 			    asize - run_off);
84 	if (err < 0)
85 		return err;
86 
87 	return 0;
88 }
89 
90 /*
91  * run_deallocate_ex - Deallocate clusters.
92  */
93 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
94 			     CLST vcn, CLST len, CLST *done, bool trim)
95 {
96 	int err = 0;
97 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
98 	size_t idx;
99 
100 	if (!len)
101 		goto out;
102 
103 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
104 failed:
105 		run_truncate(run, vcn0);
106 		err = -EINVAL;
107 		goto out;
108 	}
109 
110 	for (;;) {
111 		if (clen > len)
112 			clen = len;
113 
114 		if (!clen) {
115 			err = -EINVAL;
116 			goto out;
117 		}
118 
119 		if (lcn != SPARSE_LCN) {
120 			if (sbi) {
121 				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
122 				mark_as_free_ex(sbi, lcn, clen, trim);
123 			}
124 			dn += clen;
125 		}
126 
127 		len -= clen;
128 		if (!len)
129 			break;
130 
131 		vcn_next = vcn + clen;
132 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
133 		    vcn != vcn_next) {
134 			/* Save memory - don't load entire run. */
135 			goto failed;
136 		}
137 	}
138 
139 out:
140 	if (done)
141 		*done += dn;
142 
143 	return err;
144 }
145 
146 /*
147  * attr_allocate_clusters - Find free space, mark it as used, and store it in @run.
148  */
149 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
150 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
151 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
152 			   CLST *new_lcn, CLST *new_len)
153 {
154 	int err;
155 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
156 	size_t cnt = run->count;
157 
158 	for (;;) {
159 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
160 					       opt);
161 
162 		if (err == -ENOSPC && pre) {
163 			pre = 0;
164 			if (*pre_alloc)
165 				*pre_alloc = 0;
166 			continue;
167 		}
168 
169 		if (err)
170 			goto out;
171 
172 		if (vcn == vcn0) {
173 			/* Return the first fragment. */
174 			if (new_lcn)
175 				*new_lcn = lcn;
176 			if (new_len)
177 				*new_len = flen;
178 		}
179 
180 		/* Add new fragment into run storage. */
181 		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
182 			/* Undo last 'ntfs_look_for_free_space' */
183 			mark_as_free_ex(sbi, lcn, len, false);
184 			err = -ENOMEM;
185 			goto out;
186 		}
187 
188 		if (opt & ALLOCATE_ZERO) {
189 			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
190 
191 			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
192 						   (sector_t)lcn << shift,
193 						   (sector_t)flen << shift,
194 						   GFP_NOFS, 0);
195 			if (err)
196 				goto out;
197 		}
198 
199 		vcn += flen;
200 
201 		if (flen >= len || (opt & ALLOCATE_MFT) ||
202 		    (fr && run->count - cnt >= fr)) {
203 			*alen = vcn - vcn0;
204 			return 0;
205 		}
206 
207 		len -= flen;
208 	}
209 
210 out:
211 	/* Undo 'ntfs_look_for_free_space' */
212 	if (vcn - vcn0) {
213 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
214 		run_truncate(run, vcn0);
215 	}
216 
217 	return err;
218 }
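/*
 * Minimal usage sketch (hypothetical values, error handling elided).
 * Allocate 8 clusters for vcn 0 with no preallocation and no fragment
 * limit; (lcn0, len0) receive the first allocated fragment:
 *
 *	CLST alen, lcn0, len0;
 *	int err = attr_allocate_clusters(sbi, run, 0, 0, 8, NULL,
 *					 ALLOCATE_DEF, &alen, 0,
 *					 &lcn0, &len0);
 */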
219 
220 /*
221  * attr_make_nonresident
222  *
223  * If @page is not NULL, it already contains resident data
224  * and is locked (called from ni_write_frame()).
225  */
226 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
227 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
228 			  u64 new_size, struct runs_tree *run,
229 			  struct ATTRIB **ins_attr, struct page *page)
230 {
231 	struct ntfs_sb_info *sbi;
232 	struct ATTRIB *attr_s;
233 	struct MFT_REC *rec;
234 	u32 used, asize, rsize, aoff;
235 	bool is_data;
236 	CLST len, alen;
237 	char *next;
238 	int err;
239 
240 	if (attr->non_res) {
241 		*ins_attr = attr;
242 		return 0;
243 	}
244 
245 	sbi = mi->sbi;
246 	rec = mi->mrec;
247 	attr_s = NULL;
248 	used = le32_to_cpu(rec->used);
249 	asize = le32_to_cpu(attr->size);
250 	next = Add2Ptr(attr, asize);
251 	aoff = PtrOffset(rec, attr);
252 	rsize = le32_to_cpu(attr->res.data_size);
253 	is_data = attr->type == ATTR_DATA && !attr->name_len;
254 
255 	/* len - how many clusters are required to store 'rsize' bytes. */
256 	if (is_attr_compressed(attr)) {
257 		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
258 		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
259 	} else {
260 		len = bytes_to_cluster(sbi, rsize);
261 	}
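	/*
	 * Worked example (illustrative), assuming 4K clusters
	 * (cluster_bits == 12) and NTFS_LZNT_CUNIT == 4: compressed,
	 * shift == 16, so rsize == 700 yields len == 1 << 4 == 16
	 * clusters (one full 64K compression unit); uncompressed,
	 * the same rsize fits in a single cluster.
	 */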
262 
263 	run_init(run);
264 
265 	/* Make a copy of original attribute. */
266 	attr_s = kmemdup(attr, asize, GFP_NOFS);
267 	if (!attr_s) {
268 		err = -ENOMEM;
269 		goto out;
270 	}
271 
272 	if (!len) {
273 		/* Empty resident -> Empty nonresident. */
274 		alen = 0;
275 	} else {
276 		const char *data = resident_data(attr);
277 
278 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
279 					     ALLOCATE_DEF, &alen, 0, NULL,
280 					     NULL);
281 		if (err)
282 			goto out1;
283 
284 		if (!rsize) {
285 			/* Empty resident -> Non-empty nonresident. */
286 		} else if (!is_data) {
287 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
288 			if (err)
289 				goto out2;
290 		} else if (!page) {
291 			struct address_space *mapping = ni->vfs_inode.i_mapping;
292 			struct folio *folio;
293 
294 			folio = __filemap_get_folio(
295 				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
296 				mapping_gfp_mask(mapping));
297 			if (IS_ERR(folio)) {
298 				err = PTR_ERR(folio);
299 				goto out2;
300 			}
301 			folio_fill_tail(folio, 0, data, rsize);
302 			folio_mark_uptodate(folio);
303 			folio_mark_dirty(folio);
304 			folio_unlock(folio);
305 			folio_put(folio);
306 		}
307 	}
308 
309 	/* Remove original attribute. */
310 	used -= asize;
311 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
312 	rec->used = cpu_to_le32(used);
313 	mi->dirty = true;
314 	if (le)
315 		al_remove_le(ni, le);
316 
317 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
318 				    attr_s->name_len, run, 0, alen,
319 				    attr_s->flags, &attr, NULL, NULL);
320 	if (err)
321 		goto out3;
322 
323 	kfree(attr_s);
324 	attr->nres.data_size = cpu_to_le64(rsize);
325 	attr->nres.valid_size = attr->nres.data_size;
326 
327 	*ins_attr = attr;
328 
329 	if (is_data)
330 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
331 
332 	/* Resident attribute becomes nonresident. */
333 	return 0;
334 
335 out3:
336 	attr = Add2Ptr(rec, aoff);
337 	memmove(next, attr, used - aoff);
338 	memcpy(attr, attr_s, asize);
339 	rec->used = cpu_to_le32(used + asize);
340 	mi->dirty = true;
341 out2:
342 	/* Undo: do not trim newly allocated clusters. */
343 	run_deallocate(sbi, run, false);
344 	run_close(run);
345 out1:
346 	kfree(attr_s);
347 out:
348 	return err;
349 }
350 
351 /*
352  * attr_set_size_res - Helper for attr_set_size().
353  */
354 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
355 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
356 			     u64 new_size, struct runs_tree *run,
357 			     struct ATTRIB **ins_attr)
358 {
359 	struct ntfs_sb_info *sbi = mi->sbi;
360 	struct MFT_REC *rec = mi->mrec;
361 	u32 used = le32_to_cpu(rec->used);
362 	u32 asize = le32_to_cpu(attr->size);
363 	u32 aoff = PtrOffset(rec, attr);
364 	u32 rsize = le32_to_cpu(attr->res.data_size);
365 	u32 tail = used - aoff - asize;
366 	char *next = Add2Ptr(attr, asize);
367 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
368 
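	/*
	 * Example (illustrative): growing a 10-byte resident attribute
	 * to 21 bytes gives dsize == ALIGN(21, 8) - ALIGN(10, 8) ==
	 * 24 - 16 == 8, so eight bytes are opened up after the
	 * attribute and zeroed below.
	 */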
369 	if (dsize < 0) {
370 		memmove(next + dsize, next, tail);
371 	} else if (dsize > 0) {
372 		if (used + dsize > sbi->max_bytes_per_attr)
373 			return attr_make_nonresident(ni, attr, le, mi, new_size,
374 						     run, ins_attr, NULL);
375 
376 		memmove(next + dsize, next, tail);
377 		memset(next, 0, dsize);
378 	}
379 
380 	if (new_size > rsize)
381 		memset(Add2Ptr(resident_data(attr), rsize), 0,
382 		       new_size - rsize);
383 
384 	rec->used = cpu_to_le32(used + dsize);
385 	attr->size = cpu_to_le32(asize + dsize);
386 	attr->res.data_size = cpu_to_le32(new_size);
387 	mi->dirty = true;
388 	*ins_attr = attr;
389 
390 	return 0;
391 }
392 
393 /*
394  * attr_set_size - Change the size of the attribute.
395  *
396  * Extend:
397  *   - Sparse/compressed: No allocated clusters.
398  *   - Normal: Append allocated and preallocated new clusters.
399  * Shrink:
400  *   - No deallocation if @keep_prealloc is set.
401  */
402 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
403 		  const __le16 *name, u8 name_len, struct runs_tree *run,
404 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
405 		  struct ATTRIB **ret)
406 {
407 	int err = 0;
408 	struct ntfs_sb_info *sbi = ni->mi.sbi;
409 	u8 cluster_bits = sbi->cluster_bits;
410 	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
411 		      !name_len;
412 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
413 	struct ATTRIB *attr = NULL, *attr_b;
414 	struct ATTR_LIST_ENTRY *le, *le_b;
415 	struct mft_inode *mi, *mi_b;
416 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
417 	CLST next_svcn, pre_alloc = -1, done = 0;
418 	bool is_ext, is_bad = false;
419 	bool dirty = false;
420 	u32 align;
421 	struct MFT_REC *rec;
422 
423 again:
424 	alen = 0;
425 	le_b = NULL;
426 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
427 			      &mi_b);
428 	if (!attr_b) {
429 		err = -ENOENT;
430 		goto bad_inode;
431 	}
432 
433 	if (!attr_b->non_res) {
434 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
435 					&attr_b);
436 		if (err)
437 			return err;
438 
439 		/* Return if file is still resident. */
440 		if (!attr_b->non_res) {
441 			dirty = true;
442 			goto ok1;
443 		}
444 
445 		/* Layout of records may be changed, so do a full search. */
446 		goto again;
447 	}
448 
449 	is_ext = is_attr_ext(attr_b);
450 	align = sbi->cluster_size;
451 	if (is_ext)
452 		align <<= attr_b->nres.c_unit;
453 
454 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
455 	old_size = le64_to_cpu(attr_b->nres.data_size);
456 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
457 
458 again_1:
459 	old_alen = old_alloc >> cluster_bits;
460 
461 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
462 	new_alen = new_alloc >> cluster_bits;
463 
464 	if (keep_prealloc && new_size < old_size) {
465 		attr_b->nres.data_size = cpu_to_le64(new_size);
466 		mi_b->dirty = dirty = true;
467 		goto ok;
468 	}
469 
470 	vcn = old_alen - 1;
471 
472 	svcn = le64_to_cpu(attr_b->nres.svcn);
473 	evcn = le64_to_cpu(attr_b->nres.evcn);
474 
475 	if (svcn <= vcn && vcn <= evcn) {
476 		attr = attr_b;
477 		le = le_b;
478 		mi = mi_b;
479 	} else if (!le_b) {
480 		err = -EINVAL;
481 		goto bad_inode;
482 	} else {
483 		le = le_b;
484 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
485 				    &mi);
486 		if (!attr) {
487 			err = -EINVAL;
488 			goto bad_inode;
489 		}
490 
491 next_le_1:
492 		svcn = le64_to_cpu(attr->nres.svcn);
493 		evcn = le64_to_cpu(attr->nres.evcn);
494 	}
495 	/*
496 	 * Here we have:
497 	 * attr,mi,le - last attribute segment (containing 'vcn').
498 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
499 	 */
500 next_le:
501 	rec = mi->mrec;
502 	err = attr_load_runs(attr, ni, run, NULL);
503 	if (err)
504 		goto out;
505 
506 	if (new_size > old_size) {
507 		CLST to_allocate;
508 		size_t free;
509 
510 		if (new_alloc <= old_alloc) {
511 			attr_b->nres.data_size = cpu_to_le64(new_size);
512 			mi_b->dirty = dirty = true;
513 			goto ok;
514 		}
515 
516 		/*
517 		 * Add clusters. In simple case we have to:
518 		 *  - allocate space (vcn, lcn, len)
519 		 *  - update packed run in 'mi'
520 		 *  - update attr->nres.evcn
521 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
522 		 */
523 		to_allocate = new_alen - old_alen;
524 add_alloc_in_same_attr_seg:
525 		lcn = 0;
526 		if (is_mft) {
527 			/* The MFT allocates clusters from the MFT zone. */
528 			pre_alloc = 0;
529 		} else if (is_ext) {
530 			/* No preallocation for sparse/compressed. */
531 			pre_alloc = 0;
532 		} else if (pre_alloc == -1) {
533 			pre_alloc = 0;
534 			if (type == ATTR_DATA && !name_len &&
535 			    sbi->options->prealloc) {
536 				pre_alloc = bytes_to_cluster(
537 						    sbi, get_pre_allocated(
538 								 new_size)) -
539 					    new_alen;
540 			}
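			/*
			 * Example (illustrative, 4K clusters): new_size ==
			 * 100 MiB + 1 gives new_alen == 25601, while
			 * get_pre_allocated() rounds up to 100 MiB + 64 KiB
			 * == 25616 clusters, so pre_alloc == 15.
			 */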
541 
542 			/* Get the last LCN to allocate from. */
543 			if (old_alen &&
544 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
545 				lcn = SPARSE_LCN;
546 			}
547 
548 			if (lcn == SPARSE_LCN)
549 				lcn = 0;
550 			else if (lcn)
551 				lcn += 1;
552 
553 			free = wnd_zeroes(&sbi->used.bitmap);
554 			if (to_allocate > free) {
555 				err = -ENOSPC;
556 				goto out;
557 			}
558 
559 			if (pre_alloc && to_allocate + pre_alloc > free)
560 				pre_alloc = 0;
561 		}
562 
563 		vcn = old_alen;
564 
565 		if (is_ext) {
566 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
567 					   false)) {
568 				err = -ENOMEM;
569 				goto out;
570 			}
571 			alen = to_allocate;
572 		} else {
573 			/* ~3 bytes per fragment. */
574 			err = attr_allocate_clusters(
575 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
576 				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
577 				is_mft ? 0 :
578 					 (sbi->record_size -
579 					  le32_to_cpu(rec->used) + 8) /
580 							 3 +
581 						 1,
582 				NULL, NULL);
583 			if (err)
584 				goto out;
585 		}
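		/*
		 * Example (illustrative): with a 1K MFT record of which
		 * 400 bytes are used, the cap above allows
		 * (1024 - 400 + 8) / 3 + 1 == 211 new run fragments.
		 */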
586 
587 		done += alen;
588 		vcn += alen;
589 		if (to_allocate > alen)
590 			to_allocate -= alen;
591 		else
592 			to_allocate = 0;
593 
594 pack_runs:
595 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
596 		if (err)
597 			goto undo_1;
598 
599 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
600 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
601 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
602 		mi_b->dirty = dirty = true;
603 
604 		if (next_svcn >= vcn && !to_allocate) {
605 			/* Normal way. Update attribute and exit. */
606 			attr_b->nres.data_size = cpu_to_le64(new_size);
607 			goto ok;
608 		}
609 
610 		/* At least two MFT records' worth, to avoid a recursive loop. */
611 		if (is_mft && next_svcn == vcn &&
612 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
613 			new_size = new_alloc_tmp;
614 			attr_b->nres.data_size = attr_b->nres.alloc_size;
615 			goto ok;
616 		}
617 
618 		if (le32_to_cpu(rec->used) < sbi->record_size) {
619 			old_alen = next_svcn;
620 			evcn = old_alen - 1;
621 			goto add_alloc_in_same_attr_seg;
622 		}
623 
624 		attr_b->nres.data_size = attr_b->nres.alloc_size;
625 		if (new_alloc_tmp < old_valid)
626 			attr_b->nres.valid_size = attr_b->nres.data_size;
627 
628 		if (type == ATTR_LIST) {
629 			err = ni_expand_list(ni);
630 			if (err)
631 				goto undo_2;
632 			if (next_svcn < vcn)
633 				goto pack_runs;
634 
635 			/* Layout of records is changed. */
636 			goto again;
637 		}
638 
639 		if (!ni->attr_list.size) {
640 			err = ni_create_attr_list(ni);
641 			/* In case of error layout of records is not changed. */
642 			if (err)
643 				goto undo_2;
644 			/* Layout of records is changed. */
645 		}
646 
647 		if (next_svcn >= vcn) {
648 			/* This is MFT data, repeat. */
649 			goto again;
650 		}
651 
652 		/* Insert new attribute segment. */
653 		err = ni_insert_nonresident(ni, type, name, name_len, run,
654 					    next_svcn, vcn - next_svcn,
655 					    attr_b->flags, &attr, &mi, NULL);
656 
657 		/*
658 		 * Layout of records may have changed.
659 		 * Find base attribute to update.
660 		 */
661 		le_b = NULL;
662 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
663 				      NULL, &mi_b);
664 		if (!attr_b) {
665 			err = -EINVAL;
666 			goto bad_inode;
667 		}
668 
669 		if (err) {
670 			/* ni_insert_nonresident failed. */
671 			attr = NULL;
672 			goto undo_2;
673 		}
674 
675 		/* Keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
676 		if (ni->mi.rno != MFT_REC_MFT)
677 			run_truncate_head(run, evcn + 1);
678 
679 		svcn = le64_to_cpu(attr->nres.svcn);
680 		evcn = le64_to_cpu(attr->nres.evcn);
681 
682 		/*
683 		 * The attribute is in a consistent state.
684 		 * Save this point to restore to if the next steps fail.
685 		 */
686 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
687 		attr_b->nres.valid_size = attr_b->nres.data_size =
688 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
689 		mi_b->dirty = dirty = true;
690 		goto again_1;
691 	}
692 
693 	if (new_size != old_size ||
694 	    (new_alloc != old_alloc && !keep_prealloc)) {
695 		/*
696 		 * Truncate clusters. In simple case we have to:
697 		 *  - update packed run in 'mi'
698 		 *  - update attr->nres.evcn
699 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
700 		 *  - mark and trim clusters as free (vcn, lcn, len)
701 		 */
702 		CLST dlen = 0;
703 
704 		vcn = max(svcn, new_alen);
705 		new_alloc_tmp = (u64)vcn << cluster_bits;
706 
707 		if (vcn > svcn) {
708 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
709 			if (err)
710 				goto out;
711 		} else if (le && le->vcn) {
712 			u16 le_sz = le16_to_cpu(le->size);
713 
714 			/*
715 			 * NOTE: List entries for one attribute are always
716 			 * the same size. We deal with the last entry (vcn==0),
717 			 * and it is not the first in the entries array
718 			 * (the list entry for the std attribute is always first).
719 			 * So it is safe to step back.
720 			 */
721 			mi_remove_attr(NULL, mi, attr);
722 
723 			if (!al_remove_le(ni, le)) {
724 				err = -EINVAL;
725 				goto bad_inode;
726 			}
727 
728 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
729 		} else {
730 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
731 			mi->dirty = true;
732 		}
733 
734 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
735 
736 		if (vcn == new_alen) {
737 			attr_b->nres.data_size = cpu_to_le64(new_size);
738 			if (new_size < old_valid)
739 				attr_b->nres.valid_size =
740 					attr_b->nres.data_size;
741 		} else {
742 			if (new_alloc_tmp <=
743 			    le64_to_cpu(attr_b->nres.data_size))
744 				attr_b->nres.data_size =
745 					attr_b->nres.alloc_size;
746 			if (new_alloc_tmp <
747 			    le64_to_cpu(attr_b->nres.valid_size))
748 				attr_b->nres.valid_size =
749 					attr_b->nres.alloc_size;
750 		}
751 		mi_b->dirty = dirty = true;
752 
753 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
754 					true);
755 		if (err)
756 			goto out;
757 
758 		if (is_ext) {
759 			/* dlen - the number of clusters actually deallocated. */
760 			le64_sub_cpu(&attr_b->nres.total_size,
761 				     ((u64)dlen << cluster_bits));
762 		}
763 
764 		run_truncate(run, vcn);
765 
766 		if (new_alloc_tmp <= new_alloc)
767 			goto ok;
768 
769 		old_size = new_alloc_tmp;
770 		vcn = svcn - 1;
771 
772 		if (le == le_b) {
773 			attr = attr_b;
774 			mi = mi_b;
775 			evcn = svcn - 1;
776 			svcn = 0;
777 			goto next_le;
778 		}
779 
780 		if (le->type != type || le->name_len != name_len ||
781 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
782 			err = -EINVAL;
783 			goto bad_inode;
784 		}
785 
786 		err = ni_load_mi(ni, le, &mi);
787 		if (err)
788 			goto out;
789 
790 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
791 		if (!attr) {
792 			err = -EINVAL;
793 			goto bad_inode;
794 		}
795 		goto next_le_1;
796 	}
797 
798 ok:
799 	if (new_valid) {
800 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
801 
802 		if (attr_b->nres.valid_size != valid) {
803 			attr_b->nres.valid_size = valid;
804 			mi_b->dirty = true;
805 		}
806 	}
807 
808 ok1:
809 	if (ret)
810 		*ret = attr_b;
811 
812 	if (((type == ATTR_DATA && !name_len) ||
813 	     (type == ATTR_ALLOC && name == I30_NAME))) {
814 		/* Update inode_set_bytes. */
815 		if (attr_b->non_res) {
816 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
817 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
818 				inode_set_bytes(&ni->vfs_inode, new_alloc);
819 				dirty = true;
820 			}
821 		}
822 
823 		/* Don't forget to update duplicate information in parent. */
824 		if (dirty) {
825 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
826 			mark_inode_dirty(&ni->vfs_inode);
827 		}
828 	}
829 
830 	return 0;
831 
832 undo_2:
833 	vcn -= alen;
834 	attr_b->nres.data_size = cpu_to_le64(old_size);
835 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
836 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
837 
838 	/* Restore 'attr' and 'mi'. */
839 	if (attr)
840 		goto restore_run;
841 
842 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
843 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
844 		attr = attr_b;
845 		le = le_b;
846 		mi = mi_b;
847 	} else if (!le_b) {
848 		err = -EINVAL;
849 		goto bad_inode;
850 	} else {
851 		le = le_b;
852 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
853 				    &svcn, &mi);
854 		if (!attr)
855 			goto bad_inode;
856 	}
857 
858 restore_run:
859 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
860 		is_bad = true;
861 
862 undo_1:
863 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
864 
865 	run_truncate(run, vcn);
866 out:
867 	if (is_bad) {
868 bad_inode:
869 		_ntfs_bad_inode(&ni->vfs_inode);
870 	}
871 	return err;
872 }
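/*
 * Minimal usage sketch (hypothetical; callers in this file take the
 * inode/run locks first). Shrink the unnamed $DATA attribute to
 * 'new_size' bytes, keeping any preallocated tail:
 *
 *	u64 new_valid = min(ni->i_valid, new_size);
 *	int err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *				new_size, &new_valid, true, NULL);
 */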
873 
874 /*
875  * attr_data_get_block - Return 'lcn' and 'len' for the given 'vcn'.
876  *
877  * @new == NULL means just get the current mapping for 'vcn'.
878  * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
879  * @zero - zero out newly allocated clusters.
880  *
881  *  NOTE:
882  *  - @new != NULL is used only for sparse or compressed attributes.
883  *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
884  */
885 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
886 			CLST *len, bool *new, bool zero)
887 {
888 	int err = 0;
889 	struct runs_tree *run = &ni->file.run;
890 	struct ntfs_sb_info *sbi;
891 	u8 cluster_bits;
892 	struct ATTRIB *attr, *attr_b;
893 	struct ATTR_LIST_ENTRY *le, *le_b;
894 	struct mft_inode *mi, *mi_b;
895 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
896 	CLST alloc, evcn;
897 	unsigned fr;
898 	u64 total_size, total_size0;
899 	int step = 0;
900 
901 	if (new)
902 		*new = false;
903 
904 	/* Try to find in cache. */
905 	down_read(&ni->file.run_lock);
906 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
907 		*len = 0;
908 	up_read(&ni->file.run_lock);
909 
910 	if (*len && (*lcn != SPARSE_LCN || !new))
911 		return 0; /* Fast normal way without allocation. */
912 
913 	/* No cluster in cache, or we need to allocate a cluster in a hole. */
914 	sbi = ni->mi.sbi;
915 	cluster_bits = sbi->cluster_bits;
916 
917 	ni_lock(ni);
918 	down_write(&ni->file.run_lock);
919 
920 	/* Repeat the code above (under write lock). */
921 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
922 		*len = 0;
923 
924 	if (*len) {
925 		if (*lcn != SPARSE_LCN || !new)
926 			goto out; /* normal way without allocation. */
927 		if (clen > *len)
928 			clen = *len;
929 	}
930 
931 	le_b = NULL;
932 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
933 	if (!attr_b) {
934 		err = -ENOENT;
935 		goto out;
936 	}
937 
938 	if (!attr_b->non_res) {
939 		*lcn = RESIDENT_LCN;
940 		*len = 1;
941 		goto out;
942 	}
943 
944 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
945 	if (vcn >= asize) {
946 		if (new) {
947 			err = -EINVAL;
948 		} else {
949 			*len = 1;
950 			*lcn = SPARSE_LCN;
951 		}
952 		goto out;
953 	}
954 
955 	svcn = le64_to_cpu(attr_b->nres.svcn);
956 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
957 
958 	attr = attr_b;
959 	le = le_b;
960 	mi = mi_b;
961 
962 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
963 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
964 				    &mi);
965 		if (!attr) {
966 			err = -EINVAL;
967 			goto out;
968 		}
969 		svcn = le64_to_cpu(attr->nres.svcn);
970 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
971 	}
972 
973 	/* Load in cache actual information. */
974 	err = attr_load_runs(attr, ni, run, NULL);
975 	if (err)
976 		goto out;
977 
978 	/* Check for compressed frame. */
979 	err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint);
980 	if (err)
981 		goto out;
982 
983 	if (hint) {
984 		/* If the frame is compressed, don't touch it. */
985 		*lcn = COMPRESSED_LCN;
986 		*len = hint;
987 		err = -EOPNOTSUPP;
988 		goto out;
989 	}
990 
991 	if (!*len) {
992 		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
993 			if (*lcn != SPARSE_LCN || !new)
994 				goto ok; /* Slow normal way without allocation. */
995 
996 			if (clen > *len)
997 				clen = *len;
998 		} else if (!new) {
999 			/* Here we may return -ENOENT.
1000 			 * In any case the caller gets zero length. */
1001 			goto ok;
1002 		}
1003 	}
1004 
1005 	if (!is_attr_ext(attr_b)) {
1006 		/* The code below only for sparsed or compressed attributes. */
1007 		err = -EINVAL;
1008 		goto out;
1009 	}
1010 
1011 	vcn0 = vcn;
1012 	to_alloc = clen;
1013 	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
1014 	/* Allocate frame-aligned clusters.
1015 	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
1016 	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
1017 	if (attr_b->nres.c_unit) {
1018 		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1019 		CLST cmask = ~(clst_per_frame - 1);
1020 
1021 		/* Get frame aligned vcn and to_alloc. */
1022 		vcn = vcn0 & cmask;
1023 		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1024 		if (fr < clst_per_frame)
1025 			fr = clst_per_frame;
1026 		zero = true;
1027 
1028 		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
1029 		if (vcn < svcn || evcn1 <= vcn) {
1030 			/* Load attribute for truncated vcn. */
1031 			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
1032 					    &vcn, &mi);
1033 			if (!attr) {
1034 				err = -EINVAL;
1035 				goto out;
1036 			}
1037 			svcn = le64_to_cpu(attr->nres.svcn);
1038 			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1039 			err = attr_load_runs(attr, ni, run, NULL);
1040 			if (err)
1041 				goto out;
1042 		}
1043 	}
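	/*
	 * Example (illustrative): with c_unit == 4 (16-cluster frames),
	 * vcn0 == 18 and clen == 3 round to vcn == 16 and to_alloc == 16,
	 * i.e. the whole containing frame is allocated.
	 */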
1044 
1045 	if (vcn + to_alloc > asize)
1046 		to_alloc = asize - vcn;
1047 
1048 	/* Get the last LCN to allocate from. */
1049 	hint = 0;
1050 
1051 	if (vcn > evcn1) {
1052 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1053 				   false)) {
1054 			err = -ENOMEM;
1055 			goto out;
1056 		}
1057 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1058 		hint = -1;
1059 	}
1060 
1061 	/* Allocate and zero out new clusters. */
1062 	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1063 				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1064 				     fr, lcn, len);
1065 	if (err)
1066 		goto out;
1067 	*new = true;
1068 	step = 1;
1069 
1070 	end = vcn + alen;
1071 	/* Save 'total_size0' to restore if error. */
1072 	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1073 	total_size = total_size0 + ((u64)alen << cluster_bits);
1074 
1075 	if (vcn != vcn0) {
1076 		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1077 			err = -EINVAL;
1078 			goto out;
1079 		}
1080 		if (*lcn == SPARSE_LCN) {
1081 			/* Internal error. Should not happen. */
1082 			WARN_ON(1);
1083 			err = -EINVAL;
1084 			goto out;
1085 		}
1086 		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1087 		if (vcn0 + *len > end)
1088 			*len = end - vcn0;
1089 	}
1090 
1091 repack:
1092 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1093 	if (err)
1094 		goto out;
1095 
1096 	attr_b->nres.total_size = cpu_to_le64(total_size);
1097 	inode_set_bytes(&ni->vfs_inode, total_size);
1098 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1099 
1100 	mi_b->dirty = true;
1101 	mark_inode_dirty(&ni->vfs_inode);
1102 
1103 	/* Stored [vcn : next_svcn) from [vcn : end). */
1104 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1105 
1106 	if (end <= evcn1) {
1107 		if (next_svcn == evcn1) {
1108 			/* Normal way. Update attribute and exit. */
1109 			goto ok;
1110 		}
1111 		/* Add new segment [next_svcn : evcn1). */
1112 		if (!ni->attr_list.size) {
1113 			err = ni_create_attr_list(ni);
1114 			if (err)
1115 				goto undo1;
1116 			/* Layout of records is changed. */
1117 			le_b = NULL;
1118 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1119 					      0, NULL, &mi_b);
1120 			if (!attr_b) {
1121 				err = -ENOENT;
1122 				goto out;
1123 			}
1124 
1125 			attr = attr_b;
1126 			le = le_b;
1127 			mi = mi_b;
1128 			goto repack;
1129 		}
1130 	}
1131 
1132 	/*
1133 	 * The code below may require an additional cluster (to extend the
1134 	 * attribute list) and/or one MFT record.
1135 	 * It is too complex to undo operations if -ENOSPC occurs deep
1136 	 * inside 'ni_insert_nonresident'.
1137 	 * Return -ENOSPC in advance here if there is no free cluster and no free MFT record.
1138 	 */
1139 	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1140 		/* Undo step 1. */
1141 		err = -ENOSPC;
1142 		goto undo1;
1143 	}
1144 
1145 	step = 2;
1146 	svcn = evcn1;
1147 
1148 	/* Look for the next attribute segment. */
1149 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1150 
1151 	if (!attr) {
1152 		/* Insert new attribute segment. */
1153 		goto ins_ext;
1154 	}
1155 
1156 	/* Try to update an existing attribute segment. */
1157 	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1158 	evcn = le64_to_cpu(attr->nres.evcn);
1159 
1160 	if (end < next_svcn)
1161 		end = next_svcn;
1162 	while (end > evcn) {
1163 		/* Remove segment [svcn : evcn). */
1164 		mi_remove_attr(NULL, mi, attr);
1165 
1166 		if (!al_remove_le(ni, le)) {
1167 			err = -EINVAL;
1168 			goto out;
1169 		}
1170 
1171 		if (evcn + 1 >= alloc) {
1172 			/* Last attribute segment. */
1173 			evcn1 = evcn + 1;
1174 			goto ins_ext;
1175 		}
1176 
1177 		if (ni_load_mi(ni, le, &mi)) {
1178 			attr = NULL;
1179 			goto out;
1180 		}
1181 
1182 		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1183 		if (!attr) {
1184 			err = -EINVAL;
1185 			goto out;
1186 		}
1187 		svcn = le64_to_cpu(attr->nres.svcn);
1188 		evcn = le64_to_cpu(attr->nres.evcn);
1189 	}
1190 
1191 	if (end < svcn)
1192 		end = svcn;
1193 
1194 	err = attr_load_runs(attr, ni, run, &end);
1195 	if (err)
1196 		goto out;
1197 
1198 	evcn1 = evcn + 1;
1199 	attr->nres.svcn = cpu_to_le64(next_svcn);
1200 	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1201 	if (err)
1202 		goto out;
1203 
1204 	le->vcn = cpu_to_le64(next_svcn);
1205 	ni->attr_list.dirty = true;
1206 	mi->dirty = true;
1207 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1208 
1209 ins_ext:
1210 	if (evcn1 > next_svcn) {
1211 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1212 					    next_svcn, evcn1 - next_svcn,
1213 					    attr_b->flags, &attr, &mi, NULL);
1214 		if (err)
1215 			goto out;
1216 	}
1217 ok:
1218 	run_truncate_around(run, vcn);
1219 out:
1220 	if (err && step > 1) {
1221 		/* Too complex to restore. */
1222 		_ntfs_bad_inode(&ni->vfs_inode);
1223 	}
1224 	up_write(&ni->file.run_lock);
1225 	ni_unlock(ni);
1226 
1227 	return err;
1228 
1229 undo1:
1230 	/* Undo step 1. */
1231 	attr_b->nres.total_size = cpu_to_le64(total_size0);
1232 	inode_set_bytes(&ni->vfs_inode, total_size0);
1233 
1234 	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1235 	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1236 	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1237 		_ntfs_bad_inode(&ni->vfs_inode);
1238 	}
1239 	goto out;
1240 }
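/*
 * Minimal usage sketch (hypothetical):
 *
 *	CLST lcn, len;
 *	bool new;
 *
 *	// Map VCN 0; allocate (and zero) a real cluster if it is a hole.
 *	int err = attr_data_get_block(ni, 0, 1, &lcn, &len, &new, true);
 */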
1241 
1242 int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
1243 {
1244 	u64 vbo;
1245 	struct ATTRIB *attr;
1246 	u32 data_size;
1247 	size_t len;
1248 
1249 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1250 	if (!attr)
1251 		return -EINVAL;
1252 
1253 	if (attr->non_res)
1254 		return E_NTFS_NONRESIDENT;
1255 
1256 	vbo = folio->index << PAGE_SHIFT;
1257 	data_size = le32_to_cpu(attr->res.data_size);
1258 	if (vbo > data_size)
1259 		len = 0;
1260 	else
1261 		len = min(data_size - vbo, folio_size(folio));
1262 
1263 	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
1264 	folio_mark_uptodate(folio);
1265 
1266 	return 0;
1267 }
1268 
1269 int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
1270 {
1271 	u64 vbo;
1272 	struct mft_inode *mi;
1273 	struct ATTRIB *attr;
1274 	u32 data_size;
1275 
1276 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1277 	if (!attr)
1278 		return -EINVAL;
1279 
1280 	if (attr->non_res) {
1281 		/* Return a special error code so the caller can detect this case. */
1282 		return E_NTFS_NONRESIDENT;
1283 	}
1284 
1285 	vbo = folio->index << PAGE_SHIFT;
1286 	data_size = le32_to_cpu(attr->res.data_size);
1287 	if (vbo < data_size) {
1288 		char *data = resident_data(attr);
1289 		size_t len = min(data_size - vbo, folio_size(folio));
1290 
1291 		memcpy_from_folio(data + vbo, folio, 0, len);
1292 		mi->dirty = true;
1293 	}
1294 	ni->i_valid = data_size;
1295 
1296 	return 0;
1297 }
1298 
1299 /*
1300  * attr_load_runs_vcn - Load the runs containing @vcn.
1301  */
1302 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1303 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1304 		       CLST vcn)
1305 {
1306 	struct ATTRIB *attr;
1307 	int err;
1308 	CLST svcn, evcn;
1309 	u16 ro;
1310 
1311 	if (!ni) {
1312 		/* Is record corrupted? */
1313 		return -ENOENT;
1314 	}
1315 
1316 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1317 	if (!attr) {
1318 		/* Is record corrupted? */
1319 		return -ENOENT;
1320 	}
1321 
1322 	svcn = le64_to_cpu(attr->nres.svcn);
1323 	evcn = le64_to_cpu(attr->nres.evcn);
1324 
1325 	if (evcn < vcn || vcn < svcn) {
1326 		/* Is record corrupted? */
1327 		return -EINVAL;
1328 	}
1329 
1330 	ro = le16_to_cpu(attr->nres.run_off);
1331 
1332 	if (ro > le32_to_cpu(attr->size))
1333 		return -EINVAL;
1334 
1335 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1336 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1337 	if (err < 0)
1338 		return err;
1339 	return 0;
1340 }
1341 
1342 /*
1343  * attr_load_runs_range - Load runs for the given range [from, to).
1344  */
1345 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1346 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1347 			 u64 from, u64 to)
1348 {
1349 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1350 	u8 cluster_bits = sbi->cluster_bits;
1351 	CLST vcn;
1352 	CLST vcn_last = (to - 1) >> cluster_bits;
1353 	CLST lcn, clen;
1354 	int err;
1355 
1356 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1357 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1358 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1359 						 vcn);
1360 			if (err)
1361 				return err;
1362 			clen = 0; /* The next run_lookup_entry(vcn) must succeed. */
1363 		}
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 #ifdef CONFIG_NTFS3_LZX_XPRESS
1370 /*
1371  * attr_wof_frame_info
1372  *
1373  * Read the header of an Xpress/LZX file to get info about a frame.
1374  */
1375 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1376 			struct runs_tree *run, u64 frame, u64 frames,
1377 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1378 {
1379 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1380 	u64 vbo[2], off[2], wof_size;
1381 	u32 voff;
1382 	u8 bytes_per_off;
1383 	char *addr;
1384 	struct folio *folio;
1385 	int i, err;
1386 	__le32 *off32;
1387 	__le64 *off64;
1388 
1389 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1390 		/* File starts with an array of 32-bit offsets. */
1391 		bytes_per_off = sizeof(__le32);
1392 		vbo[1] = frame << 2;
1393 		*vbo_data = frames << 2;
1394 	} else {
1395 		/* File starts with an array of 64-bit offsets. */
1396 		bytes_per_off = sizeof(__le64);
1397 		vbo[1] = frame << 3;
1398 		*vbo_data = frames << 3;
1399 	}
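	/*
	 * Example (illustrative): for a file smaller than 4 GiB, frame 10
	 * has its end offset at byte 40 of the table and its start offset
	 * at byte 36; compressed data begins right after the table, so
	 * *vbo_data (frames * 4 here) is biased by off[0] below.
	 */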
1400 
1401 	/*
1402 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1403 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1404 	 */
1405 	if (!attr->non_res) {
1406 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1407 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1408 			return -EINVAL;
1409 		}
1410 		addr = resident_data(attr);
1411 
1412 		if (bytes_per_off == sizeof(__le32)) {
1413 			off32 = Add2Ptr(addr, vbo[1]);
1414 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1415 			off[1] = le32_to_cpu(off32[0]);
1416 		} else {
1417 			off64 = Add2Ptr(addr, vbo[1]);
1418 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1419 			off[1] = le64_to_cpu(off64[0]);
1420 		}
1421 
1422 		*vbo_data += off[0];
1423 		*ondisk_size = off[1] - off[0];
1424 		return 0;
1425 	}
1426 
1427 	wof_size = le64_to_cpu(attr->nres.data_size);
1428 	down_write(&ni->file.run_lock);
1429 	folio = ni->file.offs_folio;
1430 	if (!folio) {
1431 		folio = folio_alloc(GFP_KERNEL, 0);
1432 		if (!folio) {
1433 			err = -ENOMEM;
1434 			goto out;
1435 		}
1436 		folio->index = -1;
1437 		ni->file.offs_folio = folio;
1438 	}
1439 	folio_lock(folio);
1440 	addr = folio_address(folio);
1441 
1442 	if (vbo[1]) {
1443 		voff = vbo[1] & (PAGE_SIZE - 1);
1444 		vbo[0] = vbo[1] - bytes_per_off;
1445 		i = 0;
1446 	} else {
1447 		voff = 0;
1448 		vbo[0] = 0;
1449 		off[0] = 0;
1450 		i = 1;
1451 	}
1452 
1453 	do {
1454 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1455 
1456 		if (index != folio->index) {
1457 			struct page *page = &folio->page;
1458 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1459 			u64 to = min(from + PAGE_SIZE, wof_size);
1460 
1461 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1462 						   ARRAY_SIZE(WOF_NAME), run,
1463 						   from, to);
1464 			if (err)
1465 				goto out1;
1466 
1467 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1468 					     to - from, REQ_OP_READ);
1469 			if (err) {
1470 				folio->index = -1;
1471 				goto out1;
1472 			}
1473 			folio->index = index;
1474 		}
1475 
1476 		if (i) {
1477 			if (bytes_per_off == sizeof(__le32)) {
1478 				off32 = Add2Ptr(addr, voff);
1479 				off[1] = le32_to_cpu(*off32);
1480 			} else {
1481 				off64 = Add2Ptr(addr, voff);
1482 				off[1] = le64_to_cpu(*off64);
1483 			}
1484 		} else if (!voff) {
1485 			if (bytes_per_off == sizeof(__le32)) {
1486 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1487 				off[0] = le32_to_cpu(*off32);
1488 			} else {
1489 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1490 				off[0] = le64_to_cpu(*off64);
1491 			}
1492 		} else {
1493 			/* Two values in one page. */
1494 			if (bytes_per_off == sizeof(__le32)) {
1495 				off32 = Add2Ptr(addr, voff);
1496 				off[0] = le32_to_cpu(off32[-1]);
1497 				off[1] = le32_to_cpu(off32[0]);
1498 			} else {
1499 				off64 = Add2Ptr(addr, voff);
1500 				off[0] = le64_to_cpu(off64[-1]);
1501 				off[1] = le64_to_cpu(off64[0]);
1502 			}
1503 			break;
1504 		}
1505 	} while (++i < 2);
1506 
1507 	*vbo_data += off[0];
1508 	*ondisk_size = off[1] - off[0];
1509 
1510 out1:
1511 	folio_unlock(folio);
1512 out:
1513 	up_write(&ni->file.run_lock);
1514 	return err;
1515 }
1516 #endif
1517 
1518 /*
1519  * attr_is_frame_compressed - Used to detect a compressed frame.
1520  */
1521 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1522 			     CLST frame, CLST *clst_data)
1523 {
1524 	int err;
1525 	u32 clst_frame;
1526 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1527 	size_t idx;
1528 	struct runs_tree *run;
1529 
1530 	*clst_data = 0;
1531 
1532 	if (!is_attr_compressed(attr))
1533 		return 0;
1534 
1535 	if (!attr->non_res)
1536 		return 0;
1537 
1538 	clst_frame = 1u << attr->nres.c_unit;
1539 	vcn = frame * clst_frame;
1540 	run = &ni->file.run;
1541 
1542 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1543 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1544 					 attr->name_len, run, vcn);
1545 		if (err)
1546 			return err;
1547 
1548 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1549 			return -EINVAL;
1550 	}
1551 
1552 	if (lcn == SPARSE_LCN) {
1553 		/* Sparse frame. */
1554 		return 0;
1555 	}
1556 
1557 	if (clen >= clst_frame) {
1558 		/*
1559 		 * The frame is not compressed because
1560 		 * it does not contain any sparse clusters.
1561 		 */
1562 		*clst_data = clst_frame;
1563 		return 0;
1564 	}
1565 
1566 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1567 	slen = 0;
1568 	*clst_data = clen;
1569 
1570 	/*
1571 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1572 	 * Check next fragments.
1573 	 */
1574 	while ((vcn += clen) < alen) {
1575 		vcn_next = vcn;
1576 
1577 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1578 		    vcn_next != vcn) {
1579 			err = attr_load_runs_vcn(ni, attr->type,
1580 						 attr_name(attr),
1581 						 attr->name_len, run, vcn_next);
1582 			if (err)
1583 				return err;
1584 			vcn = vcn_next;
1585 
1586 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1587 				return -EINVAL;
1588 		}
1589 
1590 		if (lcn == SPARSE_LCN) {
1591 			slen += clen;
1592 		} else {
1593 			if (slen) {
1594 				/*
1595 				 * Data clusters + sparse clusters are
1596 				 * not enough for the frame.
1597 				 */
1598 				return -EINVAL;
1599 			}
1600 			*clst_data += clen;
1601 		}
1602 
1603 		if (*clst_data + slen >= clst_frame) {
1604 			if (!slen) {
1605 				/*
1606 				 * There are no sparse clusters in this frame,
1607 				 * so it is not compressed.
1608 				 */
1609 				*clst_data = clst_frame;
1610 			} else {
1611 				/* Frame is compressed. */
1612 			}
1613 			break;
1614 		}
1615 	}
1616 
1617 	return 0;
1618 }
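/*
 * Example (illustrative): with c_unit == 4 a frame spans 16 clusters, so
 * frame 3 covers VCNs 48..63. If the run maps 10 real clusters followed
 * by 6 sparse ones, *clst_data ends up as 10: the frame is compressed
 * and occupies 10 clusters on disk.
 */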
1619 
1620 /*
1621  * attr_allocate_frame - Allocate/free clusters for @frame.
1622  *
1623  * Assumed: down_write(&ni->file.run_lock);
1624  */
1625 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1626 			u64 new_valid)
1627 {
1628 	int err = 0;
1629 	struct runs_tree *run = &ni->file.run;
1630 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1631 	struct ATTRIB *attr = NULL, *attr_b;
1632 	struct ATTR_LIST_ENTRY *le, *le_b;
1633 	struct mft_inode *mi, *mi_b;
1634 	CLST svcn, evcn1, next_svcn, len;
1635 	CLST vcn, end, clst_data;
1636 	u64 total_size, valid_size, data_size;
1637 
1638 	le_b = NULL;
1639 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1640 	if (!attr_b)
1641 		return -ENOENT;
1642 
1643 	if (!is_attr_ext(attr_b))
1644 		return -EINVAL;
1645 
1646 	vcn = frame << NTFS_LZNT_CUNIT;
1647 	total_size = le64_to_cpu(attr_b->nres.total_size);
1648 
1649 	svcn = le64_to_cpu(attr_b->nres.svcn);
1650 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1651 	data_size = le64_to_cpu(attr_b->nres.data_size);
1652 
1653 	if (svcn <= vcn && vcn < evcn1) {
1654 		attr = attr_b;
1655 		le = le_b;
1656 		mi = mi_b;
1657 	} else if (!le_b) {
1658 		err = -EINVAL;
1659 		goto out;
1660 	} else {
1661 		le = le_b;
1662 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1663 				    &mi);
1664 		if (!attr) {
1665 			err = -EINVAL;
1666 			goto out;
1667 		}
1668 		svcn = le64_to_cpu(attr->nres.svcn);
1669 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1670 	}
1671 
1672 	err = attr_load_runs(attr, ni, run, NULL);
1673 	if (err)
1674 		goto out;
1675 
1676 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1677 	if (err)
1678 		goto out;
1679 
1680 	total_size -= (u64)clst_data << sbi->cluster_bits;
1681 
1682 	len = bytes_to_cluster(sbi, compr_size);
1683 
1684 	if (len == clst_data)
1685 		goto out;
1686 
1687 	if (len < clst_data) {
1688 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1689 					NULL, true);
1690 		if (err)
1691 			goto out;
1692 
1693 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1694 				   false)) {
1695 			err = -ENOMEM;
1696 			goto out;
1697 		}
1698 		end = vcn + clst_data;
1699 		/* Run contains updated range [vcn + len : end). */
1700 	} else {
1701 		CLST alen, hint = 0;
1702 		/* Get the last LCN to allocate from. */
1703 		if (vcn + clst_data &&
1704 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1705 				      NULL)) {
1706 			hint = -1;
1707 		}
1708 
1709 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1710 					     hint + 1, len - clst_data, NULL,
1711 					     ALLOCATE_DEF, &alen, 0, NULL,
1712 					     NULL);
1713 		if (err)
1714 			goto out;
1715 
1716 		end = vcn + len;
1717 		/* Run contains updated range [vcn + clst_data : end). */
1718 	}
1719 
1720 	total_size += (u64)len << sbi->cluster_bits;
1721 
1722 repack:
1723 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1724 	if (err)
1725 		goto out;
1726 
1727 	attr_b->nres.total_size = cpu_to_le64(total_size);
1728 	inode_set_bytes(&ni->vfs_inode, total_size);
1729 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1730 
1731 	mi_b->dirty = true;
1732 	mark_inode_dirty(&ni->vfs_inode);
1733 
1734 	/* Stored [vcn : next_svcn) from [vcn : end). */
1735 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1736 
1737 	if (end <= evcn1) {
1738 		if (next_svcn == evcn1) {
1739 			/* Normal way. Update attribute and exit. */
1740 			goto ok;
1741 		}
1742 		/* Add new segment [next_svcn : evcn1). */
1743 		if (!ni->attr_list.size) {
1744 			err = ni_create_attr_list(ni);
1745 			if (err)
1746 				goto out;
1747 			/* Layout of records is changed. */
1748 			le_b = NULL;
1749 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1750 					      0, NULL, &mi_b);
1751 			if (!attr_b) {
1752 				err = -ENOENT;
1753 				goto out;
1754 			}
1755 
1756 			attr = attr_b;
1757 			le = le_b;
1758 			mi = mi_b;
1759 			goto repack;
1760 		}
1761 	}
1762 
1763 	svcn = evcn1;
1764 
1765 	/* Look for the next attribute segment. */
1766 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1767 
1768 	if (attr) {
1769 		CLST alloc = bytes_to_cluster(
1770 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1771 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1772 
1773 		if (end < next_svcn)
1774 			end = next_svcn;
1775 		while (end > evcn) {
1776 			/* Remove segment [svcn : evcn). */
1777 			mi_remove_attr(NULL, mi, attr);
1778 
1779 			if (!al_remove_le(ni, le)) {
1780 				err = -EINVAL;
1781 				goto out;
1782 			}
1783 
1784 			if (evcn + 1 >= alloc) {
1785 				/* Last attribute segment. */
1786 				evcn1 = evcn + 1;
1787 				goto ins_ext;
1788 			}
1789 
1790 			if (ni_load_mi(ni, le, &mi)) {
1791 				attr = NULL;
1792 				goto out;
1793 			}
1794 
1795 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1796 					    &le->id);
1797 			if (!attr) {
1798 				err = -EINVAL;
1799 				goto out;
1800 			}
1801 			svcn = le64_to_cpu(attr->nres.svcn);
1802 			evcn = le64_to_cpu(attr->nres.evcn);
1803 		}
1804 
1805 		if (end < svcn)
1806 			end = svcn;
1807 
1808 		err = attr_load_runs(attr, ni, run, &end);
1809 		if (err)
1810 			goto out;
1811 
1812 		evcn1 = evcn + 1;
1813 		attr->nres.svcn = cpu_to_le64(next_svcn);
1814 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1815 		if (err)
1816 			goto out;
1817 
1818 		le->vcn = cpu_to_le64(next_svcn);
1819 		ni->attr_list.dirty = true;
1820 		mi->dirty = true;
1821 
1822 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1823 	}
1824 ins_ext:
1825 	if (evcn1 > next_svcn) {
1826 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1827 					    next_svcn, evcn1 - next_svcn,
1828 					    attr_b->flags, &attr, &mi, NULL);
1829 		if (err)
1830 			goto out;
1831 	}
1832 ok:
1833 	run_truncate_around(run, vcn);
1834 out:
1835 	if (attr_b) {
1836 		if (new_valid > data_size)
1837 			new_valid = data_size;
1838 
1839 		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1840 		if (new_valid != valid_size) {
1841 			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1842 			mi_b->dirty = true;
1843 		}
1844 	}
1845 
1846 	return err;
1847 }
1848 
1849 /*
1850  * attr_collapse_range - Collapse range in file.
1851  */
1852 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1853 {
1854 	int err = 0;
1855 	struct runs_tree *run = &ni->file.run;
1856 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1857 	struct ATTRIB *attr = NULL, *attr_b;
1858 	struct ATTR_LIST_ENTRY *le, *le_b;
1859 	struct mft_inode *mi, *mi_b;
1860 	CLST svcn, evcn1, len, dealloc, alen;
1861 	CLST vcn, end;
1862 	u64 valid_size, data_size, alloc_size, total_size;
1863 	u32 mask;
1864 	__le16 a_flags;
1865 
1866 	if (!bytes)
1867 		return 0;
1868 
1869 	le_b = NULL;
1870 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1871 	if (!attr_b)
1872 		return -ENOENT;
1873 
1874 	if (!attr_b->non_res) {
1875 		/* Attribute is resident. Nothing to do? */
1876 		return 0;
1877 	}
1878 
1879 	data_size = le64_to_cpu(attr_b->nres.data_size);
1880 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1881 	a_flags = attr_b->flags;
1882 
1883 	if (is_attr_ext(attr_b)) {
1884 		total_size = le64_to_cpu(attr_b->nres.total_size);
1885 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1886 	} else {
1887 		total_size = alloc_size;
1888 		mask = sbi->cluster_mask;
1889 	}
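	/*
	 * Example (illustrative): with 4K clusters and c_unit == 4,
	 * mask == (4096 << 4) - 1 == 65535, so only 64 KiB-aligned
	 * collapse ranges are accepted below.
	 */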
1890 
1891 	if ((vbo & mask) || (bytes & mask)) {
1892 		/* Only cluster-aligned ranges may be collapsed. */
1893 		return -EINVAL;
1894 	}
1895 
1896 	if (vbo > data_size)
1897 		return -EINVAL;
1898 
1899 	down_write(&ni->file.run_lock);
1900 
1901 	if (vbo + bytes >= data_size) {
1902 		u64 new_valid = min(ni->i_valid, vbo);
1903 
1904 		/* Simple case: truncate the file at 'vbo'. */
1905 		truncate_setsize(&ni->vfs_inode, vbo);
1906 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1907 				    &new_valid, true, NULL);
1908 
1909 		if (!err && new_valid < ni->i_valid)
1910 			ni->i_valid = new_valid;
1911 
1912 		goto out;
1913 	}
1914 
1915 	/*
1916 	 * Enumerate all attribute segments and collapse.
1917 	 */
1918 	alen = alloc_size >> sbi->cluster_bits;
1919 	vcn = vbo >> sbi->cluster_bits;
1920 	len = bytes >> sbi->cluster_bits;
1921 	end = vcn + len;
1922 	dealloc = 0;
1923 
1924 	svcn = le64_to_cpu(attr_b->nres.svcn);
1925 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1926 
1927 	if (svcn <= vcn && vcn < evcn1) {
1928 		attr = attr_b;
1929 		le = le_b;
1930 		mi = mi_b;
1931 	} else if (!le_b) {
1932 		err = -EINVAL;
1933 		goto out;
1934 	} else {
1935 		le = le_b;
1936 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1937 				    &mi);
1938 		if (!attr) {
1939 			err = -EINVAL;
1940 			goto out;
1941 		}
1942 
1943 		svcn = le64_to_cpu(attr->nres.svcn);
1944 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1945 	}
1946 
1947 	for (;;) {
1948 		if (svcn >= end) {
1949 			/* Shift VCN. */
1950 			attr->nres.svcn = cpu_to_le64(svcn - len);
1951 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1952 			if (le) {
1953 				le->vcn = attr->nres.svcn;
1954 				ni->attr_list.dirty = true;
1955 			}
1956 			mi->dirty = true;
1957 		} else if (svcn < vcn || end < evcn1) {
1958 			CLST vcn1, eat, next_svcn;
1959 
1960 			/* Collapse a part of this attribute segment. */
1961 			err = attr_load_runs(attr, ni, run, &svcn);
1962 			if (err)
1963 				goto out;
1964 			vcn1 = max(vcn, svcn);
1965 			eat = min(end, evcn1) - vcn1;
1966 
1967 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1968 						true);
1969 			if (err)
1970 				goto out;
1971 
1972 			if (!run_collapse_range(run, vcn1, eat)) {
1973 				err = -ENOMEM;
1974 				goto out;
1975 			}
1976 
1977 			if (svcn >= vcn) {
1978 				/* Shift VCN */
1979 				attr->nres.svcn = cpu_to_le64(vcn);
1980 				if (le) {
1981 					le->vcn = attr->nres.svcn;
1982 					ni->attr_list.dirty = true;
1983 				}
1984 			}
1985 
1986 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1987 			if (err)
1988 				goto out;
1989 
1990 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1991 			if (next_svcn + eat < evcn1) {
1992 				err = ni_insert_nonresident(
1993 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1994 					evcn1 - eat - next_svcn, a_flags, &attr,
1995 					&mi, &le);
1996 				if (err)
1997 					goto out;
1998 
1999 				/* Layout of records may have changed. */
2000 				attr_b = NULL;
2001 			}
2002 
2003 			/* Free all allocated memory. */
2004 			run_truncate(run, 0);
2005 		} else {
2006 			u16 le_sz;
2007 			u16 roff = le16_to_cpu(attr->nres.run_off);
2008 
2009 			if (roff > le32_to_cpu(attr->size)) {
2010 				err = -EINVAL;
2011 				goto out;
2012 			}
2013 
2014 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2015 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2016 				      le32_to_cpu(attr->size) - roff);
2017 
2018 			/* Delete this attribute segment. */
2019 			mi_remove_attr(NULL, mi, attr);
2020 			if (!le)
2021 				break;
2022 
2023 			le_sz = le16_to_cpu(le->size);
2024 			if (!al_remove_le(ni, le)) {
2025 				err = -EINVAL;
2026 				goto out;
2027 			}
2028 
2029 			if (evcn1 >= alen)
2030 				break;
2031 
2032 			if (!svcn) {
2033 				/* Load next record that contains this attribute. */
2034 				if (ni_load_mi(ni, le, &mi)) {
2035 					err = -EINVAL;
2036 					goto out;
2037 				}
2038 
2039 				/* Look for required attribute. */
2040 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2041 						    0, &le->id);
2042 				if (!attr) {
2043 					err = -EINVAL;
2044 					goto out;
2045 				}
2046 				goto next_attr;
2047 			}
2048 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2049 		}
2050 
2051 		if (evcn1 >= alen)
2052 			break;
2053 
2054 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2055 		if (!attr) {
2056 			err = -EINVAL;
2057 			goto out;
2058 		}
2059 
2060 next_attr:
2061 		svcn = le64_to_cpu(attr->nres.svcn);
2062 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2063 	}
2064 
2065 	if (!attr_b) {
2066 		le_b = NULL;
2067 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2068 				      &mi_b);
2069 		if (!attr_b) {
2070 			err = -ENOENT;
2071 			goto out;
2072 		}
2073 	}
2074 
2075 	data_size -= bytes;
2076 	valid_size = ni->i_valid;
2077 	if (vbo + bytes <= valid_size)
2078 		valid_size -= bytes;
2079 	else if (vbo < valid_size)
2080 		valid_size = vbo;
2081 
2082 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2083 	attr_b->nres.data_size = cpu_to_le64(data_size);
2084 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2085 	total_size -= (u64)dealloc << sbi->cluster_bits;
2086 	if (is_attr_ext(attr_b))
2087 		attr_b->nres.total_size = cpu_to_le64(total_size);
2088 	mi_b->dirty = true;
2089 
2090 	/* Update inode size. */
2091 	ni->i_valid = valid_size;
2092 	i_size_write(&ni->vfs_inode, data_size);
2093 	inode_set_bytes(&ni->vfs_inode, total_size);
2094 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2095 	mark_inode_dirty(&ni->vfs_inode);
2096 
2097 out:
2098 	up_write(&ni->file.run_lock);
2099 	if (err)
2100 		_ntfs_bad_inode(&ni->vfs_inode);
2101 
2102 	return err;
2103 }
2104 
2105 /*
2106  * attr_punch_hole
2107  *
2108  * Not for normal files: requires a sparse or compressed attribute.
2109  */
2110 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2111 {
2112 	int err = 0;
2113 	struct runs_tree *run = &ni->file.run;
2114 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2115 	struct ATTRIB *attr = NULL, *attr_b;
2116 	struct ATTR_LIST_ENTRY *le, *le_b;
2117 	struct mft_inode *mi, *mi_b;
2118 	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2119 	u64 total_size, alloc_size;
2120 	u32 mask;
2121 	__le16 a_flags;
2122 	struct runs_tree run2;
2123 
2124 	if (!bytes)
2125 		return 0;
2126 
2127 	le_b = NULL;
2128 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2129 	if (!attr_b)
2130 		return -ENOENT;
2131 
2132 	if (!attr_b->non_res) {
2133 		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2134 		u32 from, to;
2135 
2136 		if (vbo > data_size)
2137 			return 0;
2138 
2139 		from = vbo;
2140 		to = min_t(u64, vbo + bytes, data_size);
2141 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2142 		return 0;
2143 	}
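	/*
	 * Resident example: with data_size == 200, vbo == 100, bytes == 500,
	 * the clamp above zeroes only [100, 200); a request entirely past the
	 * resident data (vbo > data_size) returned 0 without touching it.
	 */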
2144 
2145 	if (!is_attr_ext(attr_b))
2146 		return -EOPNOTSUPP;
2147 
2148 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2149 	total_size = le64_to_cpu(attr_b->nres.total_size);
2150 
2151 	if (vbo >= alloc_size) {
2152 		/* NOTE: Punching past the allocated size is allowed and is a no-op. */
2153 		return 0;
2154 	}
2155 
2156 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2157 
2158 	bytes += vbo;
2159 	if (bytes > alloc_size)
2160 		bytes = alloc_size;
2161 	bytes -= vbo;
2162 
2163 	if ((vbo & mask) || (bytes & mask)) {
2164 		/* Unaligned range: sub-frame part(s) would have to be zeroed. */
2165 		if (frame_size == NULL) {
2166 			/* Caller asserted the range is aligned, but it is not. */
2167 			return -EINVAL;
2168 		}
2169 		*frame_size = mask + 1;
2170 		return E_NTFS_NOTALIGNED;
2171 	}
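	/*
	 * Example: with 4K clusters and 16-cluster compression frames
	 * (c_unit == 4), mask == 0xFFFF; punching at vbo == 0x12000 is not
	 * frame-aligned, so the caller receives *frame_size == 0x10000
	 * together with E_NTFS_NOTALIGNED and must zero the partial frames
	 * itself.
	 */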
2172 
2173 	down_write(&ni->file.run_lock);
2174 	run_init(&run2);
2175 	run_truncate(run, 0);
2176 
2177 	/*
2178 	 * Enumerate all attribute segments and punch a hole where necessary.
2179 	 */
2180 	alen = alloc_size >> sbi->cluster_bits;
2181 	vcn = vbo >> sbi->cluster_bits;
2182 	len = bytes >> sbi->cluster_bits;
2183 	end = vcn + len;
2184 	hole = 0;
2185 
2186 	svcn = le64_to_cpu(attr_b->nres.svcn);
2187 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2188 	a_flags = attr_b->flags;
2189 
2190 	if (svcn <= vcn && vcn < evcn1) {
2191 		attr = attr_b;
2192 		le = le_b;
2193 		mi = mi_b;
2194 	} else if (!le_b) {
2195 		err = -EINVAL;
2196 		goto bad_inode;
2197 	} else {
2198 		le = le_b;
2199 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2200 				    &mi);
2201 		if (!attr) {
2202 			err = -EINVAL;
2203 			goto bad_inode;
2204 		}
2205 
2206 		svcn = le64_to_cpu(attr->nres.svcn);
2207 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2208 	}
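	/*
	 * Here attr/le/mi describe the attribute segment whose [svcn, evcn1)
	 * range contains the first cluster of the hole.
	 */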
2209 
2210 	while (svcn < end) {
2211 		CLST vcn1, zero, hole2 = hole;
2212 
2213 		err = attr_load_runs(attr, ni, run, &svcn);
2214 		if (err)
2215 			goto done;
2216 		vcn1 = max(vcn, svcn);
2217 		zero = min(end, evcn1) - vcn1;
2218 
2219 		/*
2220 		 * Check the range [vcn1, vcn1 + zero): count how many
2221 		 * clusters are currently allocated there, without doing
2222 		 * any destructive actions.
2223 		 */
2224 		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2225 		if (err)
2226 			goto done;
2227 
2228 		/* Check if the required range is already a hole. */
2229 		if (hole2 == hole)
2230 			goto next_attr;
2231 
2232 		/* Make a clone of run to undo. */
2233 		err = run_clone(run, &run2);
2234 		if (err)
2235 			goto done;
2236 
2237 		/* Make the range [vcn1, vcn1 + zero) a sparse hole. */
2238 		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2239 			err = -ENOMEM;
2240 			goto done;
2241 		}
2242 
2243 		/* Update run in attribute segment. */
2244 		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2245 		if (err)
2246 			goto done;
2247 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2248 		if (next_svcn < evcn1) {
2249 			/* Insert new attribute segment. */
2250 			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2251 						    next_svcn,
2252 						    evcn1 - next_svcn, a_flags,
2253 						    &attr, &mi, &le);
2254 			if (err)
2255 				goto undo_punch;
2256 
2257 			/* Layout of records may have changed. */
2258 			attr_b = NULL;
2259 		}
2260 
2261 		/* Real deallocation; should not fail. */
2262 		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
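		/*
		 * run2 still holds the pre-hole mapping, so the clusters are
		 * freed from it even though @run was already remapped; if the
		 * segment insert above fails instead, undo_punch repacks run2
		 * to restore the original layout.
		 */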
2263 
2264 next_attr:
2265 		/* Free all allocated memory. */
2266 		run_truncate(run, 0);
2267 
2268 		if (evcn1 >= alen)
2269 			break;
2270 
2271 		/* Get next attribute segment. */
2272 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2273 		if (!attr) {
2274 			err = -EINVAL;
2275 			goto bad_inode;
2276 		}
2277 
2278 		svcn = le64_to_cpu(attr->nres.svcn);
2279 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2280 	}
2281 
2282 done:
2283 	if (!hole)
2284 		goto out;
2285 
2286 	if (!attr_b) {
2287 		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2288 				      &mi_b);
2289 		if (!attr_b) {
2290 			err = -EINVAL;
2291 			goto bad_inode;
2292 		}
2293 	}
2294 
2295 	total_size -= (u64)hole << sbi->cluster_bits;
2296 	attr_b->nres.total_size = cpu_to_le64(total_size);
2297 	mi_b->dirty = true;
2298 
2299 	/* Update inode size. */
2300 	inode_set_bytes(&ni->vfs_inode, total_size);
2301 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2302 	mark_inode_dirty(&ni->vfs_inode);
2303 
2304 out:
2305 	run_close(&run2);
2306 	up_write(&ni->file.run_lock);
2307 	return err;
2308 
2309 bad_inode:
2310 	_ntfs_bad_inode(&ni->vfs_inode);
2311 	goto out;
2312 
2313 undo_punch:
2314 	/*
2315 	 * Restore packed runs.
2316 	 * 'mi_pack_runs' should not fail because we restore the original runs.
2317 	 */
2318 	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2319 		goto bad_inode;
2320 
2321 	goto done;
2322 }
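/*
 * Expected calling pattern (an editorial sketch; in-tree, ntfs_fallocate()
 * handles FALLOC_FL_PUNCH_HOLE roughly this way): call attr_punch_hole();
 * if it returns E_NTFS_NOTALIGNED, zero the unaligned head and tail of the
 * range through the page cache, then retry with the frame-aligned middle
 * part, i.e. [round_up(vbo, *frame_size), round_down(vbo + bytes, *frame_size)).
 */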
2323 
2324 /*
2325  * attr_insert_range - Insert range (hole) in file.
2326  * Not for normal files: requires a sparse or compressed attribute.
2327  */
2328 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2329 {
2330 	int err = 0;
2331 	struct runs_tree *run = &ni->file.run;
2332 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2333 	struct ATTRIB *attr = NULL, *attr_b;
2334 	struct ATTR_LIST_ENTRY *le, *le_b;
2335 	struct mft_inode *mi, *mi_b;
2336 	CLST vcn, svcn, evcn1, len, next_svcn;
2337 	u64 data_size, alloc_size;
2338 	u32 mask;
2339 	__le16 a_flags;
2340 
2341 	if (!bytes)
2342 		return 0;
2343 
2344 	le_b = NULL;
2345 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2346 	if (!attr_b)
2347 		return -ENOENT;
2348 
2349 	if (!is_attr_ext(attr_b)) {
2350 		/* Should have been rejected earlier by the caller; see fallocate. */
2351 		return -EOPNOTSUPP;
2352 	}
2353 
2354 	if (!attr_b->non_res) {
2355 		data_size = le32_to_cpu(attr_b->res.data_size);
2356 		alloc_size = data_size;
2357 		mask = sbi->cluster_mask; /* cluster_size - 1 */
2358 	} else {
2359 		data_size = le64_to_cpu(attr_b->nres.data_size);
2360 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2361 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2362 	}
2363 
2364 	if (vbo >= data_size) {
2365 		/*
2366 		 * Inserting a range at or beyond the end of file is not
2367 		 * allowed: if the offset is equal to or greater than the
2368 		 * end of file, an error is returned. To append a hole at
2369 		 * the end of file, use ftruncate(2) instead.
2370 		 */
2371 		return -EINVAL;
2372 	}
2373 
2374 	if ((vbo & mask) || (bytes & mask)) {
2375 		/* Only frame-aligned ranges may be inserted. */
2376 		return -EINVAL;
2377 	}
2378 
2379 	/*
2380 	 * The invariant valid_size <= data_size <= alloc_size holds here;
2381 	 * check that the enlarged alloc_size stays within the maximum possible.
2382 	 */
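	/*
	 * Checking "bytes > maxbytes_sparse - alloc_size" instead of
	 * "alloc_size + bytes > maxbytes_sparse" keeps the u64 sum from
	 * wrapping; alloc_size is at most maxbytes_sparse for a valid file,
	 * so the subtraction cannot underflow.
	 */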
2383 	if (bytes > sbi->maxbytes_sparse - alloc_size)
2384 		return -EFBIG;
2385 
2386 	vcn = vbo >> sbi->cluster_bits;
2387 	len = bytes >> sbi->cluster_bits;
2388 
2389 	down_write(&ni->file.run_lock);
2390 
2391 	if (!attr_b->non_res) {
2392 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2393 				    data_size + bytes, NULL, false, NULL);
2394 
2395 		le_b = NULL;
2396 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2397 				      &mi_b);
2398 		if (!attr_b) {
2399 			err = -EINVAL;
2400 			goto bad_inode;
2401 		}
2402 
2403 		if (err)
2404 			goto out;
2405 
2406 		if (!attr_b->non_res) {
2407 			/* Still resident: shift the tail up by 'bytes', zero the gap. */
2408 			char *data = Add2Ptr(attr_b,
2409 					     le16_to_cpu(attr_b->res.data_off));
2410 
2411 			memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2412 			memset(data + vbo, 0, bytes);
2413 			goto done;
2414 		}
2415 
2416 		/* The resident attribute has become non-resident. */
2417 		data_size = le64_to_cpu(attr_b->nres.data_size);
2418 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2419 	}
2420 
2421 	/*
2422 	 * Enumerate all attribute segments and shift their start VCN.
2423 	 */
2424 	a_flags = attr_b->flags;
2425 	svcn = le64_to_cpu(attr_b->nres.svcn);
2426 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2427 
2428 	if (svcn <= vcn && vcn < evcn1) {
2429 		attr = attr_b;
2430 		le = le_b;
2431 		mi = mi_b;
2432 	} else if (!le_b) {
2433 		err = -EINVAL;
2434 		goto bad_inode;
2435 	} else {
2436 		le = le_b;
2437 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2438 				    &mi);
2439 		if (!attr) {
2440 			err = -EINVAL;
2441 			goto bad_inode;
2442 		}
2443 
2444 		svcn = le64_to_cpu(attr->nres.svcn);
2445 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2446 	}
2447 
2448 	run_truncate(run, 0); /* Clear cached values. */
2449 	err = attr_load_runs(attr, ni, run, NULL);
2450 	if (err)
2451 		goto out;
2452 
2453 	if (!run_insert_range(run, vcn, len)) {
2454 		err = -ENOMEM;
2455 		goto out;
2456 	}
2457 
2458 	/* Try to pack in current record as much as possible. */
2459 	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2460 	if (err)
2461 		goto out;
2462 
2463 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2464 
2465 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2466 	       attr->type == ATTR_DATA && !attr->name_len) {
2467 		le64_add_cpu(&attr->nres.svcn, len);
2468 		le64_add_cpu(&attr->nres.evcn, len);
2469 		if (le) {
2470 			le->vcn = attr->nres.svcn;
2471 			ni->attr_list.dirty = true;
2472 		}
2473 		mi->dirty = true;
2474 	}
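	/*
	 * Example: inserting len == 16 clusters at vcn == 100 turns a later
	 * segment [svcn 100, evcn 199] into [116, 215]; every following
	 * ATTR_DATA segment and its attribute-list entry shifts up by len.
	 */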
2475 
2476 	if (next_svcn < evcn1 + len) {
2477 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2478 					    next_svcn, evcn1 + len - next_svcn,
2479 					    a_flags, NULL, NULL, NULL);
2480 
2481 		le_b = NULL;
2482 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2483 				      &mi_b);
2484 		if (!attr_b) {
2485 			err = -EINVAL;
2486 			goto bad_inode;
2487 		}
2488 
2489 		if (err) {
2490 			/* ni_insert_nonresident failed. Try to undo. */
2491 			goto undo_insert_range;
2492 		}
2493 	}
2494 
2495 	/*
2496 	 * Update primary attribute segment.
2497 	 */
2498 	if (vbo <= ni->i_valid)
2499 		ni->i_valid += bytes;
2500 
2501 	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2502 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2503 
2504 	/* ni->i_valid may temporarily differ from the stored valid_size. */
2505 	if (ni->i_valid > data_size + bytes)
2506 		attr_b->nres.valid_size = attr_b->nres.data_size;
2507 	else
2508 		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2509 	mi_b->dirty = true;
2510 
2511 done:
2512 	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2513 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2514 	mark_inode_dirty(&ni->vfs_inode);
2515 
2516 out:
2517 	run_truncate(run, 0); /* Clear cached values. */
2518 
2519 	up_write(&ni->file.run_lock);
2520 
2521 	return err;
2522 
2523 bad_inode:
2524 	_ntfs_bad_inode(&ni->vfs_inode);
2525 	goto out;
2526 
2527 undo_insert_range:
2528 	svcn = le64_to_cpu(attr_b->nres.svcn);
2529 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2530 
2531 	if (svcn <= vcn && vcn < evcn1) {
2532 		attr = attr_b;
2533 		le = le_b;
2534 		mi = mi_b;
2535 	} else if (!le_b) {
2536 		goto bad_inode;
2537 	} else {
2538 		le = le_b;
2539 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2540 				    &mi);
2541 		if (!attr) {
2542 			goto bad_inode;
2543 		}
2544 
2545 		svcn = le64_to_cpu(attr->nres.svcn);
2546 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2547 	}
2548 
2549 	if (attr_load_runs(attr, ni, run, NULL))
2550 		goto bad_inode;
2551 
2552 	if (!run_collapse_range(run, vcn, len))
2553 		goto bad_inode;
2554 
2555 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2556 		goto bad_inode;
2557 
2558 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2559 	       attr->type == ATTR_DATA && !attr->name_len) {
2560 		le64_sub_cpu(&attr->nres.svcn, len);
2561 		le64_sub_cpu(&attr->nres.evcn, len);
2562 		if (le) {
2563 			le->vcn = attr->nres.svcn;
2564 			ni->attr_list.dirty = true;
2565 		}
2566 		mi->dirty = true;
2567 	}
2568 
2569 	goto out;
2570 }
2571 
2572 /*
2573  * attr_force_nonresident
2574  *
2575  * Convert the default data attribute into non-resident form.
2576  */
2577 int attr_force_nonresident(struct ntfs_inode *ni)
2578 {
2579 	int err;
2580 	struct ATTRIB *attr;
2581 	struct ATTR_LIST_ENTRY *le = NULL;
2582 	struct mft_inode *mi;
2583 
2584 	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2585 	if (!attr) {
2586 		ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
2587 		return -ENOENT;
2588 	}
2589 
2590 	if (attr->non_res) {
2591 		/* Already non-resident. */
2592 		return 0;
2593 	}
2594 
2595 	down_write(&ni->file.run_lock);
2596 	err = attr_make_nonresident(ni, attr, le, mi,
2597 				    le32_to_cpu(attr->res.data_size),
2598 				    &ni->file.run, &attr, NULL);
2599 	up_write(&ni->file.run_lock);
2600 
2601 	return err;
2602 }
2603