xref: /linux/fs/ntfs3/attrib.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP may be defined externally
19  * to tune the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
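/*
 * get_pre_allocated - Round @size up to the current preallocation clump.
 *
 * Illustrative example (editor's sketch, not from the original source): with
 * the default NTFS_MIN_LOG2_OF_CLUMP == 16, a request of 100,000 bytes falls
 * into the 'size <= NTFS_CLUMP_MIN' branch and is rounded up to a multiple of
 * the 64K clump: ((100000 + 65535) >> 16) << 16 == 131072 bytes.
 */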
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = (((size + clump - 1) >> align_shift)) << align_shift;
53 
54 	return ret;
55 }
56 
57 /*
58  * attr_load_runs - Load all runs stored in @attr.
59  */
60 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61 			  struct runs_tree *run, const CLST *vcn)
62 {
63 	int err;
64 	CLST svcn = le64_to_cpu(attr->nres.svcn);
65 	CLST evcn = le64_to_cpu(attr->nres.evcn);
66 	u32 asize;
67 	u16 run_off;
68 
69 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
70 		return 0;
71 
72 	if (vcn && (evcn < *vcn || *vcn < svcn))
73 		return -EINVAL;
74 
75 	asize = le32_to_cpu(attr->size);
76 	run_off = le16_to_cpu(attr->nres.run_off);
77 
78 	if (run_off > asize)
79 		return -EINVAL;
80 
81 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
82 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
83 			    asize - run_off);
84 	if (err < 0)
85 		return err;
86 
87 	return 0;
88 }
89 
90 /*
91  * run_deallocate_ex - Deallocate (and optionally trim) clusters from @run.
92  */
93 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
94 			     CLST vcn, CLST len, CLST *done, bool trim)
95 {
96 	int err = 0;
97 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
98 	size_t idx;
99 
100 	if (!len)
101 		goto out;
102 
103 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
104 failed:
105 		run_truncate(run, vcn0);
106 		err = -EINVAL;
107 		goto out;
108 	}
109 
110 	for (;;) {
111 		if (clen > len)
112 			clen = len;
113 
114 		if (!clen) {
115 			err = -EINVAL;
116 			goto out;
117 		}
118 
119 		if (lcn != SPARSE_LCN) {
120 			if (sbi) {
121 				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
122 				mark_as_free_ex(sbi, lcn, clen, trim);
123 			}
124 			dn += clen;
125 		}
126 
127 		len -= clen;
128 		if (!len)
129 			break;
130 
131 		vcn_next = vcn + clen;
132 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
133 		    vcn != vcn_next) {
134 			/* Save memory - don't load entire run. */
135 			goto failed;
136 		}
137 	}
138 
139 out:
140 	if (done)
141 		*done += dn;
142 
143 	return err;
144 }
145 
146 /*
147  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
148  */
149 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
150 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
151 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
152 			   CLST *new_lcn, CLST *new_len)
153 {
154 	int err;
155 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
156 	size_t cnt = run->count;
157 
158 	for (;;) {
159 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
160 					       opt);
161 
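		/*
		 * On -ENOSPC, retry once without the optional preallocation
		 * before giving up: 'pre' is dropped and the caller's
		 * *pre_alloc is zeroed so it is not counted again.
		 */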
162 		if (err == -ENOSPC && pre) {
163 			pre = 0;
164 			if (*pre_alloc)
165 				*pre_alloc = 0;
166 			continue;
167 		}
168 
169 		if (err)
170 			goto out;
171 
172 		if (vcn == vcn0) {
173 			/* Return the first fragment. */
174 			if (new_lcn)
175 				*new_lcn = lcn;
176 			if (new_len)
177 				*new_len = flen;
178 		}
179 
180 		/* Add new fragment into run storage. */
181 		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
182 			/* Undo last 'ntfs_look_for_free_space' */
183 			mark_as_free_ex(sbi, lcn, len, false);
184 			err = -ENOMEM;
185 			goto out;
186 		}
187 
188 		if (opt & ALLOCATE_ZERO) {
189 			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
190 
191 			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
192 						   (sector_t)lcn << shift,
193 						   (sector_t)flen << shift,
194 						   GFP_NOFS, 0);
195 			if (err)
196 				goto out;
197 		}
198 
199 		vcn += flen;
200 
201 		if (flen >= len || (opt & ALLOCATE_MFT) ||
202 		    (fr && run->count - cnt >= fr)) {
203 			*alen = vcn - vcn0;
204 			return 0;
205 		}
206 
207 		len -= flen;
208 	}
209 
210 out:
211 	/* Undo 'ntfs_look_for_free_space' */
212 	if (vcn - vcn0) {
213 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
214 		run_truncate(run, vcn0);
215 	}
216 
217 	return err;
218 }
219 
220 /*
221  * attr_make_nonresident
222  *
223  * If @page is not NULL, it already contains resident data
224  * and is locked (called from ni_write_frame()).
225  */
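/*
 * Conversion outline (summary of the function below): allocate enough
 * clusters for the resident payload, copy the payload either directly
 * (non-data attributes) or through the page cache (data attribute, unless
 * the caller already provided a locked page), remove the resident attribute
 * from the record and insert a new nonresident one; on failure every step
 * is undone in reverse order.
 */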
226 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
227 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
228 			  u64 new_size, struct runs_tree *run,
229 			  struct ATTRIB **ins_attr, struct page *page)
230 {
231 	struct ntfs_sb_info *sbi;
232 	struct ATTRIB *attr_s;
233 	struct MFT_REC *rec;
234 	u32 used, asize, rsize, aoff;
235 	bool is_data;
236 	CLST len, alen;
237 	char *next;
238 	int err;
239 
240 	if (attr->non_res) {
241 		*ins_attr = attr;
242 		return 0;
243 	}
244 
245 	sbi = mi->sbi;
246 	rec = mi->mrec;
247 	attr_s = NULL;
248 	used = le32_to_cpu(rec->used);
249 	asize = le32_to_cpu(attr->size);
250 	next = Add2Ptr(attr, asize);
251 	aoff = PtrOffset(rec, attr);
252 	rsize = le32_to_cpu(attr->res.data_size);
253 	is_data = attr->type == ATTR_DATA && !attr->name_len;
254 
255 	/* len - how many clusters are required to store 'rsize' bytes. */
256 	if (is_attr_compressed(attr)) {
257 		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
258 		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
259 	} else {
260 		len = bytes_to_cluster(sbi, rsize);
261 	}
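	/*
	 * Illustrative example (values are assumptions, not from the source):
	 * with 4K clusters and NTFS_LZNT_CUNIT == 4 (16-cluster frames),
	 * shift == 16, so rsize == 70000 gives len == 2 << 4 == 32 clusters,
	 * i.e. two whole 64K compression frames.
	 */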
262 
263 	run_init(run);
264 
265 	/* Make a copy of original attribute. */
266 	attr_s = kmemdup(attr, asize, GFP_NOFS);
267 	if (!attr_s) {
268 		err = -ENOMEM;
269 		goto out;
270 	}
271 
272 	if (!len) {
273 		/* Empty resident -> Empty nonresident. */
274 		alen = 0;
275 	} else {
276 		const char *data = resident_data(attr);
277 
278 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
279 					     ALLOCATE_DEF, &alen, 0, NULL,
280 					     NULL);
281 		if (err)
282 			goto out1;
283 
284 		if (!rsize) {
285 			/* Empty resident -> Non-empty nonresident. */
286 		} else if (!is_data) {
287 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
288 			if (err)
289 				goto out2;
290 		} else if (!page) {
291 			struct address_space *mapping = ni->vfs_inode.i_mapping;
292 			struct folio *folio;
293 
294 			folio = __filemap_get_folio(
295 				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
296 				mapping_gfp_mask(mapping));
297 			if (IS_ERR(folio)) {
298 				err = PTR_ERR(folio);
299 				goto out2;
300 			}
301 			folio_fill_tail(folio, 0, data, rsize);
302 			folio_mark_uptodate(folio);
303 			folio_mark_dirty(folio);
304 			folio_unlock(folio);
305 			folio_put(folio);
306 		}
307 	}
308 
309 	/* Remove original attribute. */
310 	used -= asize;
311 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
312 	rec->used = cpu_to_le32(used);
313 	mi->dirty = true;
314 	if (le)
315 		al_remove_le(ni, le);
316 
317 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
318 				    attr_s->name_len, run, 0, alen,
319 				    attr_s->flags, &attr, NULL, NULL);
320 	if (err)
321 		goto out3;
322 
323 	kfree(attr_s);
324 	attr->nres.data_size = cpu_to_le64(rsize);
325 	attr->nres.valid_size = attr->nres.data_size;
326 
327 	*ins_attr = attr;
328 
329 	if (is_data)
330 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
331 
332 	/* Resident attribute becomes non-resident. */
333 	return 0;
334 
335 out3:
336 	attr = Add2Ptr(rec, aoff);
337 	memmove(next, attr, used - aoff);
338 	memcpy(attr, attr_s, asize);
339 	rec->used = cpu_to_le32(used + asize);
340 	mi->dirty = true;
341 out2:
342 	/* Undo: do not trim newly allocated clusters. */
343 	run_deallocate(sbi, run, false);
344 	run_close(run);
345 out1:
346 	kfree(attr_s);
347 out:
348 	return err;
349 }
350 
351 /*
352  * attr_set_size_res - Helper for attr_set_size().
353  */
354 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
355 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
356 			     u64 new_size, struct runs_tree *run,
357 			     struct ATTRIB **ins_attr)
358 {
359 	struct ntfs_sb_info *sbi = mi->sbi;
360 	struct MFT_REC *rec = mi->mrec;
361 	u32 used = le32_to_cpu(rec->used);
362 	u32 asize = le32_to_cpu(attr->size);
363 	u32 aoff = PtrOffset(rec, attr);
364 	u32 rsize = le32_to_cpu(attr->res.data_size);
365 	u32 tail = used - aoff - asize;
366 	char *next = Add2Ptr(attr, asize);
367 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
368 
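	/*
	 * Attribute payloads inside an MFT record are 8-byte aligned, so
	 * 'dsize' is the signed number of bytes the record tail must move.
	 */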
369 	if (dsize < 0) {
370 		memmove(next + dsize, next, tail);
371 	} else if (dsize > 0) {
372 		if (used + dsize > sbi->max_bytes_per_attr)
373 			return attr_make_nonresident(ni, attr, le, mi, new_size,
374 						     run, ins_attr, NULL);
375 
376 		memmove(next + dsize, next, tail);
377 		memset(next, 0, dsize);
378 	}
379 
380 	if (new_size > rsize)
381 		memset(Add2Ptr(resident_data(attr), rsize), 0,
382 		       new_size - rsize);
383 
384 	rec->used = cpu_to_le32(used + dsize);
385 	attr->size = cpu_to_le32(asize + dsize);
386 	attr->res.data_size = cpu_to_le32(new_size);
387 	mi->dirty = true;
388 	*ins_attr = attr;
389 
390 	return 0;
391 }
392 
393 /*
394  * attr_set_size - Change the size of an attribute.
395  *
396  * Extend:
397  *   - Sparse/compressed: No allocated clusters.
398  *   - Normal: Append allocated and preallocated new clusters.
399  * Shrink:
400  *   - No deallocate if @keep_prealloc is set.
401  */
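/*
 * Illustrative call (sketch, not from the original source): truncating the
 * unnamed $DATA attribute while keeping the valid size consistent:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *			    new_size, &new_valid, false, NULL);
 */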
402 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
403 		  const __le16 *name, u8 name_len, struct runs_tree *run,
404 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
405 		  struct ATTRIB **ret)
406 {
407 	int err = 0;
408 	struct ntfs_sb_info *sbi = ni->mi.sbi;
409 	u8 cluster_bits = sbi->cluster_bits;
410 	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
411 		      !name_len;
412 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
413 	struct ATTRIB *attr = NULL, *attr_b;
414 	struct ATTR_LIST_ENTRY *le, *le_b;
415 	struct mft_inode *mi, *mi_b;
416 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
417 	CLST next_svcn, pre_alloc = -1, done = 0;
418 	bool is_ext, is_bad = false;
419 	bool dirty = false;
420 	u32 align;
421 	struct MFT_REC *rec;
422 
423 again:
424 	alen = 0;
425 	le_b = NULL;
426 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
427 			      &mi_b);
428 	if (!attr_b) {
429 		err = -ENOENT;
430 		goto bad_inode;
431 	}
432 
433 	if (!attr_b->non_res) {
434 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
435 					&attr_b);
436 		if (err)
437 			return err;
438 
439 		/* Return if file is still resident. */
440 		if (!attr_b->non_res) {
441 			dirty = true;
442 			goto ok1;
443 		}
444 
445 		/* Layout of records may be changed, so do a full search. */
446 		goto again;
447 	}
448 
449 	is_ext = is_attr_ext(attr_b);
450 	align = sbi->cluster_size;
451 	if (is_ext)
452 		align <<= attr_b->nres.c_unit;
453 
454 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
455 	old_size = le64_to_cpu(attr_b->nres.data_size);
456 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
457 
458 again_1:
459 	old_alen = old_alloc >> cluster_bits;
460 
461 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
462 	new_alen = new_alloc >> cluster_bits;
463 
464 	if (keep_prealloc && new_size < old_size) {
465 		attr_b->nres.data_size = cpu_to_le64(new_size);
466 		mi_b->dirty = dirty = true;
467 		goto ok;
468 	}
469 
470 	vcn = old_alen - 1;
471 
472 	svcn = le64_to_cpu(attr_b->nres.svcn);
473 	evcn = le64_to_cpu(attr_b->nres.evcn);
474 
475 	if (svcn <= vcn && vcn <= evcn) {
476 		attr = attr_b;
477 		le = le_b;
478 		mi = mi_b;
479 	} else if (!le_b) {
480 		err = -EINVAL;
481 		goto bad_inode;
482 	} else {
483 		le = le_b;
484 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
485 				    &mi);
486 		if (!attr) {
487 			err = -EINVAL;
488 			goto bad_inode;
489 		}
490 
491 next_le_1:
492 		svcn = le64_to_cpu(attr->nres.svcn);
493 		evcn = le64_to_cpu(attr->nres.evcn);
494 	}
495 	/*
496 	 * Here we have:
497 	 * attr,mi,le - last attribute segment (containing 'vcn').
498 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
499 	 */
500 next_le:
501 	rec = mi->mrec;
502 	err = attr_load_runs(attr, ni, run, NULL);
503 	if (err)
504 		goto out;
505 
506 	if (new_size > old_size) {
507 		CLST to_allocate;
508 		size_t free;
509 
510 		if (new_alloc <= old_alloc) {
511 			attr_b->nres.data_size = cpu_to_le64(new_size);
512 			mi_b->dirty = dirty = true;
513 			goto ok;
514 		}
515 
516 		/*
517 		 * Add clusters. In simple case we have to:
518 		 *  - allocate space (vcn, lcn, len)
519 		 *  - update packed run in 'mi'
520 		 *  - update attr->nres.evcn
521 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
522 		 */
523 		to_allocate = new_alen - old_alen;
524 add_alloc_in_same_attr_seg:
525 		lcn = 0;
526 		if (is_mft) {
527 			/* MFT allocates clusters from the MFT zone. */
528 			pre_alloc = 0;
529 		} else if (is_ext) {
530 			/* No preallocation for sparse/compressed. */
531 			pre_alloc = 0;
532 		} else if (pre_alloc == -1) {
533 			pre_alloc = 0;
534 			if (type == ATTR_DATA && !name_len &&
535 			    sbi->options->prealloc) {
536 				pre_alloc = bytes_to_cluster(
537 						    sbi, get_pre_allocated(
538 								 new_size)) -
539 					    new_alen;
540 			}
541 
542 			/* Get the last LCN to allocate from. */
543 			if (old_alen &&
544 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
545 				lcn = SPARSE_LCN;
546 			}
547 
548 			if (lcn == SPARSE_LCN)
549 				lcn = 0;
550 			else if (lcn)
551 				lcn += 1;
552 
553 			free = wnd_zeroes(&sbi->used.bitmap);
554 			if (to_allocate > free) {
555 				err = -ENOSPC;
556 				goto out;
557 			}
558 
559 			if (pre_alloc && to_allocate + pre_alloc > free)
560 				pre_alloc = 0;
561 		}
562 
563 		vcn = old_alen;
564 
565 		if (is_ext) {
566 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
567 					   false)) {
568 				err = -ENOMEM;
569 				goto out;
570 			}
571 			alen = to_allocate;
572 		} else {
573 			/* Limit fragments: a packed run entry takes ~3 bytes. */
574 			err = attr_allocate_clusters(
575 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
576 				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
577 				is_mft ? 0 :
578 					 (sbi->record_size -
579 					  le32_to_cpu(rec->used) + 8) /
580 							 3 +
581 						 1,
582 				NULL, NULL);
583 			if (err)
584 				goto out;
585 		}
586 
587 		done += alen;
588 		vcn += alen;
589 		if (to_allocate > alen)
590 			to_allocate -= alen;
591 		else
592 			to_allocate = 0;
593 
594 pack_runs:
595 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
596 		if (err)
597 			goto undo_1;
598 
599 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
600 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
601 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
602 		mi_b->dirty = dirty = true;
603 
604 		if (next_svcn >= vcn && !to_allocate) {
605 			/* Normal way. Update attribute and exit. */
606 			attr_b->nres.data_size = cpu_to_le64(new_size);
607 			goto ok;
608 		}
609 
610 		/* At least two MFT records to avoid a recursive loop. */
611 		if (is_mft && next_svcn == vcn &&
612 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
613 			new_size = new_alloc_tmp;
614 			attr_b->nres.data_size = attr_b->nres.alloc_size;
615 			goto ok;
616 		}
617 
618 		if (le32_to_cpu(rec->used) < sbi->record_size) {
619 			old_alen = next_svcn;
620 			evcn = old_alen - 1;
621 			goto add_alloc_in_same_attr_seg;
622 		}
623 
624 		attr_b->nres.data_size = attr_b->nres.alloc_size;
625 		if (new_alloc_tmp < old_valid)
626 			attr_b->nres.valid_size = attr_b->nres.data_size;
627 
628 		if (type == ATTR_LIST) {
629 			err = ni_expand_list(ni);
630 			if (err)
631 				goto undo_2;
632 			if (next_svcn < vcn)
633 				goto pack_runs;
634 
635 			/* Layout of records is changed. */
636 			goto again;
637 		}
638 
639 		if (!ni->attr_list.size) {
640 			err = ni_create_attr_list(ni);
641 			/* In case of error layout of records is not changed. */
642 			if (err)
643 				goto undo_2;
644 			/* Layout of records is changed. */
645 		}
646 
647 		if (next_svcn >= vcn) {
648 			/* This is MFT data, repeat. */
649 			goto again;
650 		}
651 
652 		/* Insert new attribute segment. */
653 		err = ni_insert_nonresident(ni, type, name, name_len, run,
654 					    next_svcn, vcn - next_svcn,
655 					    attr_b->flags, &attr, &mi, NULL);
656 
657 		/*
658 		 * Layout of records may have changed.
659 		 * Find the base attribute to update.
660 		 */
661 		le_b = NULL;
662 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
663 				      NULL, &mi_b);
664 		if (!attr_b) {
665 			err = -EINVAL;
666 			goto bad_inode;
667 		}
668 
669 		if (err) {
670 			/* ni_insert_nonresident failed. */
671 			attr = NULL;
672 			goto undo_2;
673 		}
674 
675 		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
676 		if (ni->mi.rno != MFT_REC_MFT)
677 			run_truncate_head(run, evcn + 1);
678 
679 		svcn = le64_to_cpu(attr->nres.svcn);
680 		evcn = le64_to_cpu(attr->nres.evcn);
681 
682 		/*
683 		 * Attribute is in a consistent state.
684 		 * Save this point to restore to if the next steps fail.
685 		 */
686 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
687 		attr_b->nres.valid_size = attr_b->nres.data_size =
688 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
689 		mi_b->dirty = dirty = true;
690 		goto again_1;
691 	}
692 
693 	if (new_size != old_size ||
694 	    (new_alloc != old_alloc && !keep_prealloc)) {
695 		/*
696 		 * Truncate clusters. In simple case we have to:
697 		 *  - update packed run in 'mi'
698 		 *  - update attr->nres.evcn
699 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
700 		 *  - mark and trim clusters as free (vcn, lcn, len)
701 		 */
702 		CLST dlen = 0;
703 
704 		vcn = max(svcn, new_alen);
705 		new_alloc_tmp = (u64)vcn << cluster_bits;
706 
707 		if (vcn > svcn) {
708 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
709 			if (err)
710 				goto out;
711 		} else if (le && le->vcn) {
712 			u16 le_sz = le16_to_cpu(le->size);
713 
714 			/*
715 			 * NOTE: List entries for one attribute are always
716 			 * the same size. We deal with the last entry (vcn==0),
717 			 * and it is not the first one in the entries array
718 			 * (the list entry for the std attribute is always first),
719 			 * so it is safe to step back.
720 			 */
721 			mi_remove_attr(NULL, mi, attr);
722 
723 			if (!al_remove_le(ni, le)) {
724 				err = -EINVAL;
725 				goto bad_inode;
726 			}
727 
728 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
729 		} else {
730 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
731 			mi->dirty = true;
732 		}
733 
734 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
735 
736 		if (vcn == new_alen) {
737 			attr_b->nres.data_size = cpu_to_le64(new_size);
738 			if (new_size < old_valid)
739 				attr_b->nres.valid_size =
740 					attr_b->nres.data_size;
741 		} else {
742 			if (new_alloc_tmp <=
743 			    le64_to_cpu(attr_b->nres.data_size))
744 				attr_b->nres.data_size =
745 					attr_b->nres.alloc_size;
746 			if (new_alloc_tmp <
747 			    le64_to_cpu(attr_b->nres.valid_size))
748 				attr_b->nres.valid_size =
749 					attr_b->nres.alloc_size;
750 		}
751 		mi_b->dirty = dirty = true;
752 
753 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
754 					true);
755 		if (err)
756 			goto out;
757 
758 		if (is_ext) {
759 			/* dlen - number of actually deallocated clusters. */
760 			le64_sub_cpu(&attr_b->nres.total_size,
761 				     ((u64)dlen << cluster_bits));
762 		}
763 
764 		run_truncate(run, vcn);
765 
766 		if (new_alloc_tmp <= new_alloc)
767 			goto ok;
768 
769 		old_size = new_alloc_tmp;
770 		vcn = svcn - 1;
771 
772 		if (le == le_b) {
773 			attr = attr_b;
774 			mi = mi_b;
775 			evcn = svcn - 1;
776 			svcn = 0;
777 			goto next_le;
778 		}
779 
780 		if (le->type != type || le->name_len != name_len ||
781 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
782 			err = -EINVAL;
783 			goto bad_inode;
784 		}
785 
786 		err = ni_load_mi(ni, le, &mi);
787 		if (err)
788 			goto out;
789 
790 		attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
791 				    &le->id);
792 		if (!attr) {
793 			err = -EINVAL;
794 			goto bad_inode;
795 		}
796 		goto next_le_1;
797 	}
798 
799 ok:
800 	if (new_valid) {
801 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
802 
803 		if (attr_b->nres.valid_size != valid) {
804 			attr_b->nres.valid_size = valid;
805 			mi_b->dirty = true;
806 		}
807 	}
808 
809 ok1:
810 	if (ret)
811 		*ret = attr_b;
812 
813 	if (((type == ATTR_DATA && !name_len) ||
814 	     (type == ATTR_ALLOC && name == I30_NAME))) {
815 		/* Update inode_set_bytes. */
816 		if (attr_b->non_res) {
817 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
818 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
819 				inode_set_bytes(&ni->vfs_inode, new_alloc);
820 				dirty = true;
821 			}
822 		}
823 
824 		/* Don't forget to update duplicate information in parent. */
825 		if (dirty) {
826 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
827 			mark_inode_dirty(&ni->vfs_inode);
828 		}
829 	}
830 
831 	return 0;
832 
833 undo_2:
834 	vcn -= alen;
835 	attr_b->nres.data_size = cpu_to_le64(old_size);
836 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
837 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
838 
839 	/* Restore 'attr' and 'mi'. */
840 	if (attr)
841 		goto restore_run;
842 
843 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
844 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
845 		attr = attr_b;
846 		le = le_b;
847 		mi = mi_b;
848 	} else if (!le_b) {
849 		err = -EINVAL;
850 		goto bad_inode;
851 	} else {
852 		le = le_b;
853 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
854 				    &svcn, &mi);
855 		if (!attr)
856 			goto bad_inode;
857 	}
858 
859 restore_run:
860 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
861 		is_bad = true;
862 
863 undo_1:
864 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
865 
866 	run_truncate(run, vcn);
867 out:
868 	if (is_bad) {
869 bad_inode:
870 		_ntfs_bad_inode(&ni->vfs_inode);
871 	}
872 	return err;
873 }
874 
875 /*
876  * attr_data_get_block - Returns 'lcn' and 'len' for the given 'vcn'.
877  *
878  * @new == NULL means just get the current mapping for 'vcn'.
879  * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
880  * @zero - zero out newly allocated clusters.
881  *
882  *  NOTE:
883  *  - @new != NULL is used only for sparse or compressed attributes.
884  *  - newly allocated clusters are zeroed via blkdev_issue_zeroout.
885  */
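/*
 * Illustrative call (sketch, not from the original source): map one cluster
 * at 'vcn', allocating and zeroing it if it is currently a hole:
 *
 *	CLST lcn, len;
 *	bool new;
 *
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true);
 */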
886 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
887 			CLST *len, bool *new, bool zero)
888 {
889 	int err = 0;
890 	struct runs_tree *run = &ni->file.run;
891 	struct ntfs_sb_info *sbi;
892 	u8 cluster_bits;
893 	struct ATTRIB *attr, *attr_b;
894 	struct ATTR_LIST_ENTRY *le, *le_b;
895 	struct mft_inode *mi, *mi_b;
896 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
897 	CLST alloc, evcn;
898 	unsigned fr;
899 	u64 total_size, total_size0;
900 	int step = 0;
901 
902 	if (new)
903 		*new = false;
904 
905 	/* Try to find in cache. */
906 	down_read(&ni->file.run_lock);
907 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
908 		*len = 0;
909 	up_read(&ni->file.run_lock);
910 
911 	if (*len && (*lcn != SPARSE_LCN || !new))
912 		return 0; /* Fast normal way without allocation. */
913 
914 	/* No cluster in the cache, or we need to allocate a cluster in a hole. */
915 	sbi = ni->mi.sbi;
916 	cluster_bits = sbi->cluster_bits;
917 
918 	ni_lock(ni);
919 	down_write(&ni->file.run_lock);
920 
921 	/* Repeat the code above (under write lock). */
922 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
923 		*len = 0;
924 
925 	if (*len) {
926 		if (*lcn != SPARSE_LCN || !new)
927 			goto out; /* normal way without allocation. */
928 		if (clen > *len)
929 			clen = *len;
930 	}
931 
932 	le_b = NULL;
933 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
934 	if (!attr_b) {
935 		err = -ENOENT;
936 		goto out;
937 	}
938 
939 	if (!attr_b->non_res) {
940 		*lcn = RESIDENT_LCN;
941 		*len = 1;
942 		goto out;
943 	}
944 
945 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
946 	if (vcn >= asize) {
947 		if (new) {
948 			err = -EINVAL;
949 		} else {
950 			*len = 1;
951 			*lcn = SPARSE_LCN;
952 		}
953 		goto out;
954 	}
955 
956 	svcn = le64_to_cpu(attr_b->nres.svcn);
957 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
958 
959 	attr = attr_b;
960 	le = le_b;
961 	mi = mi_b;
962 
963 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
964 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
965 				    &mi);
966 		if (!attr) {
967 			err = -EINVAL;
968 			goto out;
969 		}
970 		svcn = le64_to_cpu(attr->nres.svcn);
971 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
972 	}
973 
974 	/* Load the actual information into the cache. */
975 	err = attr_load_runs(attr, ni, run, NULL);
976 	if (err)
977 		goto out;
978 
979 	/* Check for compressed frame. */
980 	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
981 				       &hint, run);
982 	if (err)
983 		goto out;
984 
985 	if (hint) {
986 		/* If the frame is compressed, don't touch it. */
987 		*lcn = COMPRESSED_LCN;
988 		/* Length to the end of the frame. */
989 		*len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
990 		err = 0;
991 		goto out;
992 	}
993 
994 	if (!*len) {
995 		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
996 			if (*lcn != SPARSE_LCN || !new)
997 				goto ok; /* Slow normal way without allocation. */
998 
999 			if (clen > *len)
1000 				clen = *len;
1001 		} else if (!new) {
1002 			/* Here we may return -ENOENT.
1003 			 * In any case the caller gets zero length. */
1004 			goto ok;
1005 		}
1006 	}
1007 
1008 	if (!is_attr_ext(attr_b)) {
1009 		/* The code below is only for sparse or compressed attributes. */
1010 		err = -EINVAL;
1011 		goto out;
1012 	}
1013 
1014 	vcn0 = vcn;
1015 	to_alloc = clen;
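	/* Cap the number of new fragments so the packed run still fits in
	 * the record: ~3 bytes per fragment (same estimate as above).
	 */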
1016 	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
1017 	/* Allocate frame-aligned clusters.
1018 	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed files.
1019 	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
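	/*
	 * Worked example (illustrative values): with c_unit == 4 the frame is
	 * 16 clusters, so a request of vcn0 == 18, clen == 3 is widened below
	 * to vcn == 16, to_alloc == 16 (one whole frame).
	 */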
1020 	if (attr_b->nres.c_unit) {
1021 		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1022 		CLST cmask = ~(clst_per_frame - 1);
1023 
1024 		/* Get frame aligned vcn and to_alloc. */
1025 		vcn = vcn0 & cmask;
1026 		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1027 		if (fr < clst_per_frame)
1028 			fr = clst_per_frame;
1029 		zero = true;
1030 
1031 		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
1032 		if (vcn < svcn || evcn1 <= vcn) {
1033 			struct ATTRIB *attr2;
1034 			/* Load runs for truncated vcn. */
1035 			attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
1036 					     0, &vcn, &mi);
1037 			if (!attr2) {
1038 				err = -EINVAL;
1039 				goto out;
1040 			}
1041 			evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
1042 			err = attr_load_runs(attr2, ni, run, NULL);
1043 			if (err)
1044 				goto out;
1045 		}
1046 	}
1047 
1048 	if (vcn + to_alloc > asize)
1049 		to_alloc = asize - vcn;
1050 
1051 	/* Get the last LCN to allocate from. */
1052 	hint = 0;
1053 
1054 	if (vcn > evcn1) {
1055 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1056 				   false)) {
1057 			err = -ENOMEM;
1058 			goto out;
1059 		}
1060 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1061 		hint = -1;
1062 	}
1063 
1064 	/* Allocate and zero out new clusters. */
1065 	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1066 				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1067 				     fr, lcn, len);
1068 	if (err)
1069 		goto out;
1070 	*new = true;
1071 	step = 1;
1072 
1073 	end = vcn + alen;
1074 	/* Save 'total_size0' to restore if error. */
1075 	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1076 	total_size = total_size0 + ((u64)alen << cluster_bits);
1077 
1078 	if (vcn != vcn0) {
1079 		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1080 			err = -EINVAL;
1081 			goto out;
1082 		}
1083 		if (*lcn == SPARSE_LCN) {
1084 			/* Internal error. Should not happen. */
1085 			WARN_ON(1);
1086 			err = -EINVAL;
1087 			goto out;
1088 		}
1089 		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1090 		if (vcn0 + *len > end)
1091 			*len = end - vcn0;
1092 	}
1093 
1094 repack:
1095 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1096 	if (err)
1097 		goto out;
1098 
1099 	attr_b->nres.total_size = cpu_to_le64(total_size);
1100 	inode_set_bytes(&ni->vfs_inode, total_size);
1101 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1102 
1103 	mi_b->dirty = true;
1104 	mark_inode_dirty(&ni->vfs_inode);
1105 
1106 	/* Stored [vcn : next_svcn) from [vcn : end). */
1107 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1108 
1109 	if (end <= evcn1) {
1110 		if (next_svcn == evcn1) {
1111 			/* Normal way. Update attribute and exit. */
1112 			goto ok;
1113 		}
1114 		/* Add new segment [next_svcn : evcn1). */
1115 		if (!ni->attr_list.size) {
1116 			err = ni_create_attr_list(ni);
1117 			if (err)
1118 				goto undo1;
1119 			/* Layout of records is changed. */
1120 			le_b = NULL;
1121 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1122 					      0, NULL, &mi_b);
1123 			if (!attr_b) {
1124 				err = -ENOENT;
1125 				goto out;
1126 			}
1127 
1128 			attr = attr_b;
1129 			le = le_b;
1130 			mi = mi_b;
1131 			goto repack;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * The code below may require an additional cluster (to extend the
1137 	 * attribute list) and/or one MFT record.
1138 	 * It is too complex to undo operations if -ENOSPC occurs deep inside
1139 	 * 'ni_insert_nonresident'.
1140 	 * Return -ENOSPC in advance here if there are no free clusters and no free MFT records.
1141 	 */
1142 	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1143 		/* Undo step 1. */
1144 		err = -ENOSPC;
1145 		goto undo1;
1146 	}
1147 
1148 	step = 2;
1149 	svcn = evcn1;
1150 
1151 	/* Look up the next attribute segment. */
1152 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1153 
1154 	if (!attr) {
1155 		/* Insert new attribute segment. */
1156 		goto ins_ext;
1157 	}
1158 
1159 	/* Try to update an existing attribute segment. */
1160 	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1161 	evcn = le64_to_cpu(attr->nres.evcn);
1162 
1163 	if (end < next_svcn)
1164 		end = next_svcn;
1165 	while (end > evcn) {
1166 		/* Remove segment [svcn : evcn]. */
1167 		mi_remove_attr(NULL, mi, attr);
1168 
1169 		if (!al_remove_le(ni, le)) {
1170 			err = -EINVAL;
1171 			goto out;
1172 		}
1173 
1174 		if (evcn + 1 >= alloc) {
1175 			/* Last attribute segment. */
1176 			evcn1 = evcn + 1;
1177 			goto ins_ext;
1178 		}
1179 
1180 		if (ni_load_mi(ni, le, &mi)) {
1181 			attr = NULL;
1182 			goto out;
1183 		}
1184 
1185 		attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1186 		if (!attr) {
1187 			err = -EINVAL;
1188 			goto out;
1189 		}
1190 		svcn = le64_to_cpu(attr->nres.svcn);
1191 		evcn = le64_to_cpu(attr->nres.evcn);
1192 	}
1193 
1194 	if (end < svcn)
1195 		end = svcn;
1196 
1197 	err = attr_load_runs(attr, ni, run, &end);
1198 	if (err)
1199 		goto out;
1200 
1201 	evcn1 = evcn + 1;
1202 	attr->nres.svcn = cpu_to_le64(next_svcn);
1203 	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1204 	if (err)
1205 		goto out;
1206 
1207 	le->vcn = cpu_to_le64(next_svcn);
1208 	ni->attr_list.dirty = true;
1209 	mi->dirty = true;
1210 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1211 
1212 ins_ext:
1213 	if (evcn1 > next_svcn) {
1214 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1215 					    next_svcn, evcn1 - next_svcn,
1216 					    attr_b->flags, &attr, &mi, NULL);
1217 		if (err)
1218 			goto out;
1219 	}
1220 ok:
1221 	run_truncate_around(run, vcn);
1222 out:
1223 	if (err && step > 1) {
1224 		/* Too complex to restore. */
1225 		_ntfs_bad_inode(&ni->vfs_inode);
1226 	}
1227 	up_write(&ni->file.run_lock);
1228 	ni_unlock(ni);
1229 
1230 	return err;
1231 
1232 undo1:
1233 	/* Undo step 1. */
1234 	attr_b->nres.total_size = cpu_to_le64(total_size0);
1235 	inode_set_bytes(&ni->vfs_inode, total_size0);
1236 
1237 	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1238 	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1239 	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1240 		_ntfs_bad_inode(&ni->vfs_inode);
1241 	}
1242 	goto out;
1243 }
1244 
1245 int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
1246 {
1247 	u64 vbo;
1248 	struct ATTRIB *attr;
1249 	u32 data_size;
1250 	size_t len;
1251 
1252 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1253 	if (!attr)
1254 		return -EINVAL;
1255 
1256 	if (attr->non_res)
1257 		return E_NTFS_NONRESIDENT;
1258 
1259 	vbo = folio->index << PAGE_SHIFT;
1260 	data_size = le32_to_cpu(attr->res.data_size);
1261 	if (vbo > data_size)
1262 		len = 0;
1263 	else
1264 		len = min(data_size - vbo, folio_size(folio));
1265 
1266 	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
1267 	folio_mark_uptodate(folio);
1268 
1269 	return 0;
1270 }
1271 
1272 int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
1273 {
1274 	u64 vbo;
1275 	struct mft_inode *mi;
1276 	struct ATTRIB *attr;
1277 	u32 data_size;
1278 
1279 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1280 	if (!attr)
1281 		return -EINVAL;
1282 
1283 	if (attr->non_res) {
1284 		/* Return special error code to check this case. */
1285 		return E_NTFS_NONRESIDENT;
1286 	}
1287 
1288 	vbo = folio->index << PAGE_SHIFT;
1289 	data_size = le32_to_cpu(attr->res.data_size);
1290 	if (vbo < data_size) {
1291 		char *data = resident_data(attr);
1292 		size_t len = min(data_size - vbo, folio_size(folio));
1293 
1294 		memcpy_from_folio(data + vbo, folio, 0, len);
1295 		mi->dirty = true;
1296 	}
1297 	ni->i_valid = data_size;
1298 
1299 	return 0;
1300 }
1301 
1302 /*
1303  * attr_load_runs_vcn - Load the runs of the segment containing @vcn.
1304  */
1305 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1306 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1307 		       CLST vcn)
1308 {
1309 	struct ATTRIB *attr;
1310 	int err;
1311 	CLST svcn, evcn;
1312 	u16 ro;
1313 
1314 	if (!ni) {
1315 		/* Is record corrupted? */
1316 		return -ENOENT;
1317 	}
1318 
1319 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1320 	if (!attr) {
1321 		/* Is record corrupted? */
1322 		return -ENOENT;
1323 	}
1324 
1325 	svcn = le64_to_cpu(attr->nres.svcn);
1326 	evcn = le64_to_cpu(attr->nres.evcn);
1327 
1328 	if (evcn < vcn || vcn < svcn) {
1329 		/* Is record corrupted? */
1330 		return -EINVAL;
1331 	}
1332 
1333 	ro = le16_to_cpu(attr->nres.run_off);
1334 
1335 	if (ro > le32_to_cpu(attr->size))
1336 		return -EINVAL;
1337 
1338 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1339 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1340 	if (err < 0)
1341 		return err;
1342 	return 0;
1343 }
1344 
1345 /*
1346  * attr_load_runs_range - Load runs for the given range [from, to).
1347  */
1348 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1349 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1350 			 u64 from, u64 to)
1351 {
1352 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1353 	u8 cluster_bits = sbi->cluster_bits;
1354 	CLST vcn;
1355 	CLST vcn_last = (to - 1) >> cluster_bits;
1356 	CLST lcn, clen;
1357 	int err;
1358 
1359 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1360 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1361 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1362 						 vcn);
1363 			if (err)
1364 				return err;
1365 			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1366 		}
1367 	}
1368 
1369 	return 0;
1370 }
1371 
1372 #ifdef CONFIG_NTFS3_LZX_XPRESS
1373 /*
1374  * attr_wof_frame_info
1375  *
1376  * Read the header of an Xpress/LZX file to get info about a frame.
1377  */
1378 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1379 			struct runs_tree *run, u64 frame, u64 frames,
1380 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1381 {
1382 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1383 	u64 vbo[2], off[2], wof_size;
1384 	u32 voff;
1385 	u8 bytes_per_off;
1386 	char *addr;
1387 	struct folio *folio;
1388 	int i, err;
1389 	__le32 *off32;
1390 	__le64 *off64;
1391 
1392 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1393 		/* File starts with array of 32 bit offsets. */
1394 		bytes_per_off = sizeof(__le32);
1395 		vbo[1] = frame << 2;
1396 		*vbo_data = frames << 2;
1397 	} else {
1398 		/* File starts with array of 64 bit offsets. */
1399 		bytes_per_off = sizeof(__le64);
1400 		vbo[1] = frame << 3;
1401 		*vbo_data = frames << 3;
1402 	}
1403 
1404 	/*
1405 	 * Read 4/8 bytes at [vbo - 4(8)] == the offset where the compressed frame starts.
1406 	 * Read 4/8 bytes at [vbo] == the offset where the compressed frame ends.
1407 	 */
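	/*
	 * On-disk layout implied by the code below (sketch): the file begins
	 * with an offset table of 'frames' entries; entry i holds the end
	 * offset of compressed frame i, counted from the end of the table,
	 * so frame i occupies [off[i-1], off[i]) with off[-1] == 0.
	 */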
1408 	if (!attr->non_res) {
1409 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1410 			_ntfs_bad_inode(&ni->vfs_inode);
1411 			return -EINVAL;
1412 		}
1413 		addr = resident_data(attr);
1414 
1415 		if (bytes_per_off == sizeof(__le32)) {
1416 			off32 = Add2Ptr(addr, vbo[1]);
1417 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1418 			off[1] = le32_to_cpu(off32[0]);
1419 		} else {
1420 			off64 = Add2Ptr(addr, vbo[1]);
1421 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1422 			off[1] = le64_to_cpu(off64[0]);
1423 		}
1424 
1425 		*vbo_data += off[0];
1426 		*ondisk_size = off[1] - off[0];
1427 		return 0;
1428 	}
1429 
1430 	wof_size = le64_to_cpu(attr->nres.data_size);
1431 	down_write(&ni->file.run_lock);
1432 	folio = ni->file.offs_folio;
1433 	if (!folio) {
1434 		folio = folio_alloc(GFP_KERNEL, 0);
1435 		if (!folio) {
1436 			err = -ENOMEM;
1437 			goto out;
1438 		}
1439 		folio->index = -1;
1440 		ni->file.offs_folio = folio;
1441 	}
1442 	folio_lock(folio);
1443 	addr = folio_address(folio);
1444 
1445 	if (vbo[1]) {
1446 		voff = vbo[1] & (PAGE_SIZE - 1);
1447 		vbo[0] = vbo[1] - bytes_per_off;
1448 		i = 0;
1449 	} else {
1450 		voff = 0;
1451 		vbo[0] = 0;
1452 		off[0] = 0;
1453 		i = 1;
1454 	}
1455 
1456 	do {
1457 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1458 
1459 		if (index != folio->index) {
1460 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1461 			u64 to = min(from + PAGE_SIZE, wof_size);
1462 
1463 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1464 						   ARRAY_SIZE(WOF_NAME), run,
1465 						   from, to);
1466 			if (err)
1467 				goto out1;
1468 
1469 			err = ntfs_read_run(sbi, run, addr, from, to - from);
1470 			if (err) {
1471 				folio->index = -1;
1472 				goto out1;
1473 			}
1474 			folio->index = index;
1475 		}
1476 
1477 		if (i) {
1478 			if (bytes_per_off == sizeof(__le32)) {
1479 				off32 = Add2Ptr(addr, voff);
1480 				off[1] = le32_to_cpu(*off32);
1481 			} else {
1482 				off64 = Add2Ptr(addr, voff);
1483 				off[1] = le64_to_cpu(*off64);
1484 			}
1485 		} else if (!voff) {
1486 			if (bytes_per_off == sizeof(__le32)) {
1487 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1488 				off[0] = le32_to_cpu(*off32);
1489 			} else {
1490 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1491 				off[0] = le64_to_cpu(*off64);
1492 			}
1493 		} else {
1494 			/* Two values in one page. */
1495 			if (bytes_per_off == sizeof(__le32)) {
1496 				off32 = Add2Ptr(addr, voff);
1497 				off[0] = le32_to_cpu(off32[-1]);
1498 				off[1] = le32_to_cpu(off32[0]);
1499 			} else {
1500 				off64 = Add2Ptr(addr, voff);
1501 				off[0] = le64_to_cpu(off64[-1]);
1502 				off[1] = le64_to_cpu(off64[0]);
1503 			}
1504 			break;
1505 		}
1506 	} while (++i < 2);
1507 
1508 	*vbo_data += off[0];
1509 	*ondisk_size = off[1] - off[0];
1510 
1511 out1:
1512 	folio_unlock(folio);
1513 out:
1514 	up_write(&ni->file.run_lock);
1515 	return err;
1516 }
1517 #endif
1518 
1519 /*
1520  * attr_is_frame_compressed - Used to detect a compressed frame.
1521  *
1522  * attr - base (primary) attribute segment.
1523  * run  - run to use, usually == &ni->file.run.
1524  * Only base segments contain a valid 'attr->nres.c_unit'.
1525  */
1526 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1527 			     CLST frame, CLST *clst_data, struct runs_tree *run)
1528 {
1529 	int err;
1530 	u32 clst_frame;
1531 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1532 	size_t idx;
1533 
1534 	*clst_data = 0;
1535 
1536 	if (!is_attr_compressed(attr))
1537 		return 0;
1538 
1539 	if (!attr->non_res)
1540 		return 0;
1541 
1542 	clst_frame = 1u << attr->nres.c_unit;
1543 	vcn = frame * clst_frame;
1544 
1545 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1546 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1547 					 attr->name_len, run, vcn);
1548 		if (err)
1549 			return err;
1550 
1551 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1552 			return -EINVAL;
1553 	}
1554 
1555 	if (lcn == SPARSE_LCN) {
1556 		/* Sparse frame. */
1557 		return 0;
1558 	}
1559 
1560 	if (clen >= clst_frame) {
1561 		/*
1562 		 * The frame is not compressed because
1563 		 * it does not contain any sparse clusters.
1564 		 */
1565 		*clst_data = clst_frame;
1566 		return 0;
1567 	}
1568 
1569 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1570 	slen = 0;
1571 	*clst_data = clen;
1572 
1573 	/*
1574 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1575 	 * Check next fragments.
1576 	 */
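	/*
	 * Example (illustrative): a 16-cluster frame stored as 10 data
	 * clusters followed by 6 sparse clusters is compressed; 16 data
	 * clusters with no sparse tail is not.
	 */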
1577 	while ((vcn += clen) < alen) {
1578 		vcn_next = vcn;
1579 
1580 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1581 		    vcn_next != vcn) {
1582 			err = attr_load_runs_vcn(ni, attr->type,
1583 						 attr_name(attr),
1584 						 attr->name_len, run, vcn_next);
1585 			if (err)
1586 				return err;
1587 			vcn = vcn_next;
1588 
1589 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1590 				return -EINVAL;
1591 		}
1592 
1593 		if (lcn == SPARSE_LCN) {
1594 			slen += clen;
1595 		} else {
1596 			if (slen) {
1597 				/*
1598 				 * A data fragment follows sparse clusters:
1599 				 * not a valid compressed frame layout.
1600 				 */
1601 				return -EINVAL;
1602 			}
1603 			*clst_data += clen;
1604 		}
1605 
1606 		if (*clst_data + slen >= clst_frame) {
1607 			if (!slen) {
1608 				/*
1609 				 * There are no sparse clusters in this frame,
1610 				 * so it is not compressed.
1611 				 */
1612 				*clst_data = clst_frame;
1613 			} else {
1614 				/* Frame is compressed. */
1615 			}
1616 			break;
1617 		}
1618 	}
1619 
1620 	return 0;
1621 }
1622 
1623 /*
1624  * attr_allocate_frame - Allocate/free clusters for @frame.
1625  *
1626  * Assumed: down_write(&ni->file.run_lock);
1627  */
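/*
 * Illustrative call (sketch, not from the original source): store a frame
 * that compressed down to 'compr_size' bytes; the rest becomes sparse:
 *
 *	err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
 */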
1628 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1629 			u64 new_valid)
1630 {
1631 	int err = 0;
1632 	struct runs_tree *run = &ni->file.run;
1633 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1634 	struct ATTRIB *attr = NULL, *attr_b;
1635 	struct ATTR_LIST_ENTRY *le, *le_b;
1636 	struct mft_inode *mi, *mi_b;
1637 	CLST svcn, evcn1, next_svcn, len;
1638 	CLST vcn, end, clst_data;
1639 	u64 total_size, valid_size, data_size;
1640 
1641 	le_b = NULL;
1642 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1643 	if (!attr_b)
1644 		return -ENOENT;
1645 
1646 	if (!is_attr_ext(attr_b))
1647 		return -EINVAL;
1648 
1649 	vcn = frame << NTFS_LZNT_CUNIT;
1650 	total_size = le64_to_cpu(attr_b->nres.total_size);
1651 
1652 	svcn = le64_to_cpu(attr_b->nres.svcn);
1653 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1654 	data_size = le64_to_cpu(attr_b->nres.data_size);
1655 
1656 	if (svcn <= vcn && vcn < evcn1) {
1657 		attr = attr_b;
1658 		le = le_b;
1659 		mi = mi_b;
1660 	} else if (!le_b) {
1661 		err = -EINVAL;
1662 		goto out;
1663 	} else {
1664 		le = le_b;
1665 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1666 				    &mi);
1667 		if (!attr) {
1668 			err = -EINVAL;
1669 			goto out;
1670 		}
1671 		svcn = le64_to_cpu(attr->nres.svcn);
1672 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1673 	}
1674 
1675 	err = attr_load_runs(attr, ni, run, NULL);
1676 	if (err)
1677 		goto out;
1678 
1679 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
1680 	if (err)
1681 		goto out;
1682 
1683 	total_size -= (u64)clst_data << sbi->cluster_bits;
1684 
1685 	len = bytes_to_cluster(sbi, compr_size);
1686 
1687 	if (len == clst_data)
1688 		goto out;
1689 
1690 	if (len < clst_data) {
1691 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1692 					NULL, true);
1693 		if (err)
1694 			goto out;
1695 
1696 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1697 				   false)) {
1698 			err = -ENOMEM;
1699 			goto out;
1700 		}
1701 		end = vcn + clst_data;
1702 		/* Run contains updated range [vcn + len : end). */
1703 	} else {
1704 		CLST alen, hint = 0;
1705 		/* Get the last LCN to allocate from. */
1706 		if (vcn + clst_data &&
1707 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1708 				      NULL)) {
1709 			hint = -1;
1710 		}
1711 
1712 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1713 					     hint + 1, len - clst_data, NULL,
1714 					     ALLOCATE_DEF, &alen, 0, NULL,
1715 					     NULL);
1716 		if (err)
1717 			goto out;
1718 
1719 		end = vcn + len;
1720 		/* Run contains updated range [vcn + clst_data : end). */
1721 	}
1722 
1723 	total_size += (u64)len << sbi->cluster_bits;
1724 
1725 repack:
1726 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1727 	if (err)
1728 		goto out;
1729 
1730 	attr_b->nres.total_size = cpu_to_le64(total_size);
1731 	inode_set_bytes(&ni->vfs_inode, total_size);
1732 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1733 
1734 	mi_b->dirty = true;
1735 	mark_inode_dirty(&ni->vfs_inode);
1736 
1737 	/* Stored [vcn : next_svcn) from [vcn : end). */
1738 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1739 
1740 	if (end <= evcn1) {
1741 		if (next_svcn == evcn1) {
1742 			/* Normal way. Update attribute and exit. */
1743 			goto ok;
1744 		}
1745 		/* Add new segment [next_svcn : evcn1). */
1746 		if (!ni->attr_list.size) {
1747 			err = ni_create_attr_list(ni);
1748 			if (err)
1749 				goto out;
1750 			/* Layout of records is changed. */
1751 			le_b = NULL;
1752 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1753 					      0, NULL, &mi_b);
1754 			if (!attr_b) {
1755 				err = -ENOENT;
1756 				goto out;
1757 			}
1758 
1759 			attr = attr_b;
1760 			le = le_b;
1761 			mi = mi_b;
1762 			goto repack;
1763 		}
1764 	}
1765 
1766 	svcn = evcn1;
1767 
1768 	/* Look up the next attribute segment. */
1769 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1770 
1771 	if (attr) {
1772 		CLST alloc = bytes_to_cluster(
1773 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1774 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1775 
1776 		if (end < next_svcn)
1777 			end = next_svcn;
1778 		while (end > evcn) {
1779 			/* Remove segment [svcn : evcn]. */
1780 			mi_remove_attr(NULL, mi, attr);
1781 
1782 			if (!al_remove_le(ni, le)) {
1783 				err = -EINVAL;
1784 				goto out;
1785 			}
1786 
1787 			if (evcn + 1 >= alloc) {
1788 				/* Last attribute segment. */
1789 				evcn1 = evcn + 1;
1790 				goto ins_ext;
1791 			}
1792 
1793 			if (ni_load_mi(ni, le, &mi)) {
1794 				attr = NULL;
1795 				goto out;
1796 			}
1797 
1798 			attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
1799 					    &le->id);
1800 			if (!attr) {
1801 				err = -EINVAL;
1802 				goto out;
1803 			}
1804 			svcn = le64_to_cpu(attr->nres.svcn);
1805 			evcn = le64_to_cpu(attr->nres.evcn);
1806 		}
1807 
1808 		if (end < svcn)
1809 			end = svcn;
1810 
1811 		err = attr_load_runs(attr, ni, run, &end);
1812 		if (err)
1813 			goto out;
1814 
1815 		evcn1 = evcn + 1;
1816 		attr->nres.svcn = cpu_to_le64(next_svcn);
1817 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1818 		if (err)
1819 			goto out;
1820 
1821 		le->vcn = cpu_to_le64(next_svcn);
1822 		ni->attr_list.dirty = true;
1823 		mi->dirty = true;
1824 
1825 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1826 	}
1827 ins_ext:
1828 	if (evcn1 > next_svcn) {
1829 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1830 					    next_svcn, evcn1 - next_svcn,
1831 					    attr_b->flags, &attr, &mi, NULL);
1832 		if (err)
1833 			goto out;
1834 	}
1835 ok:
1836 	run_truncate_around(run, vcn);
1837 out:
1838 	if (attr_b) {
1839 		if (new_valid > data_size)
1840 			new_valid = data_size;
1841 
1842 		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1843 		if (new_valid != valid_size) {
1844 			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1845 			mi_b->dirty = true;
1846 		}
1847 	}
1848 
1849 	return err;
1850 }
1851 
1852 /*
1853  * attr_collapse_range - Collapse a range in a file.
1854  */
1855 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1856 {
1857 	int err = 0;
1858 	struct runs_tree *run = &ni->file.run;
1859 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1860 	struct ATTRIB *attr = NULL, *attr_b;
1861 	struct ATTR_LIST_ENTRY *le, *le_b;
1862 	struct mft_inode *mi, *mi_b;
1863 	CLST svcn, evcn1, len, dealloc, alen, done;
1864 	CLST vcn, end;
1865 	u64 valid_size, data_size, alloc_size, total_size;
1866 	u32 mask;
1867 	__le16 a_flags;
1868 
1869 	if (!bytes)
1870 		return 0;
1871 
1872 	le_b = NULL;
1873 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1874 	if (!attr_b)
1875 		return -ENOENT;
1876 
1877 	if (!attr_b->non_res) {
1878 		/* Attribute is resident. Nothing to do? */
1879 		return 0;
1880 	}
1881 
1882 	data_size = le64_to_cpu(attr_b->nres.data_size);
1883 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1884 	a_flags = attr_b->flags;
1885 
1886 	if (is_attr_ext(attr_b)) {
1887 		total_size = le64_to_cpu(attr_b->nres.total_size);
1888 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1889 	} else {
1890 		total_size = alloc_size;
1891 		mask = sbi->cluster_mask;
1892 	}
1893 
1894 	if ((vbo & mask) || (bytes & mask)) {
1895 		/* Only cluster-aligned ranges can be collapsed. */
1896 		return -EINVAL;
1897 	}
1898 
1899 	if (vbo > data_size)
1900 		return -EINVAL;
1901 
1902 	down_write(&ni->file.run_lock);
1903 
1904 	if (vbo + bytes >= data_size) {
1905 		u64 new_valid = min(ni->i_valid, vbo);
1906 
1907 		/* Simply truncate the file at 'vbo'. */
1908 		truncate_setsize(&ni->vfs_inode, vbo);
1909 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1910 				    &new_valid, true, NULL);
1911 
1912 		if (!err && new_valid < ni->i_valid)
1913 			ni->i_valid = new_valid;
1914 
1915 		goto out;
1916 	}
1917 
1918 	/*
1919 	 * Enumerate all attribute segments and collapse.
1920 	 */
1921 	alen = alloc_size >> sbi->cluster_bits;
1922 	vcn = vbo >> sbi->cluster_bits;
1923 	len = bytes >> sbi->cluster_bits;
1924 	end = vcn + len;
1925 	dealloc = 0;
1926 	done = 0;
1927 
1928 	svcn = le64_to_cpu(attr_b->nres.svcn);
1929 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1930 
1931 	if (svcn <= vcn && vcn < evcn1) {
1932 		attr = attr_b;
1933 		le = le_b;
1934 		mi = mi_b;
1935 		goto check_seg;
1936 	}
1937 
1938 	if (!le_b) {
1939 		err = -EINVAL;
1940 		goto out;
1941 	}
1942 
1943 	le = le_b;
1944 	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
1945 	if (!attr) {
1946 		err = -EINVAL;
1947 		goto out;
1948 	}
1949 
1950 	for (;;) {
1951 		CLST vcn1, eat, next_svcn;
1952 
1953 		svcn = le64_to_cpu(attr->nres.svcn);
1954 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1955 
1956 check_seg:
1957 		if (svcn >= end) {
1958 			/* Shift VCN. */
1959 			attr->nres.svcn = cpu_to_le64(svcn - len);
1960 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1961 			if (le) {
1962 				le->vcn = attr->nres.svcn;
1963 				ni->attr_list.dirty = true;
1964 			}
1965 			mi->dirty = true;
1966 			goto next_attr;
1967 		}
1968 
1969 		run_truncate(run, 0);
1970 		err = attr_load_runs(attr, ni, run, &svcn);
1971 		if (err)
1972 			goto out;
1973 
1974 		vcn1 = vcn + done; /* original vcn in attr/run. */
1975 		eat = min(end, evcn1) - vcn1;
1976 
1977 		err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true);
1978 		if (err)
1979 			goto out;
1980 
1981 		if (svcn + eat < evcn1) {
1982 			/* Collapse a part of this attribute segment. */
1983 
1984 			if (!run_collapse_range(run, vcn1, eat, done)) {
1985 				err = -ENOMEM;
1986 				goto out;
1987 			}
1988 
1989 			if (svcn >= vcn) {
1990 				/* Shift VCN */
1991 				attr->nres.svcn = cpu_to_le64(vcn);
1992 				if (le && attr->nres.svcn != le->vcn) {
1993 					le->vcn = attr->nres.svcn;
1994 					ni->attr_list.dirty = true;
1995 				}
1996 			}
1997 
1998 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1999 			if (err)
2000 				goto out;
2001 
2002 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2003 			if (next_svcn + eat + done < evcn1) {
2004 				err = ni_insert_nonresident(
2005 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
2006 					evcn1 - eat - next_svcn, a_flags, &attr,
2007 					&mi, &le);
2008 				if (err)
2009 					goto out;
2010 
2011 				/* Layout of records may have changed. */
2012 				attr_b = NULL;
2013 			}
2014 
2015 			/* Free all allocated memory. */
2016 			run_truncate(run, 0);
2017 			done += eat;
2018 		} else {
2019 			u16 le_sz;
2020 
2021 			/* Delete this attribute segment. */
2022 			mi_remove_attr(NULL, mi, attr);
2023 			if (!le)
2024 				break;
2025 
2026 			le_sz = le16_to_cpu(le->size);
2027 			if (!al_remove_le(ni, le)) {
2028 				err = -EINVAL;
2029 				goto out;
2030 			}
2031 
2032 			done += evcn1 - svcn;
2033 			if (evcn1 >= alen)
2034 				break;
2035 
2036 			if (!svcn) {
2037 				/* Load next record that contains this attribute. */
2038 				if (ni_load_mi(ni, le, &mi)) {
2039 					err = -EINVAL;
2040 					goto out;
2041 				}
2042 
2043 				/* Look for required attribute. */
2044 				attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
2045 						    NULL, 0, &le->id);
2046 				if (!attr) {
2047 					err = -EINVAL;
2048 					goto out;
2049 				}
2050 				continue;
2051 			}
2052 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2053 		}
2054 
2055 next_attr:
2056 		if (evcn1 >= alen)
2057 			break;
2058 
2059 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2060 		if (!attr) {
2061 			err = -EINVAL;
2062 			goto out;
2063 		}
2064 	}
2065 
2066 	if (!attr_b) {
2067 		le_b = NULL;
2068 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2069 				      &mi_b);
2070 		if (!attr_b) {
2071 			err = -ENOENT;
2072 			goto out;
2073 		}
2074 	}
2075 
2076 	data_size -= bytes;
2077 	valid_size = ni->i_valid;
2078 	if (vbo + bytes <= valid_size)
2079 		valid_size -= bytes;
2080 	else if (vbo < valid_size)
2081 		valid_size = vbo;
2082 
2083 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2084 	attr_b->nres.data_size = cpu_to_le64(data_size);
2085 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2086 	total_size -= (u64)dealloc << sbi->cluster_bits;
2087 	if (is_attr_ext(attr_b))
2088 		attr_b->nres.total_size = cpu_to_le64(total_size);
2089 	mi_b->dirty = true;
2090 
2091 	/* Update inode size. */
2092 	ni->i_valid = valid_size;
2093 	i_size_write(&ni->vfs_inode, data_size);
2094 	inode_set_bytes(&ni->vfs_inode, total_size);
2095 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2096 	mark_inode_dirty(&ni->vfs_inode);
2097 
2098 out:
2099 	up_write(&ni->file.run_lock);
2100 	if (err)
2101 		_ntfs_bad_inode(&ni->vfs_inode);
2102 
2103 	return err;
2104 }
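
/*
 * Editorial sketch (not part of the original file): how a fallocate-style
 * caller might drive attr_punch_hole() below. On E_NTFS_NOTALIGNED the
 * caller is expected to zero the partial frames itself and retry with the
 * frame-aligned middle; 'ntfs_zero_user_range' is a hypothetical helper.
 *
 *	u32 frame_size;
 *	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);
 *
 *	if (err == E_NTFS_NOTALIGNED) {
 *		u64 head = ALIGN(vbo, frame_size);
 *		u64 tail = ALIGN_DOWN(vbo + bytes, frame_size);
 *
 *		if (head >= tail) {
 *			err = ntfs_zero_user_range(ni, vbo, bytes);
 *		} else {
 *			err = ntfs_zero_user_range(ni, vbo, head - vbo);
 *			if (!err)
 *				err = ntfs_zero_user_range(ni, tail,
 *							   vbo + bytes - tail);
 *			if (!err) /* Now aligned: NULL forbids realignment. */
 *				err = attr_punch_hole(ni, head, tail - head,
 *						      NULL);
 *		}
 *	}
 */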
2105 
2106 /*
2107  * attr_punch_hole
2108  *
2109  * Only for sparse/compressed attributes; not for normal files.
2110  */
2111 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2112 {
2113 	int err = 0;
2114 	struct runs_tree *run = &ni->file.run;
2115 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2116 	struct ATTRIB *attr = NULL, *attr_b;
2117 	struct ATTR_LIST_ENTRY *le, *le_b;
2118 	struct mft_inode *mi, *mi_b;
2119 	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2120 	u64 total_size, alloc_size;
2121 	u32 mask;
2122 	__le16 a_flags;
2123 	struct runs_tree run2;
2124 
2125 	if (!bytes)
2126 		return 0;
2127 
2128 	le_b = NULL;
2129 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2130 	if (!attr_b)
2131 		return -ENOENT;
2132 
2133 	if (!attr_b->non_res) {
2134 		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2135 		u32 from, to;
2136 
2137 		if (vbo > data_size)
2138 			return 0;
2139 
2140 		from = vbo;
2141 		to = min_t(u64, vbo + bytes, data_size);
2142 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2143 		return 0;
2144 	}
2145 
2146 	if (!is_attr_ext(attr_b))
2147 		return -EOPNOTSUPP;
2148 
2149 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2150 	total_size = le64_to_cpu(attr_b->nres.total_size);
2151 
2152 	if (vbo >= alloc_size) {
2153 		/* NOTE: Punching beyond the allocated size is allowed; it is a no-op. */
2154 		return 0;
2155 	}
2156 
2157 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
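	/*
	 * 'mask' covers one frame: cluster_size << c_unit bytes (for
	 * compressed attributes c_unit is typically 4, i.e. a 16-cluster frame).
	 */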
2158 
2159 	bytes += vbo;
2160 	if (bytes > alloc_size)
2161 		bytes = alloc_size;
2162 	bytes -= vbo;
2163 
2164 	if ((vbo & mask) || (bytes & mask)) {
2165 		/* Partial frames must be zeroed by the caller. */
2166 		if (frame_size == NULL) {
2167 			/* Caller insists range is aligned. */
2168 			return -EINVAL;
2169 		}
2170 		*frame_size = mask + 1;
2171 		return E_NTFS_NOTALIGNED;
2172 	}
2173 
2174 	down_write(&ni->file.run_lock);
2175 	run_init(&run2);
2176 	run_truncate(run, 0);
2177 
2178 	/*
2179 	 * Enumerate all attribute segments and punch hole where necessary.
2180 	 */
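	/*
	 * Design note: each segment is processed in three phases - a dry run
	 * of run_deallocate_ex() with sbi == NULL that only counts clusters,
	 * a clone of the run so a failed pack/insert can be undone, and only
	 * then the real deallocation, so on-disk runs never point at freed
	 * clusters.
	 */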
2181 	alen = alloc_size >> sbi->cluster_bits;
2182 	vcn = vbo >> sbi->cluster_bits;
2183 	len = bytes >> sbi->cluster_bits;
2184 	end = vcn + len;
2185 	hole = 0;
2186 
2187 	svcn = le64_to_cpu(attr_b->nres.svcn);
2188 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2189 	a_flags = attr_b->flags;
2190 
2191 	if (svcn <= vcn && vcn < evcn1) {
2192 		attr = attr_b;
2193 		le = le_b;
2194 		mi = mi_b;
2195 	} else if (!le_b) {
2196 		err = -EINVAL;
2197 		goto bad_inode;
2198 	} else {
2199 		le = le_b;
2200 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2201 				    &mi);
2202 		if (!attr) {
2203 			err = -EINVAL;
2204 			goto bad_inode;
2205 		}
2206 
2207 		svcn = le64_to_cpu(attr->nres.svcn);
2208 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2209 	}
2210 
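	/* Here [svcn, evcn1) of 'attr' (record 'mi', entry 'le') contains vcn. */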
2211 	while (svcn < end) {
2212 		CLST vcn1, zero, hole2 = hole;
2213 
2214 		err = attr_load_runs(attr, ni, run, &svcn);
2215 		if (err)
2216 			goto done;
2217 		vcn1 = max(vcn, svcn);
2218 		zero = min(end, evcn1) - vcn1;
2219 
2220 		/*
2221 		 * Check the range [vcn1, vcn1 + zero):
2222 		 * calculate how many allocated clusters it contains,
2223 		 * without doing any destructive actions.
2224 		 */
2225 		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2226 		if (err)
2227 			goto done;
2228 
2229 		/* Check if the required range is already a hole. */
2230 		if (hole2 == hole)
2231 			goto next_attr;
2232 
2233 		/* Clone the run so the changes below can be undone. */
2234 		err = run_clone(run, &run2);
2235 		if (err)
2236 			goto done;
2237 
2238 		/* Make the range [vcn1, vcn1 + zero) a sparse hole. */
2239 		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2240 			err = -ENOMEM;
2241 			goto done;
2242 		}
2243 
2244 		/* Update run in attribute segment. */
2245 		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2246 		if (err)
2247 			goto done;
2248 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2249 		if (next_svcn < evcn1) {
2250 			/* Insert new attribute segment. */
2251 			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2252 						    next_svcn,
2253 						    evcn1 - next_svcn, a_flags,
2254 						    &attr, &mi, &le);
2255 			if (err)
2256 				goto undo_punch;
2257 
2258 			/* Layout of records may have changed. */
2259 			attr_b = NULL;
2260 		}
2261 
2262 		/* Real deallocate. Should not fail. */
2263 		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2264 
2265 next_attr:
2266 		/* Free all allocated memory. */
2267 		run_truncate(run, 0);
2268 
2269 		if (evcn1 >= alen)
2270 			break;
2271 
2272 		/* Get next attribute segment. */
2273 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2274 		if (!attr) {
2275 			err = -EINVAL;
2276 			goto bad_inode;
2277 		}
2278 
2279 		svcn = le64_to_cpu(attr->nres.svcn);
2280 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2281 	}
2282 
2283 done:
2284 	if (!hole)
2285 		goto out;
2286 
2287 	if (!attr_b) {
2288 		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2289 				      &mi_b);
2290 		if (!attr_b) {
2291 			err = -EINVAL;
2292 			goto bad_inode;
2293 		}
2294 	}
2295 
2296 	total_size -= (u64)hole << sbi->cluster_bits;
2297 	attr_b->nres.total_size = cpu_to_le64(total_size);
2298 	mi_b->dirty = true;
2299 
2300 	/* Update inode size. */
2301 	inode_set_bytes(&ni->vfs_inode, total_size);
2302 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2303 	mark_inode_dirty(&ni->vfs_inode);
2304 
2305 out:
2306 	run_close(&run2);
2307 	up_write(&ni->file.run_lock);
2308 	return err;
2309 
2310 bad_inode:
2311 	_ntfs_bad_inode(&ni->vfs_inode);
2312 	goto out;
2313 
2314 undo_punch:
2315 	/*
2316 	 * Restore the packed runs.
2317 	 * 'mi_pack_runs' should not fail because we restore the original runs.
2318 	 */
2319 	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2320 		goto bad_inode;
2321 
2322 	goto done;
2323 }
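
/*
 * Editorial illustration (hypothetical layout): punching clusters [1, 3)
 * out of a three-cluster extent replaces them with SPARSE_LCN and returns
 * the backing clusters to the free-space bitmap:
 *
 *	before: vcn 0 1 2  ->  lcn 100 101 102
 *	after:  vcn 0 1 2  ->  lcn 100  --  --   (-- == SPARSE_LCN)
 *
 * total_size drops by two clusters; data_size and valid_size are unchanged.
 */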
2324 
2325 /*
2326  * attr_insert_range - Insert a range (hole) into a file.
2327  * Only for sparse/compressed attributes; not for normal files.
2328  */
2329 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2330 {
2331 	int err = 0;
2332 	struct runs_tree *run = &ni->file.run;
2333 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2334 	struct ATTRIB *attr = NULL, *attr_b;
2335 	struct ATTR_LIST_ENTRY *le, *le_b;
2336 	struct mft_inode *mi, *mi_b;
2337 	CLST vcn, svcn, evcn1, len, next_svcn;
2338 	u64 data_size, alloc_size;
2339 	u32 mask;
2340 	__le16 a_flags;
2341 
2342 	if (!bytes)
2343 		return 0;
2344 
2345 	le_b = NULL;
2346 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2347 	if (!attr_b)
2348 		return -ENOENT;
2349 
2350 	if (!is_attr_ext(attr_b)) {
2351 		/* Already checked by the caller. See fallocate. */
2352 		return -EOPNOTSUPP;
2353 	}
2354 
2355 	if (!attr_b->non_res) {
2356 		data_size = le32_to_cpu(attr_b->res.data_size);
2357 		alloc_size = data_size;
2358 		mask = sbi->cluster_mask; /* cluster_size - 1 */
2359 	} else {
2360 		data_size = le64_to_cpu(attr_b->nres.data_size);
2361 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2362 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2363 	}
2364 
2365 	if (vbo >= data_size) {
2366 		/*
2367 		 * Inserting a range at or beyond the end of the file is not
2368 		 * allowed: if the offset is equal to or greater than the end
2369 		 * of the file, an error is returned. To append a hole at the
2370 		 * end of a file, ftruncate(2) should be used instead.
2371 		 */
2372 		return -EINVAL;
2373 	}
2374 
2375 	if ((vbo & mask) || (bytes & mask)) {
2376 		/* Only frame-aligned ranges may be inserted. */
2377 		return -EINVAL;
2378 	}
2379 
2380 	/*
2381 	 * valid_size <= data_size <= alloc_size
2382 	 * Check that the new alloc_size does not exceed the maximum possible.
2383 	 */
2384 	if (bytes > sbi->maxbytes_sparse - alloc_size)
2385 		return -EFBIG;
2386 
2387 	vcn = vbo >> sbi->cluster_bits;
2388 	len = bytes >> sbi->cluster_bits;
2389 
2390 	down_write(&ni->file.run_lock);
2391 
2392 	if (!attr_b->non_res) {
2393 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2394 				    data_size + bytes, NULL, false, NULL);
2395 
2396 		le_b = NULL;
2397 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2398 				      &mi_b);
2399 		if (!attr_b) {
2400 			err = -EINVAL;
2401 			goto bad_inode;
2402 		}
2403 
2404 		if (err)
2405 			goto out;
2406 
2407 		if (!attr_b->non_res) {
2408 			/* Still resident. */
2409 			char *data = Add2Ptr(attr_b,
2410 					     le16_to_cpu(attr_b->res.data_off));
2411 			/* Shift the tail up by 'bytes'; zero the inserted range. */
2412 			memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2413 			memset(data + vbo, 0, bytes);
2414 			goto done;
2415 		}
2416 
2417 		/* The resident attribute has become non-resident. */
2418 		data_size = le64_to_cpu(attr_b->nres.data_size);
2419 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2420 	}
2421 
2422 	/*
2423 	 * Enumerate all attribute segments and shift start vcn.
2424 	 */
2425 	a_flags = attr_b->flags;
2426 	svcn = le64_to_cpu(attr_b->nres.svcn);
2427 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2428 
2429 	if (svcn <= vcn && vcn < evcn1) {
2430 		attr = attr_b;
2431 		le = le_b;
2432 		mi = mi_b;
2433 	} else if (!le_b) {
2434 		err = -EINVAL;
2435 		goto bad_inode;
2436 	} else {
2437 		le = le_b;
2438 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2439 				    &mi);
2440 		if (!attr) {
2441 			err = -EINVAL;
2442 			goto bad_inode;
2443 		}
2444 
2445 		svcn = le64_to_cpu(attr->nres.svcn);
2446 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2447 	}
2448 
2449 	run_truncate(run, 0); /* clear cached values. */
2450 	err = attr_load_runs(attr, ni, run, NULL);
2451 	if (err)
2452 		goto out;
2453 
2454 	if (!run_insert_range(run, vcn, len)) {
2455 		err = -ENOMEM;
2456 		goto out;
2457 	}
2458 
2459 	/* Try to pack in current record as much as possible. */
2460 	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2461 	if (err)
2462 		goto out;
2463 
2464 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2465 
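	/*
	 * Every remaining unnamed $DATA segment lies above the insertion
	 * point: shift its VCN window up by 'len' and keep the attribute
	 * list in sync.
	 */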
2466 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2467 	       attr->type == ATTR_DATA && !attr->name_len) {
2468 		le64_add_cpu(&attr->nres.svcn, len);
2469 		le64_add_cpu(&attr->nres.evcn, len);
2470 		if (le) {
2471 			le->vcn = attr->nres.svcn;
2472 			ni->attr_list.dirty = true;
2473 		}
2474 		mi->dirty = true;
2475 	}
2476 
2477 	if (next_svcn < evcn1 + len) {
2478 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2479 					    next_svcn, evcn1 + len - next_svcn,
2480 					    a_flags, NULL, NULL, NULL);
2481 
2482 		le_b = NULL;
2483 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2484 				      &mi_b);
2485 		if (!attr_b) {
2486 			err = -EINVAL;
2487 			goto bad_inode;
2488 		}
2489 
2490 		if (err) {
2491 			/* ni_insert_nonresident failed. Try to undo. */
2492 			goto undo_insert_range;
2493 		}
2494 	}
2495 
2496 	/*
2497 	 * Update primary attribute segment.
2498 	 */
2499 	if (vbo <= ni->i_valid)
2500 		ni->i_valid += bytes;
2501 
2502 	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2503 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2504 
2505 	/* ni->i_valid may temporarily differ from valid_size. */
2506 	if (ni->i_valid > data_size + bytes)
2507 		attr_b->nres.valid_size = attr_b->nres.data_size;
2508 	else
2509 		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2510 	mi_b->dirty = true;
2511 
2512 done:
2513 	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2514 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2515 	mark_inode_dirty(&ni->vfs_inode);
2516 
2517 out:
2518 	run_truncate(run, 0); /* clear cached values. */
2519 
2520 	up_write(&ni->file.run_lock);
2521 
2522 	return err;
2523 
2524 bad_inode:
2525 	_ntfs_bad_inode(&ni->vfs_inode);
2526 	goto out;
2527 
2528 undo_insert_range:
2529 	svcn = le64_to_cpu(attr_b->nres.svcn);
2530 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2531 
2532 	if (svcn <= vcn && vcn < evcn1) {
2533 		attr = attr_b;
2534 		le = le_b;
2535 		mi = mi_b;
2536 	} else if (!le_b) {
2537 		goto bad_inode;
2538 	} else {
2539 		le = le_b;
2540 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2541 				    &mi);
2542 		if (!attr) {
2543 			goto bad_inode;
2544 		}
2545 
2546 		svcn = le64_to_cpu(attr->nres.svcn);
2547 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2548 	}
2549 
2550 	if (attr_load_runs(attr, ni, run, NULL))
2551 		goto bad_inode;
2552 
2553 	if (!run_collapse_range(run, vcn, len, 0))
2554 		goto bad_inode;
2555 
2556 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2557 		goto bad_inode;
2558 
2559 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2560 	       attr->type == ATTR_DATA && !attr->name_len) {
2561 		le64_sub_cpu(&attr->nres.svcn, len);
2562 		le64_sub_cpu(&attr->nres.evcn, len);
2563 		if (le) {
2564 			le->vcn = attr->nres.svcn;
2565 			ni->attr_list.dirty = true;
2566 		}
2567 		mi->dirty = true;
2568 	}
2569 
2570 	goto out;
2571 }
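
/*
 * Editorial illustration (hypothetical numbers): with 4K clusters,
 * attr_insert_range(ni, 4096, 8192) shifts everything from vcn 1 upward
 * by len = 2 clusters and leaves an unmapped (sparse) gap behind:
 *
 *	before: vcn 0 1 2      ->  lcn 100 101 102
 *	after:  vcn 0 1 2 3 4  ->  lcn 100  --  -- 101 102   (-- == hole)
 */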
2572 
2573 /*
2574  * attr_force_nonresident
2575  *
2576  * Convert the default data attribute into non-resident form.
2577  */
2578 int attr_force_nonresident(struct ntfs_inode *ni)
2579 {
2580 	int err;
2581 	struct ATTRIB *attr;
2582 	struct ATTR_LIST_ENTRY *le = NULL;
2583 	struct mft_inode *mi;
2584 
2585 	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2586 	if (!attr) {
2587 		_ntfs_bad_inode(&ni->vfs_inode);
2588 		return -ENOENT;
2589 	}
2590 
2591 	if (attr->non_res) {
2592 		/* Already non-resident. */
2593 		return 0;
2594 	}
2595 
2596 	down_write(&ni->file.run_lock);
2597 	err = attr_make_nonresident(ni, attr, le, mi,
2598 				    le32_to_cpu(attr->res.data_size),
2599 				    &ni->file.run, &attr, NULL);
2600 	up_write(&ni->file.run_lock);
2601 
2602 	return err;
2603 }
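
/*
 * Editorial note: forcing the conversion is useful when a subsequent
 * operation only works on run-based (non-resident) data; for an attribute
 * that is already non-resident this is a no-op.
 */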
2604