xref: /linux/fs/ntfs3/attrib.c (revision 43b46e6bc69c2aa4331cfd7fa4e2943a894339e5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
19  * preallocate algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = (((size + clump - 1) >> align_shift)) << align_shift;
53 
54 	return ret;
55 }
56 
57 /*
58  * attr_must_be_resident
59  *
60  * Return: True if attribute must be resident.
61  */
62 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
63 					 enum ATTR_TYPE type)
64 {
65 	const struct ATTR_DEF_ENTRY *de;
66 
67 	switch (type) {
68 	case ATTR_STD:
69 	case ATTR_NAME:
70 	case ATTR_ID:
71 	case ATTR_LABEL:
72 	case ATTR_VOL_INFO:
73 	case ATTR_ROOT:
74 	case ATTR_EA_INFO:
75 		return true;
76 	default:
77 		de = ntfs_query_def(sbi, type);
78 		if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
79 			return true;
80 		return false;
81 	}
82 }
83 
84 /*
85  * attr_load_runs - Load all runs stored in @attr.
86  */
87 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
88 			  struct runs_tree *run, const CLST *vcn)
89 {
90 	int err;
91 	CLST svcn = le64_to_cpu(attr->nres.svcn);
92 	CLST evcn = le64_to_cpu(attr->nres.evcn);
93 	u32 asize;
94 	u16 run_off;
95 
96 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
97 		return 0;
98 
99 	if (vcn && (evcn < *vcn || *vcn < svcn))
100 		return -EINVAL;
101 
102 	asize = le32_to_cpu(attr->size);
103 	run_off = le16_to_cpu(attr->nres.run_off);
104 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
105 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
106 			    asize - run_off);
107 	if (err < 0)
108 		return err;
109 
110 	return 0;
111 }
112 
113 /*
114  * run_deallocate_ex - Deallocate clusters.
115  */
116 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
117 			     CLST vcn, CLST len, CLST *done, bool trim)
118 {
119 	int err = 0;
120 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
121 	size_t idx;
122 
123 	if (!len)
124 		goto out;
125 
126 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
127 failed:
128 		run_truncate(run, vcn0);
129 		err = -EINVAL;
130 		goto out;
131 	}
132 
133 	for (;;) {
134 		if (clen > len)
135 			clen = len;
136 
137 		if (!clen) {
138 			err = -EINVAL;
139 			goto out;
140 		}
141 
142 		if (lcn != SPARSE_LCN) {
143 			if (sbi) {
144 				/* mark bitmap range [lcn + clen) as free and trim clusters. */
145 				mark_as_free_ex(sbi, lcn, clen, trim);
146 			}
147 			dn += clen;
148 		}
149 
150 		len -= clen;
151 		if (!len)
152 			break;
153 
154 		vcn_next = vcn + clen;
155 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
156 		    vcn != vcn_next) {
157 			/* Save memory - don't load entire run. */
158 			goto failed;
159 		}
160 	}
161 
162 out:
163 	if (done)
164 		*done += dn;
165 
166 	return err;
167 }
168 
169 /*
170  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
171  */
172 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
173 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
174 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
175 			   CLST *new_lcn)
176 {
177 	int err;
178 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
179 	size_t cnt = run->count;
180 
181 	for (;;) {
182 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
183 					       opt);
184 
185 		if (err == -ENOSPC && pre) {
186 			pre = 0;
187 			if (*pre_alloc)
188 				*pre_alloc = 0;
189 			continue;
190 		}
191 
192 		if (err)
193 			goto out;
194 
195 		if (new_lcn && vcn == vcn0)
196 			*new_lcn = lcn;
197 
198 		/* Add new fragment into run storage. */
199 		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
200 			/* Undo last 'ntfs_look_for_free_space' */
201 			mark_as_free_ex(sbi, lcn, len, false);
202 			err = -ENOMEM;
203 			goto out;
204 		}
205 
206 		vcn += flen;
207 
208 		if (flen >= len || opt == ALLOCATE_MFT ||
209 		    (fr && run->count - cnt >= fr)) {
210 			*alen = vcn - vcn0;
211 			return 0;
212 		}
213 
214 		len -= flen;
215 	}
216 
217 out:
218 	/* Undo 'ntfs_look_for_free_space' */
219 	if (vcn - vcn0) {
220 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
221 		run_truncate(run, vcn0);
222 	}
223 
224 	return err;
225 }
226 
227 /*
228  * attr_make_nonresident
229  *
230  * If page is not NULL - it is already contains resident data
231  * and locked (called from ni_write_frame()).
232  */
233 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
234 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
235 			  u64 new_size, struct runs_tree *run,
236 			  struct ATTRIB **ins_attr, struct page *page)
237 {
238 	struct ntfs_sb_info *sbi;
239 	struct ATTRIB *attr_s;
240 	struct MFT_REC *rec;
241 	u32 used, asize, rsize, aoff, align;
242 	bool is_data;
243 	CLST len, alen;
244 	char *next;
245 	int err;
246 
247 	if (attr->non_res) {
248 		*ins_attr = attr;
249 		return 0;
250 	}
251 
252 	sbi = mi->sbi;
253 	rec = mi->mrec;
254 	attr_s = NULL;
255 	used = le32_to_cpu(rec->used);
256 	asize = le32_to_cpu(attr->size);
257 	next = Add2Ptr(attr, asize);
258 	aoff = PtrOffset(rec, attr);
259 	rsize = le32_to_cpu(attr->res.data_size);
260 	is_data = attr->type == ATTR_DATA && !attr->name_len;
261 
262 	align = sbi->cluster_size;
263 	if (is_attr_compressed(attr))
264 		align <<= COMPRESSION_UNIT;
265 	len = (rsize + align - 1) >> sbi->cluster_bits;
266 
267 	run_init(run);
268 
269 	/* Make a copy of original attribute. */
270 	attr_s = kmemdup(attr, asize, GFP_NOFS);
271 	if (!attr_s) {
272 		err = -ENOMEM;
273 		goto out;
274 	}
275 
276 	if (!len) {
277 		/* Empty resident -> Empty nonresident. */
278 		alen = 0;
279 	} else {
280 		const char *data = resident_data(attr);
281 
282 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
283 					     ALLOCATE_DEF, &alen, 0, NULL);
284 		if (err)
285 			goto out1;
286 
287 		if (!rsize) {
288 			/* Empty resident -> Non empty nonresident. */
289 		} else if (!is_data) {
290 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
291 			if (err)
292 				goto out2;
293 		} else if (!page) {
294 			char *kaddr;
295 
296 			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
297 			if (!page) {
298 				err = -ENOMEM;
299 				goto out2;
300 			}
301 			kaddr = kmap_atomic(page);
302 			memcpy(kaddr, data, rsize);
303 			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
304 			kunmap_atomic(kaddr);
305 			flush_dcache_page(page);
306 			SetPageUptodate(page);
307 			set_page_dirty(page);
308 			unlock_page(page);
309 			put_page(page);
310 		}
311 	}
312 
313 	/* Remove original attribute. */
314 	used -= asize;
315 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
316 	rec->used = cpu_to_le32(used);
317 	mi->dirty = true;
318 	if (le)
319 		al_remove_le(ni, le);
320 
321 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
322 				    attr_s->name_len, run, 0, alen,
323 				    attr_s->flags, &attr, NULL, NULL);
324 	if (err)
325 		goto out3;
326 
327 	kfree(attr_s);
328 	attr->nres.data_size = cpu_to_le64(rsize);
329 	attr->nres.valid_size = attr->nres.data_size;
330 
331 	*ins_attr = attr;
332 
333 	if (is_data)
334 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
335 
336 	/* Resident attribute becomes non resident. */
337 	return 0;
338 
339 out3:
340 	attr = Add2Ptr(rec, aoff);
341 	memmove(next, attr, used - aoff);
342 	memcpy(attr, attr_s, asize);
343 	rec->used = cpu_to_le32(used + asize);
344 	mi->dirty = true;
345 out2:
346 	/* Undo: do not trim new allocated clusters. */
347 	run_deallocate(sbi, run, false);
348 	run_close(run);
349 out1:
350 	kfree(attr_s);
351 out:
352 	return err;
353 }
354 
355 /*
356  * attr_set_size_res - Helper for attr_set_size().
357  */
358 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
359 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
360 			     u64 new_size, struct runs_tree *run,
361 			     struct ATTRIB **ins_attr)
362 {
363 	struct ntfs_sb_info *sbi = mi->sbi;
364 	struct MFT_REC *rec = mi->mrec;
365 	u32 used = le32_to_cpu(rec->used);
366 	u32 asize = le32_to_cpu(attr->size);
367 	u32 aoff = PtrOffset(rec, attr);
368 	u32 rsize = le32_to_cpu(attr->res.data_size);
369 	u32 tail = used - aoff - asize;
370 	char *next = Add2Ptr(attr, asize);
371 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
372 
373 	if (dsize < 0) {
374 		memmove(next + dsize, next, tail);
375 	} else if (dsize > 0) {
376 		if (used + dsize > sbi->max_bytes_per_attr)
377 			return attr_make_nonresident(ni, attr, le, mi, new_size,
378 						     run, ins_attr, NULL);
379 
380 		memmove(next + dsize, next, tail);
381 		memset(next, 0, dsize);
382 	}
383 
384 	if (new_size > rsize)
385 		memset(Add2Ptr(resident_data(attr), rsize), 0,
386 		       new_size - rsize);
387 
388 	rec->used = cpu_to_le32(used + dsize);
389 	attr->size = cpu_to_le32(asize + dsize);
390 	attr->res.data_size = cpu_to_le32(new_size);
391 	mi->dirty = true;
392 	*ins_attr = attr;
393 
394 	return 0;
395 }
396 
397 /*
398  * attr_set_size - Change the size of attribute.
399  *
400  * Extend:
401  *   - Sparse/compressed: No allocated clusters.
402  *   - Normal: Append allocated and preallocated new clusters.
403  * Shrink:
404  *   - No deallocate if @keep_prealloc is set.
405  */
406 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
407 		  const __le16 *name, u8 name_len, struct runs_tree *run,
408 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
409 		  struct ATTRIB **ret)
410 {
411 	int err = 0;
412 	struct ntfs_sb_info *sbi = ni->mi.sbi;
413 	u8 cluster_bits = sbi->cluster_bits;
414 	bool is_mft =
415 		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
416 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
417 	struct ATTRIB *attr = NULL, *attr_b;
418 	struct ATTR_LIST_ENTRY *le, *le_b;
419 	struct mft_inode *mi, *mi_b;
420 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
421 	CLST next_svcn, pre_alloc = -1, done = 0;
422 	bool is_ext, is_bad = false;
423 	u32 align;
424 	struct MFT_REC *rec;
425 
426 again:
427 	alen = 0;
428 	le_b = NULL;
429 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
430 			      &mi_b);
431 	if (!attr_b) {
432 		err = -ENOENT;
433 		goto bad_inode;
434 	}
435 
436 	if (!attr_b->non_res) {
437 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
438 					&attr_b);
439 		if (err)
440 			return err;
441 
442 		/* Return if file is still resident. */
443 		if (!attr_b->non_res)
444 			goto ok1;
445 
446 		/* Layout of records may be changed, so do a full search. */
447 		goto again;
448 	}
449 
450 	is_ext = is_attr_ext(attr_b);
451 	align = sbi->cluster_size;
452 	if (is_ext)
453 		align <<= attr_b->nres.c_unit;
454 
455 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
456 	old_size = le64_to_cpu(attr_b->nres.data_size);
457 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
458 
459 again_1:
460 	old_alen = old_alloc >> cluster_bits;
461 
462 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
463 	new_alen = new_alloc >> cluster_bits;
464 
465 	if (keep_prealloc && new_size < old_size) {
466 		attr_b->nres.data_size = cpu_to_le64(new_size);
467 		mi_b->dirty = true;
468 		goto ok;
469 	}
470 
471 	vcn = old_alen - 1;
472 
473 	svcn = le64_to_cpu(attr_b->nres.svcn);
474 	evcn = le64_to_cpu(attr_b->nres.evcn);
475 
476 	if (svcn <= vcn && vcn <= evcn) {
477 		attr = attr_b;
478 		le = le_b;
479 		mi = mi_b;
480 	} else if (!le_b) {
481 		err = -EINVAL;
482 		goto bad_inode;
483 	} else {
484 		le = le_b;
485 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
486 				    &mi);
487 		if (!attr) {
488 			err = -EINVAL;
489 			goto bad_inode;
490 		}
491 
492 next_le_1:
493 		svcn = le64_to_cpu(attr->nres.svcn);
494 		evcn = le64_to_cpu(attr->nres.evcn);
495 	}
496 	/*
497 	 * Here we have:
498 	 * attr,mi,le - last attribute segment (containing 'vcn').
499 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
500 	 */
501 next_le:
502 	rec = mi->mrec;
503 	err = attr_load_runs(attr, ni, run, NULL);
504 	if (err)
505 		goto out;
506 
507 	if (new_size > old_size) {
508 		CLST to_allocate;
509 		size_t free;
510 
511 		if (new_alloc <= old_alloc) {
512 			attr_b->nres.data_size = cpu_to_le64(new_size);
513 			mi_b->dirty = true;
514 			goto ok;
515 		}
516 
517 		/*
518 		 * Add clusters. In simple case we have to:
519 		 *  - allocate space (vcn, lcn, len)
520 		 *  - update packed run in 'mi'
521 		 *  - update attr->nres.evcn
522 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
523 		 */
524 		to_allocate = new_alen - old_alen;
525 add_alloc_in_same_attr_seg:
526 		lcn = 0;
527 		if (is_mft) {
528 			/* MFT allocates clusters from MFT zone. */
529 			pre_alloc = 0;
530 		} else if (is_ext) {
531 			/* No preallocate for sparse/compress. */
532 			pre_alloc = 0;
533 		} else if (pre_alloc == -1) {
534 			pre_alloc = 0;
535 			if (type == ATTR_DATA && !name_len &&
536 			    sbi->options->prealloc) {
537 				pre_alloc =
538 					bytes_to_cluster(
539 						sbi,
540 						get_pre_allocated(new_size)) -
541 					new_alen;
542 			}
543 
544 			/* Get the last LCN to allocate from. */
545 			if (old_alen &&
546 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
547 				lcn = SPARSE_LCN;
548 			}
549 
550 			if (lcn == SPARSE_LCN)
551 				lcn = 0;
552 			else if (lcn)
553 				lcn += 1;
554 
555 			free = wnd_zeroes(&sbi->used.bitmap);
556 			if (to_allocate > free) {
557 				err = -ENOSPC;
558 				goto out;
559 			}
560 
561 			if (pre_alloc && to_allocate + pre_alloc > free)
562 				pre_alloc = 0;
563 		}
564 
565 		vcn = old_alen;
566 
567 		if (is_ext) {
568 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
569 					   false)) {
570 				err = -ENOMEM;
571 				goto out;
572 			}
573 			alen = to_allocate;
574 		} else {
575 			/* ~3 bytes per fragment. */
576 			err = attr_allocate_clusters(
577 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
578 				is_mft ? ALLOCATE_MFT : 0, &alen,
579 				is_mft ? 0
580 				       : (sbi->record_size -
581 					  le32_to_cpu(rec->used) + 8) /
582 							 3 +
583 						 1,
584 				NULL);
585 			if (err)
586 				goto out;
587 		}
588 
589 		done += alen;
590 		vcn += alen;
591 		if (to_allocate > alen)
592 			to_allocate -= alen;
593 		else
594 			to_allocate = 0;
595 
596 pack_runs:
597 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
598 		if (err)
599 			goto undo_1;
600 
601 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
602 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
603 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
604 		mi_b->dirty = true;
605 
606 		if (next_svcn >= vcn && !to_allocate) {
607 			/* Normal way. Update attribute and exit. */
608 			attr_b->nres.data_size = cpu_to_le64(new_size);
609 			goto ok;
610 		}
611 
612 		/* At least two MFT to avoid recursive loop. */
613 		if (is_mft && next_svcn == vcn &&
614 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
615 			new_size = new_alloc_tmp;
616 			attr_b->nres.data_size = attr_b->nres.alloc_size;
617 			goto ok;
618 		}
619 
620 		if (le32_to_cpu(rec->used) < sbi->record_size) {
621 			old_alen = next_svcn;
622 			evcn = old_alen - 1;
623 			goto add_alloc_in_same_attr_seg;
624 		}
625 
626 		attr_b->nres.data_size = attr_b->nres.alloc_size;
627 		if (new_alloc_tmp < old_valid)
628 			attr_b->nres.valid_size = attr_b->nres.data_size;
629 
630 		if (type == ATTR_LIST) {
631 			err = ni_expand_list(ni);
632 			if (err)
633 				goto undo_2;
634 			if (next_svcn < vcn)
635 				goto pack_runs;
636 
637 			/* Layout of records is changed. */
638 			goto again;
639 		}
640 
641 		if (!ni->attr_list.size) {
642 			err = ni_create_attr_list(ni);
643 			/* In case of error layout of records is not changed. */
644 			if (err)
645 				goto undo_2;
646 			/* Layout of records is changed. */
647 		}
648 
649 		if (next_svcn >= vcn) {
650 			/* This is MFT data, repeat. */
651 			goto again;
652 		}
653 
654 		/* Insert new attribute segment. */
655 		err = ni_insert_nonresident(ni, type, name, name_len, run,
656 					    next_svcn, vcn - next_svcn,
657 					    attr_b->flags, &attr, &mi, NULL);
658 
659 		/*
660 		 * Layout of records maybe changed.
661 		 * Find base attribute to update.
662 		 */
663 		le_b = NULL;
664 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
665 				      NULL, &mi_b);
666 		if (!attr_b) {
667 			err = -EINVAL;
668 			goto bad_inode;
669 		}
670 
671 		if (err) {
672 			/* ni_insert_nonresident failed. */
673 			attr = NULL;
674 			goto undo_2;
675 		}
676 
677 		if (!is_mft)
678 			run_truncate_head(run, evcn + 1);
679 
680 		svcn = le64_to_cpu(attr->nres.svcn);
681 		evcn = le64_to_cpu(attr->nres.evcn);
682 
683 		/*
684 		 * Attribute is in consistency state.
685 		 * Save this point to restore to if next steps fail.
686 		 */
687 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
688 		attr_b->nres.valid_size = attr_b->nres.data_size =
689 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
690 		mi_b->dirty = true;
691 		goto again_1;
692 	}
693 
694 	if (new_size != old_size ||
695 	    (new_alloc != old_alloc && !keep_prealloc)) {
696 		/*
697 		 * Truncate clusters. In simple case we have to:
698 		 *  - update packed run in 'mi'
699 		 *  - update attr->nres.evcn
700 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
701 		 *  - mark and trim clusters as free (vcn, lcn, len)
702 		 */
703 		CLST dlen = 0;
704 
705 		vcn = max(svcn, new_alen);
706 		new_alloc_tmp = (u64)vcn << cluster_bits;
707 
708 		if (vcn > svcn) {
709 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
710 			if (err)
711 				goto out;
712 		} else if (le && le->vcn) {
713 			u16 le_sz = le16_to_cpu(le->size);
714 
715 			/*
716 			 * NOTE: List entries for one attribute are always
717 			 * the same size. We deal with last entry (vcn==0)
718 			 * and it is not first in entries array
719 			 * (list entry for std attribute always first).
720 			 * So it is safe to step back.
721 			 */
722 			mi_remove_attr(NULL, mi, attr);
723 
724 			if (!al_remove_le(ni, le)) {
725 				err = -EINVAL;
726 				goto bad_inode;
727 			}
728 
729 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
730 		} else {
731 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
732 			mi->dirty = true;
733 		}
734 
735 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
736 
737 		if (vcn == new_alen) {
738 			attr_b->nres.data_size = cpu_to_le64(new_size);
739 			if (new_size < old_valid)
740 				attr_b->nres.valid_size =
741 					attr_b->nres.data_size;
742 		} else {
743 			if (new_alloc_tmp <=
744 			    le64_to_cpu(attr_b->nres.data_size))
745 				attr_b->nres.data_size =
746 					attr_b->nres.alloc_size;
747 			if (new_alloc_tmp <
748 			    le64_to_cpu(attr_b->nres.valid_size))
749 				attr_b->nres.valid_size =
750 					attr_b->nres.alloc_size;
751 		}
752 		mi_b->dirty = true;
753 
754 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
755 					true);
756 		if (err)
757 			goto out;
758 
759 		if (is_ext) {
760 			/* dlen - really deallocated clusters. */
761 			le64_sub_cpu(&attr_b->nres.total_size,
762 				     ((u64)dlen << cluster_bits));
763 		}
764 
765 		run_truncate(run, vcn);
766 
767 		if (new_alloc_tmp <= new_alloc)
768 			goto ok;
769 
770 		old_size = new_alloc_tmp;
771 		vcn = svcn - 1;
772 
773 		if (le == le_b) {
774 			attr = attr_b;
775 			mi = mi_b;
776 			evcn = svcn - 1;
777 			svcn = 0;
778 			goto next_le;
779 		}
780 
781 		if (le->type != type || le->name_len != name_len ||
782 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
783 			err = -EINVAL;
784 			goto bad_inode;
785 		}
786 
787 		err = ni_load_mi(ni, le, &mi);
788 		if (err)
789 			goto out;
790 
791 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
792 		if (!attr) {
793 			err = -EINVAL;
794 			goto bad_inode;
795 		}
796 		goto next_le_1;
797 	}
798 
799 ok:
800 	if (new_valid) {
801 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
802 
803 		if (attr_b->nres.valid_size != valid) {
804 			attr_b->nres.valid_size = valid;
805 			mi_b->dirty = true;
806 		}
807 	}
808 
809 ok1:
810 	if (ret)
811 		*ret = attr_b;
812 
813 	/* Update inode_set_bytes. */
814 	if (((type == ATTR_DATA && !name_len) ||
815 	     (type == ATTR_ALLOC && name == I30_NAME))) {
816 		bool dirty = false;
817 
818 		if (ni->vfs_inode.i_size != new_size) {
819 			ni->vfs_inode.i_size = new_size;
820 			dirty = true;
821 		}
822 
823 		if (attr_b->non_res) {
824 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
825 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
826 				inode_set_bytes(&ni->vfs_inode, new_alloc);
827 				dirty = true;
828 			}
829 		}
830 
831 		if (dirty) {
832 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
833 			mark_inode_dirty(&ni->vfs_inode);
834 		}
835 	}
836 
837 	return 0;
838 
839 undo_2:
840 	vcn -= alen;
841 	attr_b->nres.data_size = cpu_to_le64(old_size);
842 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
843 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
844 
845 	/* Restore 'attr' and 'mi'. */
846 	if (attr)
847 		goto restore_run;
848 
849 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
850 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
851 		attr = attr_b;
852 		le = le_b;
853 		mi = mi_b;
854 	} else if (!le_b) {
855 		err = -EINVAL;
856 		goto bad_inode;
857 	} else {
858 		le = le_b;
859 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
860 				    &svcn, &mi);
861 		if (!attr)
862 			goto bad_inode;
863 	}
864 
865 restore_run:
866 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
867 		is_bad = true;
868 
869 undo_1:
870 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
871 
872 	run_truncate(run, vcn);
873 out:
874 	if (is_bad) {
875 bad_inode:
876 		_ntfs_bad_inode(&ni->vfs_inode);
877 	}
878 	return err;
879 }
880 
881 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
882 			CLST *len, bool *new)
883 {
884 	int err = 0;
885 	struct runs_tree *run = &ni->file.run;
886 	struct ntfs_sb_info *sbi;
887 	u8 cluster_bits;
888 	struct ATTRIB *attr = NULL, *attr_b;
889 	struct ATTR_LIST_ENTRY *le, *le_b;
890 	struct mft_inode *mi, *mi_b;
891 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
892 	u64 total_size;
893 	u32 clst_per_frame;
894 	bool ok;
895 
896 	if (new)
897 		*new = false;
898 
899 	down_read(&ni->file.run_lock);
900 	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
901 	up_read(&ni->file.run_lock);
902 
903 	if (ok && (*lcn != SPARSE_LCN || !new)) {
904 		/* Normal way. */
905 		return 0;
906 	}
907 
908 	if (!clen)
909 		clen = 1;
910 
911 	if (ok && clen > *len)
912 		clen = *len;
913 
914 	sbi = ni->mi.sbi;
915 	cluster_bits = sbi->cluster_bits;
916 
917 	ni_lock(ni);
918 	down_write(&ni->file.run_lock);
919 
920 	le_b = NULL;
921 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
922 	if (!attr_b) {
923 		err = -ENOENT;
924 		goto out;
925 	}
926 
927 	if (!attr_b->non_res) {
928 		*lcn = RESIDENT_LCN;
929 		*len = 1;
930 		goto out;
931 	}
932 
933 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
934 	if (vcn >= asize) {
935 		err = -EINVAL;
936 		goto out;
937 	}
938 
939 	clst_per_frame = 1u << attr_b->nres.c_unit;
940 	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
941 
942 	if (vcn + to_alloc > asize)
943 		to_alloc = asize - vcn;
944 
945 	svcn = le64_to_cpu(attr_b->nres.svcn);
946 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
947 
948 	attr = attr_b;
949 	le = le_b;
950 	mi = mi_b;
951 
952 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
953 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
954 				    &mi);
955 		if (!attr) {
956 			err = -EINVAL;
957 			goto out;
958 		}
959 		svcn = le64_to_cpu(attr->nres.svcn);
960 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
961 	}
962 
963 	err = attr_load_runs(attr, ni, run, NULL);
964 	if (err)
965 		goto out;
966 
967 	if (!ok) {
968 		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
969 		if (ok && (*lcn != SPARSE_LCN || !new)) {
970 			/* Normal way. */
971 			err = 0;
972 			goto ok;
973 		}
974 
975 		if (!ok && !new) {
976 			*len = 0;
977 			err = 0;
978 			goto ok;
979 		}
980 
981 		if (ok && clen > *len) {
982 			clen = *len;
983 			to_alloc = (clen + clst_per_frame - 1) &
984 				   ~(clst_per_frame - 1);
985 		}
986 	}
987 
988 	if (!is_attr_ext(attr_b)) {
989 		err = -EINVAL;
990 		goto out;
991 	}
992 
993 	/* Get the last LCN to allocate from. */
994 	hint = 0;
995 
996 	if (vcn > evcn1) {
997 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
998 				   false)) {
999 			err = -ENOMEM;
1000 			goto out;
1001 		}
1002 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1003 		hint = -1;
1004 	}
1005 
1006 	err = attr_allocate_clusters(
1007 		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
1008 		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
1009 		lcn);
1010 	if (err)
1011 		goto out;
1012 	*new = true;
1013 
1014 	end = vcn + *len;
1015 
1016 	total_size = le64_to_cpu(attr_b->nres.total_size) +
1017 		     ((u64)*len << cluster_bits);
1018 
1019 repack:
1020 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1021 	if (err)
1022 		goto out;
1023 
1024 	attr_b->nres.total_size = cpu_to_le64(total_size);
1025 	inode_set_bytes(&ni->vfs_inode, total_size);
1026 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1027 
1028 	mi_b->dirty = true;
1029 	mark_inode_dirty(&ni->vfs_inode);
1030 
1031 	/* Stored [vcn : next_svcn) from [vcn : end). */
1032 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1033 
1034 	if (end <= evcn1) {
1035 		if (next_svcn == evcn1) {
1036 			/* Normal way. Update attribute and exit. */
1037 			goto ok;
1038 		}
1039 		/* Add new segment [next_svcn : evcn1 - next_svcn). */
1040 		if (!ni->attr_list.size) {
1041 			err = ni_create_attr_list(ni);
1042 			if (err)
1043 				goto out;
1044 			/* Layout of records is changed. */
1045 			le_b = NULL;
1046 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1047 					      0, NULL, &mi_b);
1048 			if (!attr_b) {
1049 				err = -ENOENT;
1050 				goto out;
1051 			}
1052 
1053 			attr = attr_b;
1054 			le = le_b;
1055 			mi = mi_b;
1056 			goto repack;
1057 		}
1058 	}
1059 
1060 	svcn = evcn1;
1061 
1062 	/* Estimate next attribute. */
1063 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1064 
1065 	if (attr) {
1066 		CLST alloc = bytes_to_cluster(
1067 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1068 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1069 
1070 		if (end < next_svcn)
1071 			end = next_svcn;
1072 		while (end > evcn) {
1073 			/* Remove segment [svcn : evcn). */
1074 			mi_remove_attr(NULL, mi, attr);
1075 
1076 			if (!al_remove_le(ni, le)) {
1077 				err = -EINVAL;
1078 				goto out;
1079 			}
1080 
1081 			if (evcn + 1 >= alloc) {
1082 				/* Last attribute segment. */
1083 				evcn1 = evcn + 1;
1084 				goto ins_ext;
1085 			}
1086 
1087 			if (ni_load_mi(ni, le, &mi)) {
1088 				attr = NULL;
1089 				goto out;
1090 			}
1091 
1092 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1093 					    &le->id);
1094 			if (!attr) {
1095 				err = -EINVAL;
1096 				goto out;
1097 			}
1098 			svcn = le64_to_cpu(attr->nres.svcn);
1099 			evcn = le64_to_cpu(attr->nres.evcn);
1100 		}
1101 
1102 		if (end < svcn)
1103 			end = svcn;
1104 
1105 		err = attr_load_runs(attr, ni, run, &end);
1106 		if (err)
1107 			goto out;
1108 
1109 		evcn1 = evcn + 1;
1110 		attr->nres.svcn = cpu_to_le64(next_svcn);
1111 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1112 		if (err)
1113 			goto out;
1114 
1115 		le->vcn = cpu_to_le64(next_svcn);
1116 		ni->attr_list.dirty = true;
1117 		mi->dirty = true;
1118 
1119 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1120 	}
1121 ins_ext:
1122 	if (evcn1 > next_svcn) {
1123 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1124 					    next_svcn, evcn1 - next_svcn,
1125 					    attr_b->flags, &attr, &mi, NULL);
1126 		if (err)
1127 			goto out;
1128 	}
1129 ok:
1130 	run_truncate_around(run, vcn);
1131 out:
1132 	up_write(&ni->file.run_lock);
1133 	ni_unlock(ni);
1134 
1135 	return err;
1136 }
1137 
1138 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1139 {
1140 	u64 vbo;
1141 	struct ATTRIB *attr;
1142 	u32 data_size;
1143 
1144 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1145 	if (!attr)
1146 		return -EINVAL;
1147 
1148 	if (attr->non_res)
1149 		return E_NTFS_NONRESIDENT;
1150 
1151 	vbo = page->index << PAGE_SHIFT;
1152 	data_size = le32_to_cpu(attr->res.data_size);
1153 	if (vbo < data_size) {
1154 		const char *data = resident_data(attr);
1155 		char *kaddr = kmap_atomic(page);
1156 		u32 use = data_size - vbo;
1157 
1158 		if (use > PAGE_SIZE)
1159 			use = PAGE_SIZE;
1160 
1161 		memcpy(kaddr, data + vbo, use);
1162 		memset(kaddr + use, 0, PAGE_SIZE - use);
1163 		kunmap_atomic(kaddr);
1164 		flush_dcache_page(page);
1165 		SetPageUptodate(page);
1166 	} else if (!PageUptodate(page)) {
1167 		zero_user_segment(page, 0, PAGE_SIZE);
1168 		SetPageUptodate(page);
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1175 {
1176 	u64 vbo;
1177 	struct mft_inode *mi;
1178 	struct ATTRIB *attr;
1179 	u32 data_size;
1180 
1181 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1182 	if (!attr)
1183 		return -EINVAL;
1184 
1185 	if (attr->non_res) {
1186 		/* Return special error code to check this case. */
1187 		return E_NTFS_NONRESIDENT;
1188 	}
1189 
1190 	vbo = page->index << PAGE_SHIFT;
1191 	data_size = le32_to_cpu(attr->res.data_size);
1192 	if (vbo < data_size) {
1193 		char *data = resident_data(attr);
1194 		char *kaddr = kmap_atomic(page);
1195 		u32 use = data_size - vbo;
1196 
1197 		if (use > PAGE_SIZE)
1198 			use = PAGE_SIZE;
1199 		memcpy(data + vbo, kaddr, use);
1200 		kunmap_atomic(kaddr);
1201 		mi->dirty = true;
1202 	}
1203 	ni->i_valid = data_size;
1204 
1205 	return 0;
1206 }
1207 
1208 /*
1209  * attr_load_runs_vcn - Load runs with VCN.
1210  */
1211 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1212 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1213 		       CLST vcn)
1214 {
1215 	struct ATTRIB *attr;
1216 	int err;
1217 	CLST svcn, evcn;
1218 	u16 ro;
1219 
1220 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1221 	if (!attr) {
1222 		/* Is record corrupted? */
1223 		return -ENOENT;
1224 	}
1225 
1226 	svcn = le64_to_cpu(attr->nres.svcn);
1227 	evcn = le64_to_cpu(attr->nres.evcn);
1228 
1229 	if (evcn < vcn || vcn < svcn) {
1230 		/* Is record corrupted? */
1231 		return -EINVAL;
1232 	}
1233 
1234 	ro = le16_to_cpu(attr->nres.run_off);
1235 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1236 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1237 	if (err < 0)
1238 		return err;
1239 	return 0;
1240 }
1241 
1242 /*
1243  * attr_load_runs_range - Load runs for given range [from to).
1244  */
1245 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1246 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1247 			 u64 from, u64 to)
1248 {
1249 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1250 	u8 cluster_bits = sbi->cluster_bits;
1251 	CLST vcn;
1252 	CLST vcn_last = (to - 1) >> cluster_bits;
1253 	CLST lcn, clen;
1254 	int err;
1255 
1256 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1257 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1258 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1259 						 vcn);
1260 			if (err)
1261 				return err;
1262 			clen = 0; /* Next run_lookup_entry(vcn) must be success. */
1263 		}
1264 	}
1265 
1266 	return 0;
1267 }
1268 
1269 #ifdef CONFIG_NTFS3_LZX_XPRESS
1270 /*
1271  * attr_wof_frame_info
1272  *
1273  * Read header of Xpress/LZX file to get info about frame.
1274  */
1275 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1276 			struct runs_tree *run, u64 frame, u64 frames,
1277 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1278 {
1279 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1280 	u64 vbo[2], off[2], wof_size;
1281 	u32 voff;
1282 	u8 bytes_per_off;
1283 	char *addr;
1284 	struct page *page;
1285 	int i, err;
1286 	__le32 *off32;
1287 	__le64 *off64;
1288 
1289 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1290 		/* File starts with array of 32 bit offsets. */
1291 		bytes_per_off = sizeof(__le32);
1292 		vbo[1] = frame << 2;
1293 		*vbo_data = frames << 2;
1294 	} else {
1295 		/* File starts with array of 64 bit offsets. */
1296 		bytes_per_off = sizeof(__le64);
1297 		vbo[1] = frame << 3;
1298 		*vbo_data = frames << 3;
1299 	}
1300 
1301 	/*
1302 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1303 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1304 	 */
1305 	if (!attr->non_res) {
1306 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1307 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1308 			return -EINVAL;
1309 		}
1310 		addr = resident_data(attr);
1311 
1312 		if (bytes_per_off == sizeof(__le32)) {
1313 			off32 = Add2Ptr(addr, vbo[1]);
1314 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1315 			off[1] = le32_to_cpu(off32[0]);
1316 		} else {
1317 			off64 = Add2Ptr(addr, vbo[1]);
1318 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1319 			off[1] = le64_to_cpu(off64[0]);
1320 		}
1321 
1322 		*vbo_data += off[0];
1323 		*ondisk_size = off[1] - off[0];
1324 		return 0;
1325 	}
1326 
1327 	wof_size = le64_to_cpu(attr->nres.data_size);
1328 	down_write(&ni->file.run_lock);
1329 	page = ni->file.offs_page;
1330 	if (!page) {
1331 		page = alloc_page(GFP_KERNEL);
1332 		if (!page) {
1333 			err = -ENOMEM;
1334 			goto out;
1335 		}
1336 		page->index = -1;
1337 		ni->file.offs_page = page;
1338 	}
1339 	lock_page(page);
1340 	addr = page_address(page);
1341 
1342 	if (vbo[1]) {
1343 		voff = vbo[1] & (PAGE_SIZE - 1);
1344 		vbo[0] = vbo[1] - bytes_per_off;
1345 		i = 0;
1346 	} else {
1347 		voff = 0;
1348 		vbo[0] = 0;
1349 		off[0] = 0;
1350 		i = 1;
1351 	}
1352 
1353 	do {
1354 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1355 
1356 		if (index != page->index) {
1357 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1358 			u64 to = min(from + PAGE_SIZE, wof_size);
1359 
1360 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1361 						   ARRAY_SIZE(WOF_NAME), run,
1362 						   from, to);
1363 			if (err)
1364 				goto out1;
1365 
1366 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1367 					     to - from, REQ_OP_READ);
1368 			if (err) {
1369 				page->index = -1;
1370 				goto out1;
1371 			}
1372 			page->index = index;
1373 		}
1374 
1375 		if (i) {
1376 			if (bytes_per_off == sizeof(__le32)) {
1377 				off32 = Add2Ptr(addr, voff);
1378 				off[1] = le32_to_cpu(*off32);
1379 			} else {
1380 				off64 = Add2Ptr(addr, voff);
1381 				off[1] = le64_to_cpu(*off64);
1382 			}
1383 		} else if (!voff) {
1384 			if (bytes_per_off == sizeof(__le32)) {
1385 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1386 				off[0] = le32_to_cpu(*off32);
1387 			} else {
1388 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1389 				off[0] = le64_to_cpu(*off64);
1390 			}
1391 		} else {
1392 			/* Two values in one page. */
1393 			if (bytes_per_off == sizeof(__le32)) {
1394 				off32 = Add2Ptr(addr, voff);
1395 				off[0] = le32_to_cpu(off32[-1]);
1396 				off[1] = le32_to_cpu(off32[0]);
1397 			} else {
1398 				off64 = Add2Ptr(addr, voff);
1399 				off[0] = le64_to_cpu(off64[-1]);
1400 				off[1] = le64_to_cpu(off64[0]);
1401 			}
1402 			break;
1403 		}
1404 	} while (++i < 2);
1405 
1406 	*vbo_data += off[0];
1407 	*ondisk_size = off[1] - off[0];
1408 
1409 out1:
1410 	unlock_page(page);
1411 out:
1412 	up_write(&ni->file.run_lock);
1413 	return err;
1414 }
1415 #endif
1416 
1417 /*
1418  * attr_is_frame_compressed - Used to detect compressed frame.
1419  */
1420 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1421 			     CLST frame, CLST *clst_data)
1422 {
1423 	int err;
1424 	u32 clst_frame;
1425 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1426 	size_t idx;
1427 	struct runs_tree *run;
1428 
1429 	*clst_data = 0;
1430 
1431 	if (!is_attr_compressed(attr))
1432 		return 0;
1433 
1434 	if (!attr->non_res)
1435 		return 0;
1436 
1437 	clst_frame = 1u << attr->nres.c_unit;
1438 	vcn = frame * clst_frame;
1439 	run = &ni->file.run;
1440 
1441 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1442 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1443 					 attr->name_len, run, vcn);
1444 		if (err)
1445 			return err;
1446 
1447 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1448 			return -EINVAL;
1449 	}
1450 
1451 	if (lcn == SPARSE_LCN) {
1452 		/* Sparsed frame. */
1453 		return 0;
1454 	}
1455 
1456 	if (clen >= clst_frame) {
1457 		/*
1458 		 * The frame is not compressed 'cause
1459 		 * it does not contain any sparse clusters.
1460 		 */
1461 		*clst_data = clst_frame;
1462 		return 0;
1463 	}
1464 
1465 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1466 	slen = 0;
1467 	*clst_data = clen;
1468 
1469 	/*
1470 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1471 	 * Check next fragments.
1472 	 */
1473 	while ((vcn += clen) < alen) {
1474 		vcn_next = vcn;
1475 
1476 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1477 		    vcn_next != vcn) {
1478 			err = attr_load_runs_vcn(ni, attr->type,
1479 						 attr_name(attr),
1480 						 attr->name_len, run, vcn_next);
1481 			if (err)
1482 				return err;
1483 			vcn = vcn_next;
1484 
1485 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1486 				return -EINVAL;
1487 		}
1488 
1489 		if (lcn == SPARSE_LCN) {
1490 			slen += clen;
1491 		} else {
1492 			if (slen) {
1493 				/*
1494 				 * Data_clusters + sparse_clusters =
1495 				 * not enough for frame.
1496 				 */
1497 				return -EINVAL;
1498 			}
1499 			*clst_data += clen;
1500 		}
1501 
1502 		if (*clst_data + slen >= clst_frame) {
1503 			if (!slen) {
1504 				/*
1505 				 * There is no sparsed clusters in this frame
1506 				 * so it is not compressed.
1507 				 */
1508 				*clst_data = clst_frame;
1509 			} else {
1510 				/* Frame is compressed. */
1511 			}
1512 			break;
1513 		}
1514 	}
1515 
1516 	return 0;
1517 }
1518 
1519 /*
1520  * attr_allocate_frame - Allocate/free clusters for @frame.
1521  *
1522  * Assumed: down_write(&ni->file.run_lock);
1523  */
1524 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1525 			u64 new_valid)
1526 {
1527 	int err = 0;
1528 	struct runs_tree *run = &ni->file.run;
1529 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1530 	struct ATTRIB *attr = NULL, *attr_b;
1531 	struct ATTR_LIST_ENTRY *le, *le_b;
1532 	struct mft_inode *mi, *mi_b;
1533 	CLST svcn, evcn1, next_svcn, lcn, len;
1534 	CLST vcn, end, clst_data;
1535 	u64 total_size, valid_size, data_size;
1536 
1537 	le_b = NULL;
1538 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1539 	if (!attr_b)
1540 		return -ENOENT;
1541 
1542 	if (!is_attr_ext(attr_b))
1543 		return -EINVAL;
1544 
1545 	vcn = frame << NTFS_LZNT_CUNIT;
1546 	total_size = le64_to_cpu(attr_b->nres.total_size);
1547 
1548 	svcn = le64_to_cpu(attr_b->nres.svcn);
1549 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1550 	data_size = le64_to_cpu(attr_b->nres.data_size);
1551 
1552 	if (svcn <= vcn && vcn < evcn1) {
1553 		attr = attr_b;
1554 		le = le_b;
1555 		mi = mi_b;
1556 	} else if (!le_b) {
1557 		err = -EINVAL;
1558 		goto out;
1559 	} else {
1560 		le = le_b;
1561 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1562 				    &mi);
1563 		if (!attr) {
1564 			err = -EINVAL;
1565 			goto out;
1566 		}
1567 		svcn = le64_to_cpu(attr->nres.svcn);
1568 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1569 	}
1570 
1571 	err = attr_load_runs(attr, ni, run, NULL);
1572 	if (err)
1573 		goto out;
1574 
1575 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1576 	if (err)
1577 		goto out;
1578 
1579 	total_size -= (u64)clst_data << sbi->cluster_bits;
1580 
1581 	len = bytes_to_cluster(sbi, compr_size);
1582 
1583 	if (len == clst_data)
1584 		goto out;
1585 
1586 	if (len < clst_data) {
1587 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1588 					NULL, true);
1589 		if (err)
1590 			goto out;
1591 
1592 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1593 				   false)) {
1594 			err = -ENOMEM;
1595 			goto out;
1596 		}
1597 		end = vcn + clst_data;
1598 		/* Run contains updated range [vcn + len : end). */
1599 	} else {
1600 		CLST alen, hint = 0;
1601 		/* Get the last LCN to allocate from. */
1602 		if (vcn + clst_data &&
1603 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1604 				      NULL)) {
1605 			hint = -1;
1606 		}
1607 
1608 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1609 					     hint + 1, len - clst_data, NULL, 0,
1610 					     &alen, 0, &lcn);
1611 		if (err)
1612 			goto out;
1613 
1614 		end = vcn + len;
1615 		/* Run contains updated range [vcn + clst_data : end). */
1616 	}
1617 
1618 	total_size += (u64)len << sbi->cluster_bits;
1619 
1620 repack:
1621 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1622 	if (err)
1623 		goto out;
1624 
1625 	attr_b->nres.total_size = cpu_to_le64(total_size);
1626 	inode_set_bytes(&ni->vfs_inode, total_size);
1627 
1628 	mi_b->dirty = true;
1629 	mark_inode_dirty(&ni->vfs_inode);
1630 
1631 	/* Stored [vcn : next_svcn) from [vcn : end). */
1632 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1633 
1634 	if (end <= evcn1) {
1635 		if (next_svcn == evcn1) {
1636 			/* Normal way. Update attribute and exit. */
1637 			goto ok;
1638 		}
1639 		/* Add new segment [next_svcn : evcn1 - next_svcn). */
1640 		if (!ni->attr_list.size) {
1641 			err = ni_create_attr_list(ni);
1642 			if (err)
1643 				goto out;
1644 			/* Layout of records is changed. */
1645 			le_b = NULL;
1646 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1647 					      0, NULL, &mi_b);
1648 			if (!attr_b) {
1649 				err = -ENOENT;
1650 				goto out;
1651 			}
1652 
1653 			attr = attr_b;
1654 			le = le_b;
1655 			mi = mi_b;
1656 			goto repack;
1657 		}
1658 	}
1659 
1660 	svcn = evcn1;
1661 
1662 	/* Estimate next attribute. */
1663 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1664 
1665 	if (attr) {
1666 		CLST alloc = bytes_to_cluster(
1667 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1668 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1669 
1670 		if (end < next_svcn)
1671 			end = next_svcn;
1672 		while (end > evcn) {
1673 			/* Remove segment [svcn : evcn). */
1674 			mi_remove_attr(NULL, mi, attr);
1675 
1676 			if (!al_remove_le(ni, le)) {
1677 				err = -EINVAL;
1678 				goto out;
1679 			}
1680 
1681 			if (evcn + 1 >= alloc) {
1682 				/* Last attribute segment. */
1683 				evcn1 = evcn + 1;
1684 				goto ins_ext;
1685 			}
1686 
1687 			if (ni_load_mi(ni, le, &mi)) {
1688 				attr = NULL;
1689 				goto out;
1690 			}
1691 
1692 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1693 					    &le->id);
1694 			if (!attr) {
1695 				err = -EINVAL;
1696 				goto out;
1697 			}
1698 			svcn = le64_to_cpu(attr->nres.svcn);
1699 			evcn = le64_to_cpu(attr->nres.evcn);
1700 		}
1701 
1702 		if (end < svcn)
1703 			end = svcn;
1704 
1705 		err = attr_load_runs(attr, ni, run, &end);
1706 		if (err)
1707 			goto out;
1708 
1709 		evcn1 = evcn + 1;
1710 		attr->nres.svcn = cpu_to_le64(next_svcn);
1711 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1712 		if (err)
1713 			goto out;
1714 
1715 		le->vcn = cpu_to_le64(next_svcn);
1716 		ni->attr_list.dirty = true;
1717 		mi->dirty = true;
1718 
1719 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1720 	}
1721 ins_ext:
1722 	if (evcn1 > next_svcn) {
1723 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1724 					    next_svcn, evcn1 - next_svcn,
1725 					    attr_b->flags, &attr, &mi, NULL);
1726 		if (err)
1727 			goto out;
1728 	}
1729 ok:
1730 	run_truncate_around(run, vcn);
1731 out:
1732 	if (new_valid > data_size)
1733 		new_valid = data_size;
1734 
1735 	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1736 	if (new_valid != valid_size) {
1737 		attr_b->nres.valid_size = cpu_to_le64(valid_size);
1738 		mi_b->dirty = true;
1739 	}
1740 
1741 	return err;
1742 }
1743 
1744 /*
1745  * attr_collapse_range - Collapse range in file.
1746  */
1747 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1748 {
1749 	int err = 0;
1750 	struct runs_tree *run = &ni->file.run;
1751 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1752 	struct ATTRIB *attr = NULL, *attr_b;
1753 	struct ATTR_LIST_ENTRY *le, *le_b;
1754 	struct mft_inode *mi, *mi_b;
1755 	CLST svcn, evcn1, len, dealloc, alen;
1756 	CLST vcn, end;
1757 	u64 valid_size, data_size, alloc_size, total_size;
1758 	u32 mask;
1759 	__le16 a_flags;
1760 
1761 	if (!bytes)
1762 		return 0;
1763 
1764 	le_b = NULL;
1765 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1766 	if (!attr_b)
1767 		return -ENOENT;
1768 
1769 	if (!attr_b->non_res) {
1770 		/* Attribute is resident. Nothing to do? */
1771 		return 0;
1772 	}
1773 
1774 	data_size = le64_to_cpu(attr_b->nres.data_size);
1775 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1776 	a_flags = attr_b->flags;
1777 
1778 	if (is_attr_ext(attr_b)) {
1779 		total_size = le64_to_cpu(attr_b->nres.total_size);
1780 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1781 	} else {
1782 		total_size = alloc_size;
1783 		mask = sbi->cluster_mask;
1784 	}
1785 
1786 	if ((vbo & mask) || (bytes & mask)) {
1787 		/* Allow to collapse only cluster aligned ranges. */
1788 		return -EINVAL;
1789 	}
1790 
1791 	if (vbo > data_size)
1792 		return -EINVAL;
1793 
1794 	down_write(&ni->file.run_lock);
1795 
1796 	if (vbo + bytes >= data_size) {
1797 		u64 new_valid = min(ni->i_valid, vbo);
1798 
1799 		/* Simple truncate file at 'vbo'. */
1800 		truncate_setsize(&ni->vfs_inode, vbo);
1801 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1802 				    &new_valid, true, NULL);
1803 
1804 		if (!err && new_valid < ni->i_valid)
1805 			ni->i_valid = new_valid;
1806 
1807 		goto out;
1808 	}
1809 
1810 	/*
1811 	 * Enumerate all attribute segments and collapse.
1812 	 */
1813 	alen = alloc_size >> sbi->cluster_bits;
1814 	vcn = vbo >> sbi->cluster_bits;
1815 	len = bytes >> sbi->cluster_bits;
1816 	end = vcn + len;
1817 	dealloc = 0;
1818 
1819 	svcn = le64_to_cpu(attr_b->nres.svcn);
1820 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1821 
1822 	if (svcn <= vcn && vcn < evcn1) {
1823 		attr = attr_b;
1824 		le = le_b;
1825 		mi = mi_b;
1826 	} else if (!le_b) {
1827 		err = -EINVAL;
1828 		goto out;
1829 	} else {
1830 		le = le_b;
1831 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1832 				    &mi);
1833 		if (!attr) {
1834 			err = -EINVAL;
1835 			goto out;
1836 		}
1837 
1838 		svcn = le64_to_cpu(attr->nres.svcn);
1839 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1840 	}
1841 
1842 	for (;;) {
1843 		if (svcn >= end) {
1844 			/* Shift VCN- */
1845 			attr->nres.svcn = cpu_to_le64(svcn - len);
1846 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1847 			if (le) {
1848 				le->vcn = attr->nres.svcn;
1849 				ni->attr_list.dirty = true;
1850 			}
1851 			mi->dirty = true;
1852 		} else if (svcn < vcn || end < evcn1) {
1853 			CLST vcn1, eat, next_svcn;
1854 
1855 			/* Collapse a part of this attribute segment. */
1856 			err = attr_load_runs(attr, ni, run, &svcn);
1857 			if (err)
1858 				goto out;
1859 			vcn1 = max(vcn, svcn);
1860 			eat = min(end, evcn1) - vcn1;
1861 
1862 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1863 						true);
1864 			if (err)
1865 				goto out;
1866 
1867 			if (!run_collapse_range(run, vcn1, eat)) {
1868 				err = -ENOMEM;
1869 				goto out;
1870 			}
1871 
1872 			if (svcn >= vcn) {
1873 				/* Shift VCN */
1874 				attr->nres.svcn = cpu_to_le64(vcn);
1875 				if (le) {
1876 					le->vcn = attr->nres.svcn;
1877 					ni->attr_list.dirty = true;
1878 				}
1879 			}
1880 
1881 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1882 			if (err)
1883 				goto out;
1884 
1885 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1886 			if (next_svcn + eat < evcn1) {
1887 				err = ni_insert_nonresident(
1888 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1889 					evcn1 - eat - next_svcn, a_flags, &attr,
1890 					&mi, &le);
1891 				if (err)
1892 					goto out;
1893 
1894 				/* Layout of records maybe changed. */
1895 				attr_b = NULL;
1896 			}
1897 
1898 			/* Free all allocated memory. */
1899 			run_truncate(run, 0);
1900 		} else {
1901 			u16 le_sz;
1902 			u16 roff = le16_to_cpu(attr->nres.run_off);
1903 
1904 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1905 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
1906 				      le32_to_cpu(attr->size) - roff);
1907 
1908 			/* Delete this attribute segment. */
1909 			mi_remove_attr(NULL, mi, attr);
1910 			if (!le)
1911 				break;
1912 
1913 			le_sz = le16_to_cpu(le->size);
1914 			if (!al_remove_le(ni, le)) {
1915 				err = -EINVAL;
1916 				goto out;
1917 			}
1918 
1919 			if (evcn1 >= alen)
1920 				break;
1921 
1922 			if (!svcn) {
1923 				/* Load next record that contains this attribute. */
1924 				if (ni_load_mi(ni, le, &mi)) {
1925 					err = -EINVAL;
1926 					goto out;
1927 				}
1928 
1929 				/* Look for required attribute. */
1930 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1931 						    0, &le->id);
1932 				if (!attr) {
1933 					err = -EINVAL;
1934 					goto out;
1935 				}
1936 				goto next_attr;
1937 			}
1938 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1939 		}
1940 
1941 		if (evcn1 >= alen)
1942 			break;
1943 
1944 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1945 		if (!attr) {
1946 			err = -EINVAL;
1947 			goto out;
1948 		}
1949 
1950 next_attr:
1951 		svcn = le64_to_cpu(attr->nres.svcn);
1952 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1953 	}
1954 
1955 	if (!attr_b) {
1956 		le_b = NULL;
1957 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1958 				      &mi_b);
1959 		if (!attr_b) {
1960 			err = -ENOENT;
1961 			goto out;
1962 		}
1963 	}
1964 
1965 	data_size -= bytes;
1966 	valid_size = ni->i_valid;
1967 	if (vbo + bytes <= valid_size)
1968 		valid_size -= bytes;
1969 	else if (vbo < valid_size)
1970 		valid_size = vbo;
1971 
1972 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1973 	attr_b->nres.data_size = cpu_to_le64(data_size);
1974 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1975 	total_size -= (u64)dealloc << sbi->cluster_bits;
1976 	if (is_attr_ext(attr_b))
1977 		attr_b->nres.total_size = cpu_to_le64(total_size);
1978 	mi_b->dirty = true;
1979 
1980 	/* Update inode size. */
1981 	ni->i_valid = valid_size;
1982 	ni->vfs_inode.i_size = data_size;
1983 	inode_set_bytes(&ni->vfs_inode, total_size);
1984 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1985 	mark_inode_dirty(&ni->vfs_inode);
1986 
1987 out:
1988 	up_write(&ni->file.run_lock);
1989 	if (err)
1990 		_ntfs_bad_inode(&ni->vfs_inode);
1991 
1992 	return err;
1993 }
1994 
1995 /*
1996  * attr_punch_hole
1997  *
1998  * Not for normal files.
1999  */
2000 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2001 {
2002 	int err = 0;
2003 	struct runs_tree *run = &ni->file.run;
2004 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2005 	struct ATTRIB *attr = NULL, *attr_b;
2006 	struct ATTR_LIST_ENTRY *le, *le_b;
2007 	struct mft_inode *mi, *mi_b;
2008 	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2009 	u64 total_size, alloc_size;
2010 	u32 mask;
2011 	__le16 a_flags;
2012 	struct runs_tree run2;
2013 
2014 	if (!bytes)
2015 		return 0;
2016 
2017 	le_b = NULL;
2018 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2019 	if (!attr_b)
2020 		return -ENOENT;
2021 
2022 	if (!attr_b->non_res) {
2023 		u32 data_size = le32_to_cpu(attr->res.data_size);
2024 		u32 from, to;
2025 
2026 		if (vbo > data_size)
2027 			return 0;
2028 
2029 		from = vbo;
2030 		to = min_t(u64, vbo + bytes, data_size);
2031 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2032 		return 0;
2033 	}
2034 
2035 	if (!is_attr_ext(attr_b))
2036 		return -EOPNOTSUPP;
2037 
2038 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2039 	total_size = le64_to_cpu(attr_b->nres.total_size);
2040 
2041 	if (vbo >= alloc_size) {
2042 		/* NOTE: It is allowed. */
2043 		return 0;
2044 	}
2045 
2046 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2047 
2048 	bytes += vbo;
2049 	if (bytes > alloc_size)
2050 		bytes = alloc_size;
2051 	bytes -= vbo;
2052 
2053 	if ((vbo & mask) || (bytes & mask)) {
2054 		/* We have to zero a range(s). */
2055 		if (frame_size == NULL) {
2056 			/* Caller insists range is aligned. */
2057 			return -EINVAL;
2058 		}
2059 		*frame_size = mask + 1;
2060 		return E_NTFS_NOTALIGNED;
2061 	}
2062 
2063 	down_write(&ni->file.run_lock);
2064 	run_init(&run2);
2065 	run_truncate(run, 0);
2066 
2067 	/*
2068 	 * Enumerate all attribute segments and punch hole where necessary.
2069 	 */
2070 	alen = alloc_size >> sbi->cluster_bits;
2071 	vcn = vbo >> sbi->cluster_bits;
2072 	len = bytes >> sbi->cluster_bits;
2073 	end = vcn + len;
2074 	hole = 0;
2075 
2076 	svcn = le64_to_cpu(attr_b->nres.svcn);
2077 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2078 	a_flags = attr_b->flags;
2079 
2080 	if (svcn <= vcn && vcn < evcn1) {
2081 		attr = attr_b;
2082 		le = le_b;
2083 		mi = mi_b;
2084 	} else if (!le_b) {
2085 		err = -EINVAL;
2086 		goto bad_inode;
2087 	} else {
2088 		le = le_b;
2089 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2090 				    &mi);
2091 		if (!attr) {
2092 			err = -EINVAL;
2093 			goto bad_inode;
2094 		}
2095 
2096 		svcn = le64_to_cpu(attr->nres.svcn);
2097 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2098 	}
2099 
2100 	while (svcn < end) {
2101 		CLST vcn1, zero, hole2 = hole;
2102 
2103 		err = attr_load_runs(attr, ni, run, &svcn);
2104 		if (err)
2105 			goto done;
2106 		vcn1 = max(vcn, svcn);
2107 		zero = min(end, evcn1) - vcn1;
2108 
2109 		/*
2110 		 * Check range [vcn1 + zero).
2111 		 * Calculate how many clusters there are.
2112 		 * Don't do any destructive actions.
2113 		 */
2114 		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2115 		if (err)
2116 			goto done;
2117 
2118 		/* Check if required range is already hole. */
2119 		if (hole2 == hole)
2120 			goto next_attr;
2121 
2122 		/* Make a clone of run to undo. */
2123 		err = run_clone(run, &run2);
2124 		if (err)
2125 			goto done;
2126 
2127 		/* Make a hole range (sparse) [vcn1 + zero). */
2128 		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2129 			err = -ENOMEM;
2130 			goto done;
2131 		}
2132 
2133 		/* Update run in attribute segment. */
2134 		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2135 		if (err)
2136 			goto done;
2137 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2138 		if (next_svcn < evcn1) {
2139 			/* Insert new attribute segment. */
2140 			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2141 						    next_svcn,
2142 						    evcn1 - next_svcn, a_flags,
2143 						    &attr, &mi, &le);
2144 			if (err)
2145 				goto undo_punch;
2146 
2147 			/* Layout of records maybe changed. */
2148 			attr_b = NULL;
2149 		}
2150 
2151 		/* Real deallocate. Should not fail. */
2152 		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2153 
2154 next_attr:
2155 		/* Free all allocated memory. */
2156 		run_truncate(run, 0);
2157 
2158 		if (evcn1 >= alen)
2159 			break;
2160 
2161 		/* Get next attribute segment. */
2162 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2163 		if (!attr) {
2164 			err = -EINVAL;
2165 			goto bad_inode;
2166 		}
2167 
2168 		svcn = le64_to_cpu(attr->nres.svcn);
2169 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2170 	}
2171 
2172 done:
2173 	if (!hole)
2174 		goto out;
2175 
2176 	if (!attr_b) {
2177 		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2178 				      &mi_b);
2179 		if (!attr_b) {
2180 			err = -EINVAL;
2181 			goto bad_inode;
2182 		}
2183 	}
2184 
2185 	total_size -= (u64)hole << sbi->cluster_bits;
2186 	attr_b->nres.total_size = cpu_to_le64(total_size);
2187 	mi_b->dirty = true;
2188 
2189 	/* Update inode size. */
2190 	inode_set_bytes(&ni->vfs_inode, total_size);
2191 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2192 	mark_inode_dirty(&ni->vfs_inode);
2193 
2194 out:
2195 	run_close(&run2);
2196 	up_write(&ni->file.run_lock);
2197 	return err;
2198 
2199 bad_inode:
2200 	_ntfs_bad_inode(&ni->vfs_inode);
2201 	goto out;
2202 
2203 undo_punch:
2204 	/*
2205 	 * Restore packed runs.
2206 	 * 'mi_pack_runs' should not fail, cause we restore original.
2207 	 */
2208 	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2209 		goto bad_inode;
2210 
2211 	goto done;
2212 }
2213 
2214 /*
2215  * attr_insert_range - Insert range (hole) in file.
2216  * Not for normal files.
2217  */
2218 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2219 {
2220 	int err = 0;
2221 	struct runs_tree *run = &ni->file.run;
2222 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2223 	struct ATTRIB *attr = NULL, *attr_b;
2224 	struct ATTR_LIST_ENTRY *le, *le_b;
2225 	struct mft_inode *mi, *mi_b;
2226 	CLST vcn, svcn, evcn1, len, next_svcn;
2227 	u64 data_size, alloc_size;
2228 	u32 mask;
2229 	__le16 a_flags;
2230 
2231 	if (!bytes)
2232 		return 0;
2233 
2234 	le_b = NULL;
2235 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2236 	if (!attr_b)
2237 		return -ENOENT;
2238 
2239 	if (!is_attr_ext(attr_b)) {
2240 		/* It was checked above. See fallocate. */
2241 		return -EOPNOTSUPP;
2242 	}
2243 
2244 	if (!attr_b->non_res) {
2245 		data_size = le32_to_cpu(attr_b->res.data_size);
2246 		alloc_size = data_size;
2247 		mask = sbi->cluster_mask; /* cluster_size - 1 */
2248 	} else {
2249 		data_size = le64_to_cpu(attr_b->nres.data_size);
2250 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2251 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2252 	}
2253 
2254 	if (vbo > data_size) {
2255 		/* Insert range after the file size is not allowed. */
2256 		return -EINVAL;
2257 	}
2258 
2259 	if ((vbo & mask) || (bytes & mask)) {
2260 		/* Allow to insert only frame aligned ranges. */
2261 		return -EINVAL;
2262 	}
2263 
2264 	/*
2265 	 * valid_size <= data_size <= alloc_size
2266 	 * Check alloc_size for maximum possible.
2267 	 */
2268 	if (bytes > sbi->maxbytes_sparse - alloc_size)
2269 		return -EFBIG;
2270 
2271 	vcn = vbo >> sbi->cluster_bits;
2272 	len = bytes >> sbi->cluster_bits;
2273 
2274 	down_write(&ni->file.run_lock);
2275 
2276 	if (!attr_b->non_res) {
2277 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2278 				    data_size + bytes, NULL, false, NULL);
2279 
2280 		le_b = NULL;
2281 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2282 				      &mi_b);
2283 		if (!attr_b) {
2284 			err = -EINVAL;
2285 			goto bad_inode;
2286 		}
2287 
2288 		if (err)
2289 			goto out;
2290 
2291 		if (!attr_b->non_res) {
2292 			/* Still resident. */
2293 			char *data = Add2Ptr(attr_b, attr_b->res.data_off);
2294 
2295 			memmove(data + bytes, data, bytes);
2296 			memset(data, 0, bytes);
2297 			goto done;
2298 		}
2299 
2300 		/* Resident files becomes nonresident. */
2301 		data_size = le64_to_cpu(attr_b->nres.data_size);
2302 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2303 	}
2304 
2305 	/*
2306 	 * Enumerate all attribute segments and shift start vcn.
2307 	 */
2308 	a_flags = attr_b->flags;
2309 	svcn = le64_to_cpu(attr_b->nres.svcn);
2310 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2311 
2312 	if (svcn <= vcn && vcn < evcn1) {
2313 		attr = attr_b;
2314 		le = le_b;
2315 		mi = mi_b;
2316 	} else if (!le_b) {
2317 		err = -EINVAL;
2318 		goto bad_inode;
2319 	} else {
2320 		le = le_b;
2321 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2322 				    &mi);
2323 		if (!attr) {
2324 			err = -EINVAL;
2325 			goto bad_inode;
2326 		}
2327 
2328 		svcn = le64_to_cpu(attr->nres.svcn);
2329 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2330 	}
2331 
2332 	run_truncate(run, 0); /* clear cached values. */
2333 	err = attr_load_runs(attr, ni, run, NULL);
2334 	if (err)
2335 		goto out;
2336 
2337 	if (!run_insert_range(run, vcn, len)) {
2338 		err = -ENOMEM;
2339 		goto out;
2340 	}
2341 
2342 	/* Try to pack in current record as much as possible. */
2343 	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2344 	if (err)
2345 		goto out;
2346 
2347 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2348 
2349 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2350 	       attr->type == ATTR_DATA && !attr->name_len) {
2351 		le64_add_cpu(&attr->nres.svcn, len);
2352 		le64_add_cpu(&attr->nres.evcn, len);
2353 		if (le) {
2354 			le->vcn = attr->nres.svcn;
2355 			ni->attr_list.dirty = true;
2356 		}
2357 		mi->dirty = true;
2358 	}
2359 
2360 	if (next_svcn < evcn1 + len) {
2361 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2362 					    next_svcn, evcn1 + len - next_svcn,
2363 					    a_flags, NULL, NULL, NULL);
2364 
2365 		le_b = NULL;
2366 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2367 				      &mi_b);
2368 		if (!attr_b) {
2369 			err = -EINVAL;
2370 			goto bad_inode;
2371 		}
2372 
2373 		if (err) {
2374 			/* ni_insert_nonresident failed. Try to undo. */
2375 			goto undo_insert_range;
2376 		}
2377 	}
2378 
2379 	/*
2380 	 * Update primary attribute segment.
2381 	 */
2382 	if (vbo <= ni->i_valid)
2383 		ni->i_valid += bytes;
2384 
2385 	attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
2386 	attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
2387 
2388 	/* ni->valid may be not equal valid_size (temporary). */
2389 	if (ni->i_valid > data_size + bytes)
2390 		attr_b->nres.valid_size = attr_b->nres.data_size;
2391 	else
2392 		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2393 	mi_b->dirty = true;
2394 
2395 done:
2396 	ni->vfs_inode.i_size += bytes;
2397 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2398 	mark_inode_dirty(&ni->vfs_inode);
2399 
2400 out:
2401 	run_truncate(run, 0); /* clear cached values. */
2402 
2403 	up_write(&ni->file.run_lock);
2404 
2405 	return err;
2406 
2407 bad_inode:
2408 	_ntfs_bad_inode(&ni->vfs_inode);
2409 	goto out;
2410 
2411 undo_insert_range:
2412 	svcn = le64_to_cpu(attr_b->nres.svcn);
2413 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2414 
2415 	if (svcn <= vcn && vcn < evcn1) {
2416 		attr = attr_b;
2417 		le = le_b;
2418 		mi = mi_b;
2419 	} else if (!le_b) {
2420 		goto bad_inode;
2421 	} else {
2422 		le = le_b;
2423 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2424 				    &mi);
2425 		if (!attr) {
2426 			goto bad_inode;
2427 		}
2428 
2429 		svcn = le64_to_cpu(attr->nres.svcn);
2430 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2431 	}
2432 
2433 	if (attr_load_runs(attr, ni, run, NULL))
2434 		goto bad_inode;
2435 
2436 	if (!run_collapse_range(run, vcn, len))
2437 		goto bad_inode;
2438 
2439 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2440 		goto bad_inode;
2441 
2442 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2443 	       attr->type == ATTR_DATA && !attr->name_len) {
2444 		le64_sub_cpu(&attr->nres.svcn, len);
2445 		le64_sub_cpu(&attr->nres.evcn, len);
2446 		if (le) {
2447 			le->vcn = attr->nres.svcn;
2448 			ni->attr_list.dirty = true;
2449 		}
2450 		mi->dirty = true;
2451 	}
2452 
2453 	goto out;
2454 }
2455