xref: /linux/fs/ntfs3/run.c (revision f8d87ed9f0d546ac5b05e8e7d2b148d4b77599fa)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: try to use extents tree (instead of array)
7  */
8 
9 #include <linux/blkdev.h>
10 #include <linux/buffer_head.h>
11 #include <linux/fs.h>
12 #include <linux/nls.h>
13 
14 #include "debug.h"
15 #include "ntfs.h"
16 #include "ntfs_fs.h"
17 
18 /* runs_tree is contiguous memory. Try to keep it small. */
19 #define NTFS3_RUN_MAX_BYTES 0x10000
20 
21 struct ntfs_run {
22 	CLST vcn; /* virtual cluster number */
23 	CLST len; /* length in clusters */
24 	CLST lcn; /* logical cluster number */
25 };
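
/*
 * Illustrative arithmetic (an addition, assuming a 32-bit CLST, i.e.
 * CONFIG_NTFS3_64BIT_CLUSTER disabled): sizeof(struct ntfs_run) == 12,
 * so the soft cap above allows about 0x10000 / 12 == 5461 runs per tree.
 */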
26 
27 /*
28  * run_lookup
29  *
30  * Lookup the entry containing the given vcn.
31  * On success it returns true and sets 'index' to the index
32  * of the entry found.
33  * If no entry contains vcn, it returns false and sets 'index'
34  * to the insertion position for such an entry.
35  */
36 bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
37 {
38 	size_t min_idx, max_idx, mid_idx;
39 	struct ntfs_run *r;
40 
41 	if (!run->count) {
42 		*index = 0;
43 		return false;
44 	}
45 
46 	min_idx = 0;
47 	max_idx = run->count - 1;
48 
49 	/* Check the boundary cases first, since they cover the most frequent requests. */
50 	r = run->runs;
51 	if (vcn < r->vcn) {
52 		*index = 0;
53 		return false;
54 	}
55 
56 	if (vcn < r->vcn + r->len) {
57 		*index = 0;
58 		return true;
59 	}
60 
61 	r += max_idx;
62 	if (vcn >= r->vcn + r->len) {
63 		*index = run->count;
64 		return false;
65 	}
66 
67 	if (vcn >= r->vcn) {
68 		*index = max_idx;
69 		return true;
70 	}
71 
72 	do {
73 		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
74 		r = run->runs + mid_idx;
75 
76 		if (vcn < r->vcn) {
77 			max_idx = mid_idx - 1;
78 			if (!mid_idx)
79 				break;
80 		} else if (vcn >= r->vcn + r->len) {
81 			min_idx = mid_idx + 1;
82 		} else {
83 			*index = mid_idx;
84 			return true;
85 		}
86 	} while (min_idx <= max_idx);
87 
88 	*index = max_idx + 1;
89 	return false;
90 }
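
/*
 * Usage sketch (illustrative, not built): assume a hypothetical tree with
 * two runs covering vcn 0..9 and vcn 20..29.
 */
#if 0
	size_t idx;

	run_lookup(run, 25, &idx);	/* true:  idx == 1, inside the 2nd run */
	run_lookup(run, 15, &idx);	/* false: idx == 1, insertion position */
	run_lookup(run, 40, &idx);	/* false: idx == run->count            */
#endif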
91 
92 /*
93  * run_consolidate
94  *
95  * Consolidate runs starting from a given one.
96  */
97 static void run_consolidate(struct runs_tree *run, size_t index)
98 {
99 	size_t i;
100 	struct ntfs_run *r = run->runs + index;
101 
102 	while (index + 1 < run->count) {
103 		/*
104 		 * Merge the current run with the next one
105 		 * if the start of the next run lies inside the one being tested.
106 		 */
107 		struct ntfs_run *n = r + 1;
108 		CLST end = r->vcn + r->len;
109 		CLST dl;
110 
111 		/* Stop if there is a gap between the runs. */
112 		if (n->vcn > end)
113 			break;
114 
115 		dl = end - n->vcn;
116 
117 		/*
118 		 * If the range at 'index' overlaps the next one,
119 		 * then either adjust the next run's start position
120 		 * or (if it is completely covered) just remove it from the list.
121 		 */
122 		if (dl > 0) {
123 			if (n->len <= dl)
124 				goto remove_next_range;
125 
126 			n->len -= dl;
127 			n->vcn += dl;
128 			if (n->lcn != SPARSE_LCN)
129 				n->lcn += dl;
130 			dl = 0;
131 		}
132 
133 		/*
134 		 * If the sparse mode of the current and next runs
135 		 * differs, skip to the next run.
136 		 */
137 		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
138 			index += 1;
139 			r = n;
140 			continue;
141 		}
142 
143 		/*
144 		 * Stop if the lcn of the next run does not
145 		 * continue directly after the last cluster
146 		 * of the current run.
147 		 */
148 		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
149 			break;
150 
151 		/*
152 		 * The next and current runs are contiguous.
153 		 * Join them.
154 		 */
155 		r->len += n->len - dl;
156 
157 remove_next_range:
158 		i = run->count - (index + 1);
159 		if (i > 1)
160 			memmove(n, n + 1, sizeof(*n) * (i - 1));
161 
162 		run->count -= 1;
163 	}
164 }
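
/*
 * Worked example (hypothetical contents): consolidating after an
 * overlapping insert at index 0. Before:
 *
 *	{ .vcn = 0, .len = 8, .lcn = 100 }
 *	{ .vcn = 4, .len = 8, .lcn = 104 }  (head duplicates the tail above)
 *
 * The overlap dl == 4 trims the second run to { .vcn = 8, .len = 4,
 * .lcn = 108 }; since lcn 108 == 100 + 8 continues the first run, the
 * two are joined into a single { .vcn = 0, .len = 12, .lcn = 100 }.
 */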
165 
166 /* Returns true if the range [svcn, evcn] is fully mapped. */
167 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
168 {
169 	size_t i;
170 	const struct ntfs_run *r, *end;
171 	CLST next_vcn;
172 
173 	if (!run_lookup(run, svcn, &i))
174 		return false;
175 
176 	end = run->runs + run->count;
177 	r = run->runs + i;
178 
179 	for (;;) {
180 		next_vcn = r->vcn + r->len;
181 		if (next_vcn > evcn)
182 			return true;
183 
184 		if (++r >= end)
185 			return false;
186 
187 		if (r->vcn != next_vcn)
188 			return false;
189 	}
190 }
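
/*
 * Usage sketch (illustrative, not built): assume a hypothetical tree with
 * runs covering vcn 0..9 and vcn 10..19, i.e. no gap between them.
 */
#if 0
	run_is_mapped_full(run, 5, 15);	/* true:  5..15 is fully covered  */
	run_is_mapped_full(run, 5, 25);	/* false: the mapping stops at 19 */
#endif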
191 
192 bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
193 		      CLST *len, size_t *index)
194 {
195 	size_t idx;
196 	CLST gap;
197 	struct ntfs_run *r;
198 
199 	/* Fail immediately if the run was not touched yet. */
200 	if (!run->runs)
201 		return false;
202 
203 	if (!run_lookup(run, vcn, &idx))
204 		return false;
205 
206 	r = run->runs + idx;
207 
208 	if (vcn >= r->vcn + r->len)
209 		return false;
210 
211 	gap = vcn - r->vcn;
212 	if (r->len <= gap)
213 		return false;
214 
215 	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
216 
217 	if (len)
218 		*len = r->len - gap;
219 	if (index)
220 		*index = idx;
221 
222 	return true;
223 }
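
/*
 * Usage sketch (illustrative, not built): assume a hypothetical run
 * { .vcn = 16, .len = 8, .lcn = 200 } is present in the tree.
 */
#if 0
	CLST lcn, len;
	size_t idx;

	run_lookup_entry(run, 20, &lcn, &len, &idx);
	/* true: lcn == 204 (200 + gap of 4), len == 4 clusters remaining */
#endif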
224 
225 /*
226  * run_truncate_head
227  *
228  * Decommit the range before vcn.
229  */
230 void run_truncate_head(struct runs_tree *run, CLST vcn)
231 {
232 	size_t index;
233 	struct ntfs_run *r;
234 
235 	if (run_lookup(run, vcn, &index)) {
236 		r = run->runs + index;
237 
238 		if (vcn > r->vcn) {
239 			CLST dlen = vcn - r->vcn;
240 
241 			r->vcn = vcn;
242 			r->len -= dlen;
243 			if (r->lcn != SPARSE_LCN)
244 				r->lcn += dlen;
245 		}
246 
247 		if (!index)
248 			return;
249 	}
250 	r = run->runs;
251 	memmove(r, r + index, sizeof(*r) * (run->count - index));
252 
253 	run->count -= index;
254 
255 	if (!run->count) {
256 		ntfs_vfree(run->runs);
257 		run->runs = NULL;
258 		run->allocated = 0;
259 	}
260 }
261 
262 /*
263  * run_truncate
264  *
265  * Decommit the range after vcn.
266  */
267 void run_truncate(struct runs_tree *run, CLST vcn)
268 {
269 	size_t index;
270 
271 	/*
272 	 * If the lookup hits a range,
273 	 * that range has to be truncated.
274 	 * If the truncated range becomes empty,
275 	 * it is removed entirely.
276 	 */
277 	if (run_lookup(run, vcn, &index)) {
278 		struct ntfs_run *r = run->runs + index;
279 
280 		r->len = vcn - r->vcn;
281 
282 		if (r->len > 0)
283 			index += 1;
284 	}
285 
286 	/*
287 	 * At this point 'index' is set to the first
288 	 * position that should be thrown away (including 'index' itself).
289 	 * Simple: just set the new count.
290 	 */
291 	run->count = index;
292 
293 	/* Do not reallocate the array 'runs'; only free it when it becomes empty. */
294 	if (!index) {
295 		ntfs_vfree(run->runs);
296 		run->runs = NULL;
297 		run->allocated = 0;
298 	}
299 }
300 
301 /* Trim the head and tail if necessary. */
302 void run_truncate_around(struct runs_tree *run, CLST vcn)
303 {
304 	run_truncate_head(run, vcn);
305 
306 	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
307 		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
308 }
309 
310 /*
311  * run_add_entry
312  *
313  * Sets the location to a known state.
314  * The run to be added may overlap with an existing location.
315  * Returns false on out of memory.
316  */
317 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
318 		   bool is_mft)
319 {
320 	size_t used, index;
321 	struct ntfs_run *r;
322 	bool inrange;
323 	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
324 	bool should_add_tail = false;
325 
326 	/*
327 	 * Lookup the insertion point.
328 	 *
329 	 * Binary search for the entry containing
330 	 * the start position in question.
331 	 */
332 	inrange = run_lookup(run, vcn, &index);
333 
334 	/*
335 	 * Shortcut: the range was not found,
336 	 * but the one being added directly
337 	 * continues the previous run.
338 	 * In this case the existing range can be
339 	 * used directly as the start point.
340 	 */
341 	if (!inrange && index > 0) {
342 		struct ntfs_run *t = run->runs + index - 1;
343 
344 		if (t->vcn + t->len == vcn &&
345 		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
346 		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
347 			inrange = true;
348 			index -= 1;
349 		}
350 	}
351 
352 	/*
353 	 * At this point 'index' either points to the range
354 	 * containing the start position or to the insertion position
355 	 * for a new range.
356 	 * So first check whether the probed range is already here.
357 	 */
358 	if (!inrange) {
359 requires_new_range:
360 		/*
361 		 * Range was not found.
362 		 * Insert at position 'index'
363 		 */
364 		used = run->count * sizeof(struct ntfs_run);
365 
366 		/*
367 		 * Check the allocated space.
368 		 * If it is not enough for one more entry,
369 		 * the array is reallocated.
370 		 */
371 		if (run->allocated < used + sizeof(struct ntfs_run)) {
372 			size_t bytes;
373 			struct ntfs_run *new_ptr;
374 
375 			/* Use a power of 2 for 'bytes'. */
376 			if (!used) {
377 				bytes = 64;
378 			} else if (used <= 16 * PAGE_SIZE) {
379 				if (is_power_of2(run->allocated))
380 					bytes = run->allocated << 1;
381 				else
382 					bytes = (size_t)1
383 						<< (2 + blksize_bits(used));
384 			} else {
385 				bytes = run->allocated + (16 * PAGE_SIZE);
386 			}
387 
388 			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
389 
390 			new_ptr = ntfs_vmalloc(bytes);
391 
392 			if (!new_ptr)
393 				return false;
394 
395 			r = new_ptr + index;
396 			memcpy(new_ptr, run->runs,
397 			       index * sizeof(struct ntfs_run));
398 			memcpy(r + 1, run->runs + index,
399 			       sizeof(struct ntfs_run) * (run->count - index));
400 
401 			ntfs_vfree(run->runs);
402 			run->runs = new_ptr;
403 			run->allocated = bytes;
404 
405 		} else {
406 			size_t i = run->count - index;
407 
408 			r = run->runs + index;
409 
410 			/* memmove appears to be a bottleneck here... */
411 			if (i > 0)
412 				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
413 		}
414 
415 		r->vcn = vcn;
416 		r->lcn = lcn;
417 		r->len = len;
418 		run->count += 1;
419 	} else {
420 		r = run->runs + index;
421 
422 		/*
423 		 * If one of the ranges was not allocated,
424 		 * the location just matched has to be split
425 		 * and the current one inserted.
426 		 * In the common case this requires the tail
427 		 * to be reinserted by a recursive call.
428 		 */
429 		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
430 		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
431 			CLST to_eat = vcn - r->vcn;
432 			CLST Tovcn = to_eat + len;
433 
434 			should_add_tail = Tovcn < r->len;
435 
436 			if (should_add_tail) {
437 				tail_lcn = r->lcn == SPARSE_LCN
438 						   ? SPARSE_LCN
439 						   : (r->lcn + Tovcn);
440 				tail_vcn = r->vcn + Tovcn;
441 				tail_len = r->len - Tovcn;
442 			}
443 
444 			if (to_eat > 0) {
445 				r->len = to_eat;
446 				inrange = false;
447 				index += 1;
448 				goto requires_new_range;
449 			}
450 
451 			/* lcn should match the one being added. */
452 			r->lcn = lcn;
453 		}
454 
455 		/*
456 		 * If the existing range fits, we are done.
457 		 * Otherwise extend the found range and fall through to the join code.
458 		 */
459 		if (r->vcn + r->len < vcn + len)
460 			r->len += len - ((r->vcn + r->len) - vcn);
461 	}
462 
463 	/*
464 	 * Normalize starting from the insertion point.
465 	 * It is possible that no insertion was needed, in case
466 	 * the start point lies within the range of the entry
467 	 * that 'index' points to.
468 	 */
469 	if (inrange && index > 0)
470 		index -= 1;
471 	run_consolidate(run, index);
472 	run_consolidate(run, index + 1);
473 
474 	/*
475 	 * A special case:
476 	 * an extra range has to be added as a tail.
477 	 */
478 	if (should_add_tail &&
479 	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
480 		return false;
481 
482 	return true;
483 }
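
/*
 * Usage sketch (illustrative, not built): remapping the middle of a
 * hypothetical run { .vcn = 0, .len = 10, .lcn = 100 } exercises the
 * split-and-add-tail path above.
 */
#if 0
	run_add_entry(run, 4, 500, 2, false);
	/*
	 * Result - three runs:
	 *	{ .vcn = 0, .len = 4, .lcn = 100 }	kept head
	 *	{ .vcn = 4, .len = 2, .lcn = 500 }	new entry
	 *	{ .vcn = 6, .len = 4, .lcn = 106 }	tail, re-added recursively
	 */
#endif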
484 
485 /* Helper for attr_collapse_range, which is a helper for fallocate(collapse_range). */
486 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
487 {
488 	size_t index, eat;
489 	struct ntfs_run *r, *e, *eat_start, *eat_end;
490 	CLST end;
491 
492 	if (WARN_ON(!run_lookup(run, vcn, &index)))
493 		return true; /* should never be here */
494 
495 	e = run->runs + run->count;
496 	r = run->runs + index;
497 	end = vcn + len;
498 
499 	if (vcn > r->vcn) {
500 		if (r->vcn + r->len <= end) {
501 			/* Collapse the tail of the run. */
502 			r->len = vcn - r->vcn;
503 		} else if (r->lcn == SPARSE_LCN) {
504 			/* Collapse a middle part of a sparse run. */
505 			r->len -= len;
506 		} else {
507 			/* Collapse a middle part of a normal run: split it. */
508 			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
509 				return false;
510 			return run_collapse_range(run, vcn, len);
511 		}
512 
513 		r += 1;
514 	}
515 
516 	eat_start = r;
517 	eat_end = r;
518 
519 	for (; r < e; r++) {
520 		CLST d;
521 
522 		if (r->vcn >= end) {
523 			r->vcn -= len;
524 			continue;
525 		}
526 
527 		if (r->vcn + r->len <= end) {
528 			/* eat this run */
529 			eat_end = r + 1;
530 			continue;
531 		}
532 
533 		d = end - r->vcn;
534 		if (r->lcn != SPARSE_LCN)
535 			r->lcn += d;
536 		r->len -= d;
537 		r->vcn -= len - d;
538 	}
539 
540 	eat = eat_end - eat_start;
541 	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
542 	run->count -= eat;
543 
544 	return true;
545 }
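
/*
 * Usage sketch (illustrative, not built): collapsing 10 clusters at vcn 5
 * from the hypothetical runs { .vcn = 0, .len = 10, .lcn = 100 } and
 * { .vcn = 10, .len = 10, .lcn = 300 }.
 */
#if 0
	run_collapse_range(run, 5, 10);
	/*
	 * The first run loses its tail, the second its head, and the
	 * remainder shifts down by 'len':
	 *	{ .vcn = 0, .len = 5, .lcn = 100 }
	 *	{ .vcn = 5, .len = 5, .lcn = 305 }
	 */
#endif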
546 
547 /*
548  * run_get_entry
549  *
550  * Returns the index-th mapped region.
551  */
552 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
553 		   CLST *lcn, CLST *len)
554 {
555 	const struct ntfs_run *r;
556 
557 	if (index >= run->count)
558 		return false;
559 
560 	r = run->runs + index;
561 
562 	if (!r->len)
563 		return false;
564 
565 	if (vcn)
566 		*vcn = r->vcn;
567 	if (lcn)
568 		*lcn = r->lcn;
569 	if (len)
570 		*len = r->len;
571 	return true;
572 }
573 
574 /*
575  * run_packed_size
576  *
577  * Calculates the size in bytes of a packed 64-bit integer.
578  */
579 #ifdef __BIG_ENDIAN
580 static inline int run_packed_size(const s64 n)
581 {
582 	const u8 *p = (const u8 *)&n + sizeof(n) - 1;
583 
584 	if (n >= 0) {
585 		if (p[-7] || p[-6] || p[-5] || p[-4])
586 			p -= 4;
587 		if (p[-3] || p[-2])
588 			p -= 2;
589 		if (p[-1])
590 			p -= 1;
591 		if (p[0] & 0x80)
592 			p -= 1;
593 	} else {
594 		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
595 		    p[-4] != 0xff)
596 			p -= 4;
597 		if (p[-3] != 0xff || p[-2] != 0xff)
598 			p -= 2;
599 		if (p[-1] != 0xff)
600 			p -= 1;
601 		if (!(p[0] & 0x80))
602 			p -= 1;
603 	}
604 	return (const u8 *)&n + sizeof(n) - p;
605 }
606 
607 /* Fully trusted function. It does not check 'size' for errors. */
608 static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
609 {
610 	const u8 *p = (u8 *)&v;
611 
612 	switch (size) {
613 	case 8:
614 		run_buf[7] = p[0];
615 		fallthrough;
616 	case 7:
617 		run_buf[6] = p[1];
618 		fallthrough;
619 	case 6:
620 		run_buf[5] = p[2];
621 		fallthrough;
622 	case 5:
623 		run_buf[4] = p[3];
624 		fallthrough;
625 	case 4:
626 		run_buf[3] = p[4];
627 		fallthrough;
628 	case 3:
629 		run_buf[2] = p[5];
630 		fallthrough;
631 	case 2:
632 		run_buf[1] = p[6];
633 		fallthrough;
634 	case 1:
635 		run_buf[0] = p[7];
636 	}
637 }
638 
639 /* Fully trusted function. It does not check 'size' for errors. */
640 static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
641 {
642 	u8 *p = (u8 *)&v;
643 
644 	switch (size) {
645 	case 8:
646 		p[0] = run_buf[7];
647 		fallthrough;
648 	case 7:
649 		p[1] = run_buf[6];
650 		fallthrough;
651 	case 6:
652 		p[2] = run_buf[5];
653 		fallthrough;
654 	case 5:
655 		p[3] = run_buf[4];
656 		fallthrough;
657 	case 4:
658 		p[4] = run_buf[3];
659 		fallthrough;
660 	case 3:
661 		p[5] = run_buf[2];
662 		fallthrough;
663 	case 2:
664 		p[6] = run_buf[1];
665 		fallthrough;
666 	case 1:
667 		p[7] = run_buf[0];
668 	}
669 	return v;
670 }
671 
672 #else
673 
674 static inline int run_packed_size(const s64 n)
675 {
676 	const u8 *p = (const u8 *)&n;
677 
678 	if (n >= 0) {
679 		if (p[7] || p[6] || p[5] || p[4])
680 			p += 4;
681 		if (p[3] || p[2])
682 			p += 2;
683 		if (p[1])
684 			p += 1;
685 		if (p[0] & 0x80)
686 			p += 1;
687 	} else {
688 		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
689 		    p[4] != 0xff)
690 			p += 4;
691 		if (p[3] != 0xff || p[2] != 0xff)
692 			p += 2;
693 		if (p[1] != 0xff)
694 			p += 1;
695 		if (!(p[0] & 0x80))
696 			p += 1;
697 	}
698 
699 	return 1 + p - (const u8 *)&n;
700 }
701 
702 /* Fully trusted function. It does not check 'size' for errors. */
703 static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
704 {
705 	const u8 *p = (u8 *)&v;
706 
707 	/* memcpy( run_buf, &v, size); is it faster? */
708 	switch (size) {
709 	case 8:
710 		run_buf[7] = p[7];
711 		fallthrough;
712 	case 7:
713 		run_buf[6] = p[6];
714 		fallthrough;
715 	case 6:
716 		run_buf[5] = p[5];
717 		fallthrough;
718 	case 5:
719 		run_buf[4] = p[4];
720 		fallthrough;
721 	case 4:
722 		run_buf[3] = p[3];
723 		fallthrough;
724 	case 3:
725 		run_buf[2] = p[2];
726 		fallthrough;
727 	case 2:
728 		run_buf[1] = p[1];
729 		fallthrough;
730 	case 1:
731 		run_buf[0] = p[0];
732 	}
733 }
734 
735 /* Fully trusted function. It does not check 'size' for errors. */
736 static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
737 {
738 	u8 *p = (u8 *)&v;
739 
740 	/* memcpy( &v, run_buf, size); is it faster? */
741 	switch (size) {
742 	case 8:
743 		p[7] = run_buf[7];
744 		fallthrough;
745 	case 7:
746 		p[6] = run_buf[6];
747 		fallthrough;
748 	case 6:
749 		p[5] = run_buf[5];
750 		fallthrough;
751 	case 5:
752 		p[4] = run_buf[4];
753 		fallthrough;
754 	case 4:
755 		p[3] = run_buf[3];
756 		fallthrough;
757 	case 3:
758 		p[2] = run_buf[2];
759 		fallthrough;
760 	case 2:
761 		p[1] = run_buf[1];
762 		fallthrough;
763 	case 1:
764 		p[0] = run_buf[0];
765 	}
766 	return v;
767 }
768 #endif
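
/*
 * Illustrative values (an addition; both byte orders agree, since
 * run_packed_size() counts the significant bytes of a signed integer):
 *
 *	run_packed_size(0)      == 1
 *	run_packed_size(0x7f)   == 1
 *	run_packed_size(0x80)   == 2	(extra byte keeps the sign bit clear)
 *	run_packed_size(0x1234) == 2
 *	run_packed_size(-1)     == 1	(0xff already reads back as -1)
 */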
769 
770 /*
771  * run_pack
772  *
773  * Packs runs into a buffer.
774  * packed_vcns - how many clusters were packed.
775  * Returns 'packed_size' - how many bytes of run_buf were used.
776  */
777 int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
778 	     u32 run_buf_size, CLST *packed_vcns)
779 {
780 	CLST next_vcn, vcn, lcn;
781 	CLST prev_lcn = 0;
782 	CLST evcn1 = svcn + len;
783 	int packed_size = 0;
784 	size_t i;
785 	bool ok;
786 	s64 dlcn;
787 	int offset_size, size_size, tmp;
788 
789 	next_vcn = vcn = svcn;
790 
791 	*packed_vcns = 0;
792 
793 	if (!len)
794 		goto out;
795 
796 	ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
797 
798 	if (!ok)
799 		goto error;
800 
801 	if (next_vcn != vcn)
802 		goto error;
803 
804 	for (;;) {
805 		next_vcn = vcn + len;
806 		if (next_vcn > evcn1)
807 			len = evcn1 - vcn;
808 
809 		/* How many bytes are required to pack len. */
810 		size_size = run_packed_size(len);
811 
812 		/* offset_size - how many bytes dlcn is packed into. */
813 		if (lcn == SPARSE_LCN) {
814 			offset_size = 0;
815 			dlcn = 0;
816 		} else {
817 			/* NOTE: lcn can be less than prev_lcn! */
818 			dlcn = (s64)lcn - prev_lcn;
819 			offset_size = run_packed_size(dlcn);
820 			prev_lcn = lcn;
821 		}
822 
823 		tmp = run_buf_size - packed_size - 2 - offset_size;
824 		if (tmp <= 0)
825 			goto out;
826 
827 		/* Can we store this entire run? */
828 		if (tmp < size_size)
829 			goto out;
830 
831 		if (run_buf) {
832 			/* Pack the run header. */
833 			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
834 			run_buf += 1;
835 
836 			/* Pack the length of the run. */
837 			run_pack_s64(run_buf, size_size, len);
838 
839 			run_buf += size_size;
840 			/* Pack the offset from the previous lcn. */
841 			run_pack_s64(run_buf, offset_size, dlcn);
842 			run_buf += offset_size;
843 		}
844 
845 		packed_size += 1 + offset_size + size_size;
846 		*packed_vcns += len;
847 
848 		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
849 			goto out;
850 
851 		ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
852 		if (!ok)
853 			goto error;
854 
855 		if (next_vcn != vcn)
856 			goto error;
857 	}
858 
859 out:
860 	/* Store the terminating zero. */
861 	if (run_buf)
862 		run_buf[0] = 0;
863 
864 	return packed_size + 1;
865 
866 error:
867 	return -EOPNOTSUPP;
868 }
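
/*
 * Worked example (hypothetical run list): a single run of 0x18 clusters
 * at lcn 0x1234 with prev_lcn == 0 packs into 5 bytes:
 *
 *	21       header: size_size == 1 | (offset_size == 2) << 4
 *	18       len = 0x18, packed in 1 byte
 *	34 12    dlcn = +0x1234, little endian, 2 bytes
 *	00       terminating zero header
 *
 * A sparse run stores offset_size == 0 and no dlcn bytes at all.
 */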
869 
870 /*
871  * run_unpack
872  *
873  * Unpacks packed runs from "run_buf".
874  * Returns a negative error code, or the number of bytes actually used.
875  */
876 int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
877 	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
878 	       u32 run_buf_size)
879 {
880 	u64 prev_lcn, vcn64, lcn, next_vcn;
881 	const u8 *run_last, *run_0;
882 	bool is_mft = ino == MFT_REC_MFT;
883 
884 	/* Check for empty */
885 	if (evcn + 1 == svcn)
886 		return 0;
887 
888 	if (evcn < svcn)
889 		return -EINVAL;
890 
891 	run_0 = run_buf;
892 	run_last = run_buf + run_buf_size;
893 	prev_lcn = 0;
894 	vcn64 = svcn;
895 
896 	/* Read all runs in the chain. */
898 	while (run_buf < run_last) {
899 		/* size_size - how many bytes len is packed into. */
900 		u8 size_size = *run_buf & 0xF;
901 		/* offset_size - how many bytes dlcn is packed into. */
902 		u8 offset_size = *run_buf++ >> 4;
903 		u64 len;
904 
905 		if (!size_size)
906 			break;
907 
908 		/*
909 		 * Unpack runs.
910 		 * NOTE: runs are stored in little-endian order.
911 		 * "len" is an unsigned value, "dlcn" is signed.
912 		 * A large positive number requires 5 bytes to store,
913 		 * e.g.: 05 FF 7E FF FF 00 00 00
914 		 */
915 		if (size_size > 8)
916 			return -EINVAL;
917 
918 		len = run_unpack_s64(run_buf, size_size, 0);
919 		/* skip size_size */
920 		run_buf += size_size;
921 
922 		if (!len)
923 			return -EINVAL;
924 
925 		if (!offset_size)
926 			lcn = SPARSE_LCN64;
927 		else if (offset_size <= 8) {
928 			s64 dlcn;
929 
930 			/* The initial value of dlcn is -1 or 0 (sign extension). */
931 			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
932 			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
933 			/* skip offset_size */
934 			run_buf += offset_size;
935 
936 			if (!dlcn)
937 				return -EINVAL;
938 			lcn = prev_lcn + dlcn;
939 			prev_lcn = lcn;
940 		} else
941 			return -EINVAL;
942 
943 		next_vcn = vcn64 + len;
944 		/* check boundary */
945 		if (next_vcn > evcn + 1)
946 			return -EINVAL;
947 
948 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
949 		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
950 			ntfs_err(
951 				sbi->sb,
952 				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
953 				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
954 				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
955 				vcn64, lcn, len);
956 			return -EOPNOTSUPP;
957 		}
958 #endif
959 		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
960 			/* The lcn range is outside the volume. */
961 			return -EINVAL;
962 		}
963 
964 		if (!run)
965 			; /* called from check_attr(fslog.c) to check run */
966 		else if (run == RUN_DEALLOCATE) {
967 			/* called from ni_delete_all to free clusters without storing in run */
968 			if (lcn != SPARSE_LCN64)
969 				mark_as_free_ex(sbi, lcn, len, true);
970 		} else if (vcn64 >= vcn) {
971 			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
972 				return -ENOMEM;
973 		} else if (next_vcn > vcn) {
974 			u64 dlen = vcn - vcn64;
975 
976 			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
977 					   is_mft))
978 				return -ENOMEM;
979 		}
980 
981 		vcn64 = next_vcn;
982 	}
983 
984 	if (vcn64 != evcn + 1) {
985 		/* Unexpected length of unpacked runs. */
986 		return -EINVAL;
987 	}
988 
989 	return run_buf - run_0;
990 }
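
/*
 * Worked example (hypothetical buffer): feeding the 5 bytes
 * "21 18 34 12 00" to run_unpack() with svcn == 0 and evcn == 0x17
 * yields the single run { .vcn = 0, .len = 0x18, .lcn = 0x1234 } and
 * returns 5. The sign of dlcn comes from bit 7 of its top byte (0x12
 * here), so "34 12" reads back as +0x1234.
 */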
991 
992 #ifdef NTFS3_CHECK_FREE_CLST
993 /*
994  * run_unpack_ex
995  *
996  * Unpacks packed runs from "run_buf".
997  * Checks the unpacked runs against the volume bitmap.
998  * Returns a negative error code, or the number of bytes actually used.
999  */
1000 int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
1001 		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
1002 		  u32 run_buf_size)
1003 {
1004 	int ret, err;
1005 	CLST next_vcn, lcn, len;
1006 	size_t index;
1007 	bool ok;
1008 	struct wnd_bitmap *wnd;
1009 
1010 	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
1011 	if (ret <= 0)
1012 		return ret;
1013 
1014 	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
1015 		return ret;
1016 
1017 	if (ino == MFT_REC_BADCLUST)
1018 		return ret;
1019 
1020 	next_vcn = vcn = svcn;
1021 	wnd = &sbi->used.bitmap;
1022 
1023 	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
1024 	     next_vcn <= evcn;
1025 	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
1026 		if (!ok || next_vcn != vcn)
1027 			return -EINVAL;
1028 
1029 		next_vcn = vcn + len;
1030 
1031 		if (lcn == SPARSE_LCN)
1032 			continue;
1033 
1034 		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
1035 			continue;
1036 
1037 		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
1038 		/* Check that the clusters are marked as used. */
1039 		ok = wnd_is_used(wnd, lcn, len);
1040 		up_read(&wnd->rw_lock);
1041 		if (ok)
1042 			continue;
1043 
1044 		/* Looks like the volume is corrupted. */
1045 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1046 
1047 		if (down_write_trylock(&wnd->rw_lock)) {
1048 			/* Mark all zero bits as used in the range [lcn, lcn+len). */
1049 			CLST i, lcn_f = 0, len_f = 0;
1050 
1051 			err = 0;
1052 			for (i = 0; i < len; i++) {
1053 				if (wnd_is_free(wnd, lcn + i, 1)) {
1054 					if (!len_f)
1055 						lcn_f = lcn + i;
1056 					len_f += 1;
1057 				} else if (len_f) {
1058 					err = wnd_set_used(wnd, lcn_f, len_f);
1059 					len_f = 0;
1060 					if (err)
1061 						break;
1062 				}
1063 			}
1064 
1065 			if (len_f)
1066 				err = wnd_set_used(wnd, lcn_f, len_f);
1067 
1068 			up_write(&wnd->rw_lock);
1069 			if (err)
1070 				return err;
1071 		}
1072 	}
1073 
1074 	return ret;
1075 }
1076 #endif
1077 
1078 /*
1079  * run_get_highest_vcn
1080  *
1081  * Returns the highest vcn from a mapping pairs array.
1082  * It is used while replaying the log file.
1083  */
1084 int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
1085 {
1086 	u64 vcn64 = vcn;
1087 	u8 size_size;
1088 
1089 	while ((size_size = *run_buf & 0xF)) {
1090 		u8 offset_size = *run_buf++ >> 4;
1091 		u64 len;
1092 
1093 		if (size_size > 8 || offset_size > 8)
1094 			return -EINVAL;
1095 
1096 		len = run_unpack_s64(run_buf, size_size, 0);
1097 		if (!len)
1098 			return -EINVAL;
1099 
1100 		run_buf += size_size + offset_size;
1101 		vcn64 += len;
1102 
1103 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
1104 		if (vcn64 > 0x100000000ull)
1105 			return -EINVAL;
1106 #endif
1107 	}
1108 
1109 	*highest_vcn = vcn64 - 1;
1110 	return 0;
1111 }
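
/*
 * Usage sketch (illustrative, not built): for the same hypothetical
 * buffer "21 18 34 12 00" starting at vcn 0:
 */
#if 0
	u64 highest;

	run_get_highest_vcn(0, run_buf, &highest);	/* highest == 0x17 */
#endif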
1112