// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NTFS runlist handling code.
 *
 * Copyright (c) 2001-2007 Anton Altaparmakov
 * Copyright (c) 2002-2005 Richard Russon
 * Copyright (c) 2025 LG Electronics Co., Ltd.
 *
 * Part of this file is based on code from the NTFS-3G project
 * and is copyrighted by the respective authors below:
 * Copyright (c) 2002-2005 Anton Altaparmakov
 * Copyright (c) 2002-2005 Richard Russon
 * Copyright (c) 2002-2008 Szabolcs Szakacsits
 * Copyright (c) 2004 Yura Pakhuchiy
 * Copyright (c) 2007-2022 Jean-Pierre Andre
 */

#include <linux/overflow.h>

#include "ntfs.h"
#include "attrib.h"

/*
 * ntfs_rl_mm - runlist memmove
 * @base: base runlist array
 * @dst: destination index in @base
 * @src: source index in @base
 * @size: number of elements to move
 *
 * It is up to the caller to serialize access to the runlist @base.
 */
static inline void ntfs_rl_mm(struct runlist_element *base, int dst, int src, int size)
{
	if (likely((dst != src) && (size > 0)))
		memmove(base + dst, base + src, size * sizeof(*base));
}

/*
 * ntfs_rl_mc - runlist memory copy
 * @dstbase: destination runlist array
 * @dst: destination index in @dstbase
 * @srcbase: source runlist array
 * @src: source index in @srcbase
 * @size: number of elements to copy
 *
 * It is up to the caller to serialize access to the runlists @dstbase and
 * @srcbase.
 */
static inline void ntfs_rl_mc(struct runlist_element *dstbase, int dst,
		struct runlist_element *srcbase, int src, int size)
{
	if (likely(size > 0))
		memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
}
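
/*
 * Illustrative example (not part of the driver): with a four-element
 * array @rl, dropping element 1 by shifting elements 2..3 down one slot,
 * and copying the first two elements of @src over @dst[0..1]:
 *
 *	ntfs_rl_mm(rl, 1, 2, 2);
 *	ntfs_rl_mc(dst, 0, src, 0, 2);
 */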

/*
 * ntfs_rl_realloc - Reallocate memory for runlists
 * @rl:		original runlist
 * @old_size:	number of runlist elements in the original runlist @rl
 * @new_size:	number of runlist elements we need space for
 *
 * As the runlists grow, more memory will be required.  To prevent the
 * kernel from having to allocate and reallocate large numbers of small
 * bits of memory, this function returns an entire page of memory.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * N.B.  If the new allocation is the same size as the old one, the
 *       function returns the original pointer.
 *
 * On success, return a pointer to the newly allocated, or recycled, memory.
 * On error, return -errno.
 */
struct runlist_element *ntfs_rl_realloc(struct runlist_element *rl,
		int old_size, int new_size)
{
	struct runlist_element *new_rl;

	old_size = old_size * sizeof(*rl);
	new_size = new_size * sizeof(*rl);
	if (old_size == new_size)
		return rl;

	new_rl = kvzalloc(new_size, GFP_NOFS);
	if (unlikely(!new_rl))
		return ERR_PTR(-ENOMEM);

	if (likely(rl != NULL)) {
		if (unlikely(old_size > new_size))
			old_size = new_size;
		memcpy(new_rl, rl, old_size);
		kvfree(rl);
	}
	return new_rl;
}
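
/*
 * Usage sketch (illustrative only): growing a runlist from 4 to 6
 * elements.  The returned pointer replaces the old one on success and
 * must be checked with IS_ERR():
 *
 *	rl = ntfs_rl_realloc(rl, 4, 6);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 */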

/*
 * ntfs_rl_realloc_nofail - Reallocate memory for runlists
 * @rl:		original runlist
 * @old_size:	number of runlist elements in the original runlist @rl
 * @new_size:	number of runlist elements we need space for
 *
 * As the runlists grow, more memory will be required.  To prevent the
 * kernel from having to allocate and reallocate large numbers of small
 * bits of memory, this function returns an entire page of memory.
 *
 * This function guarantees that the allocation will succeed.  It will sleep
 * for as long as it takes to complete the allocation.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * N.B.  If the new allocation is the same size as the old one, the
 *       function returns the original pointer.
 *
 * Return a pointer to the newly allocated, or recycled, memory.  This
 * function cannot fail.
 */
static inline struct runlist_element *ntfs_rl_realloc_nofail(struct runlist_element *rl,
		int old_size, int new_size)
{
	struct runlist_element *new_rl;

	old_size = old_size * sizeof(*rl);
	new_size = new_size * sizeof(*rl);
	if (old_size == new_size)
		return rl;

	new_rl = kvmalloc(new_size, GFP_NOFS | __GFP_NOFAIL);
	if (likely(rl != NULL)) {
		if (unlikely(old_size > new_size))
			old_size = new_size;
		memcpy(new_rl, rl, old_size);
		kvfree(rl);
	}
	return new_rl;
}

/*
 * ntfs_are_rl_mergeable - test if two runlists can be joined together
 * @dst:	original runlist
 * @src:	new runlist to test for mergeability with @dst
 *
 * Test if two runlists can be joined together. For this, their VCNs and LCNs
 * must be adjacent.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * Return: true   Success, the runlists can be merged.
 *	   false  Failure, the runlists cannot be merged.
 */
static inline bool ntfs_are_rl_mergeable(struct runlist_element *dst,
		struct runlist_element *src)
{
	/* We can merge unmapped regions even if they are misaligned. */
	if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
		return true;
	/* If the runs are misaligned, we cannot merge them. */
	if ((dst->vcn + dst->length) != src->vcn)
		return false;
	/* If both runs are non-sparse and contiguous, we can merge them. */
	if ((dst->lcn >= 0) && (src->lcn >= 0) &&
			((dst->lcn + dst->length) == src->lcn))
		return true;
	/* If we are merging two holes, we can merge them. */
	if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
		return true;
	/* If we are merging two delalloc runs, we can merge them. */
	if ((dst->lcn == LCN_DELALLOC) && (src->lcn == LCN_DELALLOC))
		return true;
	/* Cannot merge. */
	return false;
}
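
/*
 * Example (illustrative): { vcn 0, lcn 100, length 4 } followed by
 * { vcn 4, lcn 104, length 2 } is mergeable (VCNs and LCNs both
 * adjacent), whereas { vcn 0, lcn 100, length 4 } followed by
 * { vcn 4, lcn 200, length 2 } is not (VCNs adjacent, LCNs not).
 */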

/*
 * __ntfs_rl_merge - merge two runlists without testing if they can be merged
 * @dst:	original, destination runlist
 * @src:	new runlist to merge with @dst
 *
 * Merge the two runlists, writing into the destination runlist @dst. The
 * caller must make sure the runlists can be merged or this will corrupt the
 * destination runlist.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 */
static inline void __ntfs_rl_merge(struct runlist_element *dst, struct runlist_element *src)
{
	dst->length += src->length;
}

/*
 * ntfs_rl_append - append a runlist after a given element
 * @dst: destination runlist to append to
 * @dsize: number of elements in @dst
 * @src: source runlist to append from
 * @ssize: number of elements in @src
 * @loc: index in @dst after which to append @src
 * @new_size: on success, set to the new combined size
 *
 * Append the runlist @src after element @loc in @dst.  Merge the right end of
 * the new runlist, if necessary. Adjust the size of the hole before the
 * appended runlist.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_append(struct runlist_element *dst,
		int dsize, struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	bool right = false;	/* Right end of @src needs merging. */
	int marker;		/* End of the inserted runs. */

	/* First, check if the right hand end needs merging. */
	if ((loc + 1) < dsize)
		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);

	/* Space required: @dst size + @src size, less one if we merged. */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
	if (IS_ERR(dst))
		return dst;

	*new_size = dsize + ssize - right;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* First, merge the right hand end, if necessary. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);

	/* First run after the @src runs that have been inserted. */
	marker = loc + ssize + 1;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);

	/* Adjust the size of the preceding hole. */
	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;

	/* We may have changed the length of the file, so fix the end marker */
	if (dst[marker].lcn == LCN_ENOENT)
		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;

	return dst;
}
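
/*
 * Worked example (illustrative): appending @src, which maps vcns 15-19,
 * after a hole at @dst[loc] covering vcns 10-19 shortens the hole and
 * splices in the new runs:
 *
 *	before: ... [hole vcn 10 len 10] [next run] ...
 *	after:  ... [hole vcn 10 len 5] [src vcn 15 len 5] [next run] ...
 */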

/*
 * ntfs_rl_insert - insert a runlist into another
 * @dst: destination runlist to insert into
 * @dsize: number of elements in @dst
 * @src: source runlist to insert from
 * @ssize: number of elements in @src
 * @loc: index in @dst at which to insert @src
 * @new_size: on success, set to the new combined size
 *
 * Insert the runlist @src before element @loc in the runlist @dst. Merge the
 * left end of the new runlist, if necessary. Adjust the size of the hole
 * after the inserted runlist.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_insert(struct runlist_element *dst,
		int dsize, struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	bool left = false;	/* Left end of @src needs merging. */
	bool disc = false;	/* Discontinuity between @dst and @src. */
	int marker;		/* End of the inserted runs. */

	/*
	 * disc => Discontinuity between the end of @dst and the start of @src.
	 *	   This means we might need to insert a "not mapped" run.
	 */
	if (loc == 0)
		disc = (src[0].vcn > 0);
	else {
		s64 merged_length;

		left = ntfs_are_rl_mergeable(dst + loc - 1, src);

		merged_length = dst[loc - 1].length;
		if (left)
			merged_length += src->length;

		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
	}
	/*
	 * Space required: @dst size + @src size, less one if we merged, plus
	 * one if there was a discontinuity.
	 */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
	if (IS_ERR(dst))
		return dst;

	*new_size = dsize + ssize - left + disc;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlist.
	 */
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * First run after the @src runs that have been inserted.
	 * Nominally,  @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.  And if @disc, then @dst and @src do
	 * not meet and we need an extra run to fill the gap.
	 */
	marker = loc + ssize - left + disc;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);

	/* Adjust the VCN of the first run after the insertion... */
	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	/* ... and the length. */
	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED ||
	    dst[marker].lcn == LCN_DELALLOC)
		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;

	/* Writing beyond the end of the file and there is a discontinuity. */
	if (disc) {
		if (loc > 0) {
			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
		} else {
			dst[loc].vcn = 0;
			dst[loc].length = dst[loc + 1].vcn;
		}
		dst[loc].lcn = LCN_RL_NOT_MAPPED;
	}
	return dst;
}
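
/*
 * Worked example (illustrative): inserting @src, which maps vcns 8-9,
 * when the run before @loc ends at vcn 5 leaves a gap over vcns 5-7;
 * the discontinuity is filled with an LCN_RL_NOT_MAPPED run:
 *
 *	after: ... [run ending at vcn 5] [not mapped vcn 5 len 3]
 *	           [src vcn 8 len 2] ...
 */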

/*
 * ntfs_rl_replace - overwrite a runlist element with another runlist
 * @dst: destination runlist to replace in
 * @dsize: number of elements in @dst
 * @src: source runlist to replace with
 * @ssize: number of elements in @src
 * @loc: index in @dst to replace
 * @new_size: on success, set to the new combined size
 *
 * Replace the runlist element @dst at @loc with @src. Merge the left and
 * right ends of the inserted runlist, if necessary.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_replace(struct runlist_element *dst,
		int dsize, struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	int delta;
	bool left = false;	/* Left end of @src needs merging. */
	bool right = false;	/* Right end of @src needs merging. */
	int tail;		/* Start of tail of @dst. */
	int marker;		/* End of the inserted runs. */

	/* First, see if the left and right ends need merging. */
	if ((loc + 1) < dsize)
		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
	if (loc > 0)
		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
	/*
	 * Allocate some space.  We will need less if the left, right, or both
	 * ends get merged.  The -1 accounts for the run being replaced.
	 */
	delta = ssize - 1 - left - right;
	if (delta > 0) {
		dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
		if (IS_ERR(dst))
			return dst;
	}

	*new_size = dsize + delta;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* First, merge the left and right ends, if necessary. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * Offset of the tail of @dst.  This needs to be moved out of the way
	 * to make space for the runs to be copied from @src, i.e. the first
	 * run of the tail of @dst.
	 * Nominally, @tail equals @loc + 1, i.e. location, skipping the
	 * replaced run.  However, if @right, then one of @dst's runs is
	 * already merged into @src.
	 */
	tail = loc + right + 1;
	/*
	 * First run after the @src runs that have been inserted, i.e. where
	 * the tail of @dst needs to be moved to.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.
	 */
	marker = loc + ssize - left;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, tail, dsize - tail);
	ntfs_rl_mc(dst, loc, src, left, ssize - left);

	/* We may have changed the length of the file, so fix the end marker. */
	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	return dst;
}

/*
 * ntfs_rl_split - insert a runlist into the centre of a hole
 * @dst: destination runlist with a hole
 * @dsize: number of elements in @dst
 * @src: source runlist to insert
 * @ssize: number of elements in @src
 * @loc: index in @dst of the hole to split
 * @new_size: on success, set to the new combined size
 *
 * Split the runlist @dst at @loc into two and insert @src in between the two
 * fragments. No merging of runlists is necessary. Adjust the size of the
 * holes either side.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_split(struct runlist_element *dst, int dsize,
		struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	/* Space required: @dst size + @src size + one new hole. */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
	if (IS_ERR(dst))
		return dst;

	*new_size = dsize + ssize + 1;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);

	/* Adjust the size of the holes either side of @src. */
	dst[loc].length		= dst[loc+1].vcn       - dst[loc].vcn;
	dst[loc+ssize+1].vcn    = dst[loc+ssize].vcn   + dst[loc+ssize].length;
	dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;

	return dst;
}
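
/*
 * Worked example (illustrative): splitting a hole covering vcns 0-99
 * with @src mapping vcns 40-59, assuming the following run starts at
 * vcn 100:
 *
 *	before: [hole vcn 0 len 100] [next run vcn 100] ...
 *	after:  [hole vcn 0 len 40] [src vcn 40 len 20]
 *	        [hole vcn 60 len 40] [next run vcn 100] ...
 */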

/*
 * ntfs_runlists_merge - merge two runlists into one
 * @d_runlist: destination runlist structure to merge into
 * @srl: source runlist to merge from
 * @s_rl_count: number of elements in @srl (0 to auto-detect)
 * @new_rl_count: on success, set to the new combined runlist size
 *
 * First we sanity check the two runlists @srl and @drl (the element array of
 * @d_runlist) to make sure that they are sensible and can be merged. The
 * runlist @srl must be either after the runlist @drl or completely within a
 * hole (or unmapped region) in @drl.
 *
 * It is up to the caller to serialize access to the runlists @drl and @srl.
 *
 * Merging of runlists is necessary in two cases:
 *   1. When attribute lists are used and a further extent is being mapped.
 *   2. When new clusters are allocated to fill a hole or extend a file.
 *
 * There are four possible ways @srl can be merged. It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a variant
 * of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @drl but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified.
 */
struct runlist_element *ntfs_runlists_merge(struct runlist *d_runlist,
				     struct runlist_element *srl, size_t s_rl_count,
				     size_t *new_rl_count)
{
	int di, si;		/* Current index into @[ds]rl. */
	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;		/* Index into @drl at which to insert @srl. */
	int dend, send;		/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with lcn >= LCN_HOLE. */
	int marker = 0;
	s64 marker_vcn = 0;
	struct runlist_element *drl = d_runlist->rl, *rl;

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	if (s_rl_count == 0) {
		for (; srl[s_rl_count].length; s_rl_count++)
			;
		s_rl_count++;
	}

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Make room for a new element at the front. */
			drl = ntfs_rl_realloc(drl, s_rl_count, s_rl_count + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, s_rl_count);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
			s_rl_count++;
		}

		*new_rl_count = s_rl_count;
		goto finished;
	}

	if (d_runlist->count < 1 || s_rl_count < 2)
		return ERR_PTR(-EINVAL);

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	WARN_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs to
	 * be inserted. If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	rl = __ntfs_attr_find_vcn_nolock(d_runlist, srl[sstart].vcn);
	if (IS_ERR(rl))
		di = (int)d_runlist->count - 1;
	else
		di = (int)(rl - d_runlist->rl);
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Determine the last index of each runlist from its element count. */
	send = (int)s_rl_count - 1;
	dend = (int)d_runlist->count - 1;

	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
		;

	{
	bool start;
	bool finish;
	int ds = dend + 1;		/* Number of elements in drl & srl */
	int ss = sfinal - sstart + 1;

	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||    /* End of file   */
		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&    /* End of file   */
		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
		  (srl[send - 1].vcn + srl[send - 1].length)));

	/* Or we will lose an end marker. */
	if (finish && !drl[dins].length)
		ss++;
	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
		finish = false;

	if (start) {
		if (finish)
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins, new_rl_count);
		else
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins, new_rl_count);
	} else {
		if (finish)
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins, new_rl_count);
		else
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins, new_rl_count);
	}
	if (IS_ERR(drl)) {
		ntfs_error(NULL, "Merge failed.");
		return drl;
	}
	kvfree(srl);
	if (marker) {
		ntfs_debug("Triggering marker code.");
		for (ds = dend; drl[ds].length; ds++)
			;
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			int slots = 0;

			if (drl[ds].vcn == marker_vcn) {
				ntfs_debug("Old marker = 0x%llx, replacing with LCN_ENOENT.",
						drl[ds].lcn);
				drl[ds].lcn = LCN_ENOENT;
				goto finished;
			}
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == LCN_ENOENT) {
				ds--;
				slots = 1;
			}
			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds,
							ds + 2);
					slots = 2;
					*new_rl_count += 2;
				}
				ds++;
				/* Need to set vcn if it isn't set already. */
				if (slots != 1)
					drl[ds].vcn = drl[ds - 1].vcn +
							drl[ds - 1].length;
				drl[ds].lcn = LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
				slots--;
			}
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
			ds++;
			if (!slots) {
				drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
				*new_rl_count += 1;
			}
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = LCN_ENOENT;
			drl[ds].length = (s64)0;
		}
	}
	}

finished:
	/* The merge was completed successfully. */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}
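
/*
 * Usage sketch (illustrative; @ni is a hypothetical ntfs inode owning the
 * destination runlist, and the caller holds its runlist lock for writing):
 *
 *	size_t new_count;
 *	struct runlist_element *rl;
 *
 *	rl = ntfs_runlists_merge(&ni->runlist, srl, 0, &new_count);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 *	ni->runlist.rl = rl;
 *	ni->runlist.count = new_count;
 *
 * On failure both @srl and the old runlist are left intact.
 */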

/*
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol: ntfs volume
 * @attr: attribute record whose mapping pairs to decompress
 * @old_runlist: optional runlist to merge the decompressed runlist into
 * @new_rl_count: on success, set to the new runlist size
 *
 * It is up to the caller to serialize access to the runlist @old_runlist.
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist. On
 * success, return the decompressed runlist.
 *
 * If @old_runlist is not NULL, the decompressed runlist is inserted into the
 * appropriate place in @old_runlist and the resultant, combined runlist is
 * returned. The original @old_runlist is deallocated.
 *
 * On error, return -errno. @old_runlist is left unmodified in that case.
 */
struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume *vol,
		const struct attr_record *attr, struct runlist *old_runlist,
		size_t *new_rl_count)
{
	s64 vcn;		/* Current vcn. */
	s64 lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	struct runlist_element *rl, *new_rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of runlist elements. */
	u8 b;			/* Current byte offset in buf. */
	u64 lowest_vcn;		/* Raw on-disk lowest_vcn. */

#ifdef DEBUG
	/* Make sure attr exists and is non-resident. */
	if (!attr || !attr->non_resident) {
		ntfs_error(vol->sb, "Invalid arguments.");
		return ERR_PTR(-EINVAL);
	}
#endif
	lowest_vcn = le64_to_cpu(attr->data.non_resident.lowest_vcn);
	/* Validate lowest_vcn from on-disk metadata to ensure it is sane. */
	if (overflows_type(lowest_vcn, vcn)) {
		ntfs_error(vol->sb, "Invalid lowest_vcn in mapping pairs.");
		return ERR_PTR(-EIO);
	}
	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = lowest_vcn;
	lcn = 0;
	/* Get start of the mapping pairs array. */
	buf = (u8 *)attr +
		le16_to_cpu(attr->data.non_resident.mapping_pairs_offset);
	attr_end = (u8 *)attr + le32_to_cpu(attr->length);
	if (unlikely(buf < (u8 *)attr || buf > attr_end)) {
		ntfs_error(vol->sb, "Corrupt attribute.");
		return ERR_PTR(-EIO);
	}

	/* Current position in runlist array. */
	rlpos = 0;
	/* Allocate first page and set current runlist size to one page. */
	rl = kvzalloc(rlsize = PAGE_SIZE, GFP_NOFS);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	/* Insert unmapped starting element if necessary. */
	if (vcn) {
		rl->vcn = 0;
		rl->lcn = LCN_RL_NOT_MAPPED;
		rl->length = vcn;
		rlpos++;
	}
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements. kvzalloc()
		 * operates on whole pages only.
		 */
		if (((rlpos + 3) * sizeof(*rl)) > rlsize) {
			struct runlist_element *rl2;

			rl2 = kvzalloc(rlsize + PAGE_SIZE, GFP_NOFS);
			if (unlikely(!rl2)) {
				kvfree(rl);
				return ERR_PTR(-ENOMEM);
			}
			memcpy(rl2, rl, rlsize);
			kvfree(rl);
			rl = rl2;
			rlsize += PAGE_SIZE;
		}
		/* Enter the current vcn into the current runlist element. */
		rl[rlpos].vcn = vcn;
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we sign-extend negative
		 * values.  A negative run length doesn't make any sense, but
		 * hey, I didn't make up the NTFS specs and Windows NT4 treats
		 * the run length as a signed value so that's how it is...
		 */
		b = *buf & 0xf;
		if (b) {
			if (unlikely(buf + b > attr_end))
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			ntfs_error(vol->sb, "Missing length entry in mapping pairs array.");
			deltaxcn = (s64)-1;
		}
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean up and return an error.
		 */
		if (unlikely(deltaxcn < 0)) {
			ntfs_error(vol->sb, "Invalid length in mapping pairs array.");
			goto err_out;
		}
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/*
		 * Increment the current vcn by the current run length.
		 * Guard against s64 overflow from a crafted mapping
		 * pairs array to preserve the monotonically-increasing
		 * vcn invariant.
		 */
		if (unlikely(check_add_overflow(vcn, deltaxcn, &vcn))) {
			ntfs_error(vol->sb, "VCN overflow in mapping pairs array.");
			goto err_out;
		}

		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
		if (!(*buf & 0xf0))
			rl[rlpos].lcn = LCN_HOLE;
		else {
			/* Get the lcn change which really can be negative. */
			u8 b2 = *buf & 0xf;

			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to its new value. */
			lcn += deltaxcn;
#ifdef DEBUG
			/*
			 * On NTFS 1.2-, apparently one can have lcn == -1 to
			 * indicate a hole. But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1. So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (unlikely(deltaxcn == -1))
					ntfs_error(vol->sb, "lcn delta == -1");
				if (unlikely(lcn == -1))
					ntfs_error(vol->sb, "lcn == -1");
			}
#endif
			/* Check lcn is not below -1. */
			if (unlikely(lcn < -1)) {
				ntfs_error(vol->sb, "Invalid s64 < -1 in mapping pairs array.");
				goto err_out;
			}

			/* chkdsk accepts zero-sized runs only for holes. */
			if ((lcn != -1) && !rl[rlpos].length) {
				ntfs_error(vol->sb,
					   "Invalid zero-sized data run (lcn %lld).\n",
					   lcn);
				goto err_out;
			}

			/* Enter the current lcn into the runlist element. */
			rl[rlpos].lcn = lcn;
		}
		/* Get to the next runlist element, skipping zero-sized holes. */
		if (rl[rlpos].length)
			rlpos++;
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
	}
	if (unlikely(buf >= attr_end))
		goto io_error;
	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = le64_to_cpu(attr->data.non_resident.highest_vcn);
	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
		ntfs_error(vol->sb, "Corrupt mapping pairs array in non-resident attribute.");
		goto err_out;
	}
	/* Set up a not-mapped runlist element if this is the base extent. */
	if (!attr->data.non_resident.lowest_vcn) {
		s64 max_cluster;

		max_cluster = ((le64_to_cpu(attr->data.non_resident.allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits) - 1;
		/*
		 * A highest_vcn of zero means this is a single extent
		 * attribute so simply terminate the runlist with LCN_ENOENT.
		 */
		if (deltaxcn) {
			/*
			 * If there is a difference between the highest_vcn and
			 * the highest cluster, the runlist is either corrupt
			 * or, more likely, there are more extents following
			 * this one.
			 */
			if (deltaxcn < max_cluster) {
				ntfs_debug("More extents to follow; deltaxcn = 0x%llx, max_cluster = 0x%llx",
						deltaxcn, max_cluster);
				rl[rlpos].vcn = vcn;
				vcn += rl[rlpos].length = max_cluster -
						deltaxcn;
				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
				rlpos++;
			} else if (unlikely(deltaxcn > max_cluster)) {
				ntfs_error(vol->sb,
					   "Corrupt attribute. deltaxcn = 0x%llx, max_cluster = 0x%llx",
					   deltaxcn, max_cluster);
				goto mpa_err;
			}
		}
		rl[rlpos].lcn = LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

	/* Set up the terminating runlist element. */
	rl[rlpos].vcn = vcn;
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
	if (!old_runlist || !old_runlist->rl) {
		*new_rl_count = rlpos + 1;
		ntfs_debug("Mapping pairs array successfully decompressed:");
		ntfs_debug_dump_runlist(rl);
		return rl;
	}
	/* Now combine the new and old runlists checking for overlaps. */
	new_rl = ntfs_runlists_merge(old_runlist, rl, rlpos + 1, new_rl_count);
	if (!IS_ERR(new_rl))
		return new_rl;
	kvfree(rl);
	ntfs_error(vol->sb, "Failed to merge runlists.");
	return new_rl;
io_error:
	ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
	kvfree(rl);
	return ERR_PTR(-EIO);
}
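
/*
 * Worked example of the on-disk encoding (standard NTFS mapping pairs
 * format): each pair starts with a header byte whose low nibble is the
 * byte count of the run length field and whose high nibble is the byte
 * count of the lcn delta field, both little-endian and sign-extended.
 * The byte stream 21 18 34 56 00 thus decodes as header 0x21 (one length
 * byte, two lcn bytes), run length 0x18 = 24 clusters, and lcn delta
 * 0x5634 = 22068.  Starting from vcn 0 and lcn 0 this yields the single
 * run { vcn 0, lcn 22068, length 24 } followed by the 0x00 terminator.
 */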

/*
 * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
 * @rl:		runlist to use for conversion
 * @vcn:	vcn to convert
 *
 * Convert the virtual cluster number @vcn of an attribute into a logical
 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
 * corresponding lcns.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code		Meaning / Description
 * ==================================================
 *  LCN_HOLE		Hole / not allocated on disk.
 *  LCN_RL_NOT_MAPPED	This is part of the runlist which has not been
 *			inserted into the runlist yet.
 *  LCN_ENOENT		There is no such vcn in the attribute.
 *
 * Locking: - The caller must have locked the runlist (for reading or writing).
 *	    - This function does not touch the lock, nor does it modify the
 *	      runlist.
 */
s64 ntfs_rl_vcn_to_lcn(const struct runlist_element *rl, const s64 vcn)
{
	int i;

	/*
	 * If rl is NULL, assume that we have found an unmapped runlist. The
	 * caller can then attempt to map it and fail appropriately if
	 * necessary.
	 */
	if (unlikely(!rl))
		return LCN_RL_NOT_MAPPED;

	/* Catch out of lower bounds vcn. */
	if (unlikely(vcn < rl[0].vcn))
		return LCN_ENOENT;

	for (i = 0; likely(rl[i].length); i++) {
		if (vcn < rl[i+1].vcn) {
			if (likely(rl[i].lcn >= 0))
				return rl[i].lcn + (vcn - rl[i].vcn);
			return rl[i].lcn;
		}
	}
	/*
	 * The terminator element is set up to the correct value, i.e. one of
	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
	 */
	if (likely(rl[i].lcn < 0))
		return rl[i].lcn;
	/* Just in case... We could replace this with BUG() some day. */
	return LCN_ENOENT;
}
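
/*
 * Example (illustrative): for a runlist containing the run
 * { vcn 16, lcn 5000, length 8 }, ntfs_rl_vcn_to_lcn(rl, 19) returns
 * 5000 + (19 - 16) = 5003; a vcn falling inside a sparse run returns
 * LCN_HOLE instead.
 */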

/*
 * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
 * @rl:		runlist to search
 * @vcn:	vcn to find
 *
 * Find the virtual cluster number @vcn in the runlist @rl and return the
 * address of the runlist element containing the @vcn on success.
 *
 * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
 * the runlist.
 *
 * Locking: The runlist must be locked on entry.
 */
struct runlist_element *ntfs_rl_find_vcn_nolock(struct runlist_element *rl, const s64 vcn)
{
	if (unlikely(!rl || vcn < rl[0].vcn))
		return NULL;
	while (likely(rl->length)) {
		if (unlikely(vcn < rl[1].vcn)) {
			if (likely(rl->lcn >= LCN_HOLE))
				return rl;
			return NULL;
		}
		rl++;
	}
	if (likely(rl->lcn == LCN_ENOENT))
		return rl;
	return NULL;
}

/*
 * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
 * @n:		number for which to get the number of bytes
 *
 * Return the number of bytes required to store @n unambiguously as
 * a signed number.
 *
 * This is used in the context of the mapping pairs array to determine how
 * many bytes will be needed in the array to store a given logical cluster
 * number (lcn) or a specific run length.
 *
 * Return the number of bytes required.  This function cannot fail.
 */
static inline int ntfs_get_nr_significant_bytes(const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	i = 0;
	do {
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
		i++;
	return i;
}
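
/*
 * Worked examples: n = 0x7f needs one byte, but n = 0x80 needs two,
 * because the single byte 0x80 would read back as -128 and a 0x00 sign
 * byte must be added; n = -1 needs one byte (0xff) while n = -129 needs
 * two (0x7f 0xff).
 */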

/*
 * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
 * @vol: ntfs volume
 * @rl: runlist to calculate the mapping pairs array size for
 * @first_vcn: first vcn which to include in the mapping pairs array
 * @last_vcn: last vcn which to include in the mapping pairs array
 * @max_mp_size: maximum size to return (0 or less means unlimited)
 *
 * Walk the locked runlist @rl and calculate the size in bytes of the mapping
 * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
 * finishing with vcn @last_vcn.
 *
 * A @last_vcn of -1 means end of runlist and in that case the size of the
 * mapping pairs array corresponding to the runlist starting at vcn @first_vcn
 * and finishing at the end of the runlist is determined.
 *
 * This for example allows us to allocate a buffer of the right size when
 * building the mapping pairs array.
 *
 * If @rl is NULL, just return 1 (for the single terminator byte).
 *
 * Return the calculated size in bytes on success.  On error, return -errno.
 */
int ntfs_get_size_for_mapping_pairs(const struct ntfs_volume *vol,
		const struct runlist_element *rl, const s64 first_vcn,
		const s64 last_vcn, int max_mp_size)
{
	s64 prev_lcn;
	int rls;
	bool the_end = false;

	if (first_vcn < 0 || last_vcn < -1)
		return -EINVAL;

	if (last_vcn >= 0 && first_vcn > last_vcn)
		return -EINVAL;

	if (!rl) {
		WARN_ON(first_vcn);
		WARN_ON(last_vcn > 0);
		return 1;
	}
	if (max_mp_size <= 0)
		max_mp_size = INT_MAX;
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	prev_lcn = 0;
	/* Always need the terminating zero byte. */
	rls = 1;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(prev_lcn);
		}
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(rl->lcn -
					prev_lcn);
			prev_lcn = rl->lcn;
		}

		if (rls > max_mp_size)
			break;
	}
	return rls;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		rls = -EINVAL;
	else
		rls = -EIO;
	return rls;
}

/*
 * ntfs_write_significant_bytes - write the significant bytes of a number
 * @dst:	destination buffer to write to
 * @dst_max:	pointer to last byte of destination buffer for bounds checking
 * @n:		number whose significant bytes to write
 *
 * Store in @dst the minimum number of bytes of the number @n which are
 * required to identify @n unambiguously as a signed number, taking care not
 * to exceed @dst_max, the maximum position within @dst to which we are
 * allowed to write.
 *
 * This is used when building the mapping pairs array of a runlist to compress
 * a given logical cluster number (lcn) or a specific run length to the minimum
 * size possible.
 *
 * Return the number of bytes written on success.  On error, i.e. the
 * destination buffer @dst is too small, return -ENOSPC.
 */
static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
		const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	i = 0;
	do {
		if (unlikely(dst > dst_max))
			goto err_out;
		*dst++ = l & 0xffll;
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if (n < 0 && j >= 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)-1;
	} else if (n > 0 && j < 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)0;
	}
	return i;
err_out:
	return -ENOSPC;
}
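
/*
 * Worked examples: n = 0x5634 is emitted little-endian as the two bytes
 * 34 56; n = -2 is the single byte fe; n = 128 becomes 80 00, the extra
 * 0x00 keeping the most significant bit from being misread as a sign.
 */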

/*
 * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
 * @vol: ntfs volume
 * @dst: destination buffer to build mapping pairs array into
 * @dst_len: size of @dst in bytes
 * @rl: runlist to build the mapping pairs array from
 * @first_vcn: first vcn which to include in the mapping pairs array
 * @last_vcn: last vcn which to include in the mapping pairs array
 * @stop_vcn: on return, set to the first vcn outside the destination buffer
 * @stop_rl: on return, set to the runlist element where encoding stopped
 * @de_cluster_count: on return, set to the number of clusters encoded
 *
 * Create the mapping pairs array from the locked runlist @rl, starting at vcn
 * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
 * @dst_len is the size of @dst in bytes and it should be at least equal to the
 * value obtained by calling ntfs_get_size_for_mapping_pairs().
 *
 * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
 * array corresponding to the runlist starting at vcn @first_vcn and finishing
 * at the end of the runlist is created.
 *
 * If @rl is NULL, just write a single terminator byte to @dst.
 *
 * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
 * the first vcn outside the destination buffer.  Note that on error, @dst has
 * been filled with all the mapping pairs that will fit, thus it can be treated
 * as partial success, in that a new attribute extent needs to be created or
 * the next extent has to be used and the mapping pairs build has to be
 * continued with @first_vcn set to *@stop_vcn.
 *
 * Return 0 on success and -errno on error.  The following error codes are
 * defined:
 *	-EINVAL	- Run list contains unmapped elements.  Make sure to only pass
 *		  fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *	-ENOSPC	- The destination buffer is too small.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_mapping_pairs_build(const struct ntfs_volume *vol, s8 *dst,
		const int dst_len, const struct runlist_element *rl,
		const s64 first_vcn, const s64 last_vcn, s64 *const stop_vcn,
		struct runlist_element **stop_rl, unsigned int *de_cluster_count)
{
	s64 prev_lcn;
	s8 *dst_max, *dst_next;
	int err = -ENOSPC;
	bool the_end = false;
	s8 len_len, lcn_len;
	unsigned int de_cnt = 0;

	if (first_vcn < 0 || last_vcn < -1 || dst_len < 1)
		return -EINVAL;
	if (last_vcn >= 0 && first_vcn > last_vcn)
		return -EINVAL;

	if (!rl) {
		WARN_ON(first_vcn || last_vcn > 0);
		if (stop_vcn)
			*stop_vcn = 0;
		/* Terminator byte. */
		*dst = 0;
		return 0;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	/*
	 * @dst_max is used for bounds checking in
	 * ntfs_write_significant_bytes().
	 */
	dst_max = dst + dst_len - 1;
	prev_lcn = 0;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length - delta);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, rl->lcn - prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
			prev_lcn = rl->lcn;
		} else {
			if (rl->lcn == LCN_DELALLOC)
				de_cnt += rl->length;
			lcn_len = 0;
		}
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
	}
	/* Success. */
	if (de_cluster_count)
		*de_cluster_count = de_cnt;
	err = 0;
size_err:
	/* Set stop vcn. */
	if (stop_vcn)
		*stop_vcn = rl->vcn;
	if (stop_rl)
		*stop_rl = (struct runlist_element *)rl;
	/* Add terminator byte. */
	*dst = 0;
	return err;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		err = -EINVAL;
	else
		err = -EIO;
	return err;
}
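
/*
 * Usage sketch (illustrative; @buf and @buf_len are assumed to have been
 * sized via ntfs_get_size_for_mapping_pairs() and @rl is fully mapped
 * and locked):
 *
 *	s64 stop_vcn;
 *	int err;
 *
 *	err = ntfs_mapping_pairs_build(vol, buf, buf_len, rl, 0, -1,
 *			&stop_vcn, NULL, NULL);
 *
 * On -ENOSPC the build can be continued in the next attribute extent
 * with @first_vcn set to stop_vcn.
 */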
1476 
1477 /*
1478  * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
1479  * @vol:	ntfs volume (needed for error output)
1480  * @runlist:	runlist to truncate
1481  * @new_length:	the new length of the runlist in VCNs
1482  *
1483  * Truncate the runlist described by @runlist as well as the memory buffer
1484  * holding the runlist elements to a length of @new_length VCNs.
1485  *
1486  * If @new_length lies within the runlist, the runlist elements with VCNs of
1487  * @new_length and above are discarded.  As a special case if @new_length is
1488  * zero, the runlist is discarded and set to NULL.
1489  *
1490  * If @new_length lies beyond the runlist, a sparse runlist element is added to
1491  * the end of the runlist @runlist or if the last runlist element is a sparse
1492  * one already, this is extended.
1493  *
1494  * Note, no checking is done for unmapped runlist elements.  It is assumed that
1495  * the caller has mapped any elements that need to be mapped already.
1496  *
1497  * Return 0 on success and -errno on error.
1498  *
1499  * Locking: The caller must hold @runlist->lock for writing.
1500  */
ntfs_rl_truncate_nolock(const struct ntfs_volume * vol,struct runlist * const runlist,const s64 new_length)1501 int ntfs_rl_truncate_nolock(const struct ntfs_volume *vol, struct runlist *const runlist,
1502 		const s64 new_length)
1503 {
1504 	struct runlist_element *rl;
1505 	int old_size;
1506 
1507 	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
1508 
1509 	if (!runlist || new_length < 0)
1510 		return -EINVAL;
1511 
1512 	rl = runlist->rl;
1513 	if (new_length < rl->vcn)
1514 		return -EINVAL;
1515 
1516 	/* Find @new_length in the runlist. */
1517 	while (likely(rl->length && new_length >= rl[1].vcn))
1518 		rl++;
1519 	/*
1520 	 * If not at the end of the runlist we need to shrink it.
1521 	 * If at the end of the runlist we need to expand it.
1522 	 */
1523 	if (rl->length) {
1524 		struct runlist_element *trl;
1525 		bool is_end;
1526 
1527 		ntfs_debug("Shrinking runlist.");
1528 		/* Determine the runlist size. */
1529 		trl = rl + 1;
1530 		while (likely(trl->length))
1531 			trl++;
1532 		old_size = trl - runlist->rl + 1;
1533 		/* Truncate the run. */
1534 		rl->length = new_length - rl->vcn;
1535 		/*
1536 		 * If a run was partially truncated, make the following runlist
1537 		 * element a terminator.
1538 		 */
1539 		is_end = false;
1540 		if (rl->length) {
1541 			rl++;
1542 			if (!rl->length)
1543 				is_end = true;
1544 			rl->vcn = new_length;
1545 			rl->length = 0;
1546 		}
1547 		rl->lcn = LCN_ENOENT;
1548 		runlist->count = rl - runlist->rl + 1;
1549 		/* Reallocate memory if necessary. */
1550 		if (!is_end) {
1551 			int new_size = rl - runlist->rl + 1;
1552 
1553 			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
1554 			if (IS_ERR(rl))
1555 				ntfs_warning(vol->sb,
1556 					"Failed to shrink runlist buffer.  This just wastes a bit of memory temporarily so we ignore it and return success.");
1557 			else
1558 				runlist->rl = rl;
1559 		}
1560 	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
1561 		ntfs_debug("Expanding runlist.");
1562 		/*
1563 		 * If there is a previous runlist element and it is a sparse
1564 		 * one, extend it.  Otherwise need to add a new, sparse runlist
1565 		 * element.
1566 		 */
1567 		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
1568 			(rl - 1)->length = new_length - (rl - 1)->vcn;
1569 		else {
1570 			/* Determine the runlist size. */
1571 			old_size = rl - runlist->rl + 1;
1572 			/* Reallocate memory if necessary. */
1573 			rl = ntfs_rl_realloc(runlist->rl, old_size,
1574 					old_size + 1);
1575 			if (IS_ERR(rl)) {
1576 				ntfs_error(vol->sb, "Failed to expand runlist buffer, aborting.");
1577 				return PTR_ERR(rl);
1578 			}
1579 			runlist->rl = rl;
1580 			/*
1581 			 * Set @rl to the same runlist element in the new
1582 			 * runlist as before in the old runlist.
1583 			 */
1584 			rl += old_size - 1;
1585 			/* Add a new, sparse runlist element. */
1586 			rl->lcn = LCN_HOLE;
1587 			rl->length = new_length - rl->vcn;
1588 			/* Add a new terminator runlist element. */
1589 			rl++;
1590 			rl->length = 0;
1591 			runlist->count = old_size + 1;
1592 		}
1593 		rl->vcn = new_length;
1594 		rl->lcn = LCN_ENOENT;
1595 	} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
1596 		/* Runlist already has same size as requested. */
1597 		rl->lcn = LCN_ENOENT;
1598 	}
1599 	ntfs_debug("Done.");
1600 	return 0;
1601 }
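
/*
 * Example of the two main cases above (illustrative values): given the
 * mapped runlist
 *
 *	(vcn 0, lcn 100, len 10), (vcn 10, LCN_ENOENT, len 0)
 *
 * truncating to new_length 4 shrinks the run to length 4 and moves the
 * terminator to vcn 4, while truncating to new_length 16 appends a sparse
 * run (vcn 10, LCN_HOLE, len 6) and moves the terminator to vcn 16.
 */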
1602 
1603 /*
1604  * ntfs_rl_sparse - check whether a runlist has sparse regions or not
1605  * @rl:         runlist to check
1606  *
1607  * Return 1 if the runlist has sparse regions, 0 if it does not, and -errno on error.
1608  */
1609 int ntfs_rl_sparse(struct runlist_element *rl)
1610 {
1611 	struct runlist_element *rlc;
1612 
1613 	if (!rl)
1614 		return -EINVAL;
1615 
1616 	for (rlc = rl; rlc->length; rlc++)
1617 		if (rlc->lcn < 0) {
1618 			if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) {
1619 				pr_err("%s: bad runlist\n", __func__);
1620 				return -EINVAL;
1621 			}
1622 			return 1;
1623 		}
1624 	return 0;
1625 }
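
/*
 * For example (illustrative values), ntfs_rl_sparse() returns 1 for
 *
 *	(vcn 0, lcn 100, len 4), (vcn 4, LCN_HOLE, len 2), terminator
 *
 * and 0 if the hole is replaced by a mapped run.
 */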
1626 
1627 /*
1628  * ntfs_rl_get_compressed_size - calculate the size of the non-sparse regions
1629  * @vol:        ntfs volume (needed for the cluster size)
1630  * @rl:         runlist to calculate for
1631  *
1632  * Return the compressed size in bytes or -errno on error.
1633  */
1634 s64 ntfs_rl_get_compressed_size(struct ntfs_volume *vol, struct runlist_element *rl)
1635 {
1636 	struct runlist_element *rlc;
1637 	s64 ret = 0;
1638 
1639 	if (!rl)
1640 		return -EINVAL;
1641 
1642 	for (rlc = rl; rlc->length; rlc++) {
1643 		if (rlc->lcn < 0) {
1644 			if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) {
1645 				ntfs_error(vol->sb, "%s: bad runlist, rlc->lcn: %lld",
1646 						__func__, rlc->lcn);
1647 				return -EINVAL;
1648 			}
1649 		} else
1650 			ret += rlc->length;
1651 	}
1652 	return NTFS_CLU_TO_B(vol, ret);
1653 }
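
/*
 * For example (illustrative values), for the runlist
 *
 *	(lcn 100, len 4), (LCN_HOLE, len 8), (lcn 200, len 2), terminator
 *
 * only the two mapped runs are counted, so the result is
 * NTFS_CLU_TO_B(vol, 6), i.e. six clusters converted to bytes.
 */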
1654 
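/*
 * ntfs_rle_lcn_contiguous - check whether two neighbouring runs can be merged
 * @left_rle:	run covering the lower vcns
 * @right_rle:	run immediately following @left_rle
 *
 * Return true if @right_rle's clusters start where @left_rle's end on disk,
 * or if both runs are sparse (LCN_HOLE), i.e. if the two runs could be
 * described by a single runlist element.
 */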
1655 static inline bool ntfs_rle_lcn_contiguous(struct runlist_element *left_rle,
1656 					   struct runlist_element *right_rle)
1657 {
1658 	if (left_rle->lcn > LCN_HOLE &&
1659 	    left_rle->lcn + left_rle->length == right_rle->lcn)
1660 		return true;
1661 	else if (left_rle->lcn == LCN_HOLE && right_rle->lcn == LCN_HOLE)
1662 		return true;
1663 	else
1664 		return false;
1665 }
1666 
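/*
 * ntfs_rle_contain - check whether a vcn lies inside a runlist element
 * @rle:	runlist element to check
 * @vcn:	vcn to look for
 *
 * Return true if @vcn lies in [@rle->vcn, @rle->vcn + @rle->length).
 */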
1667 static inline bool ntfs_rle_contain(struct runlist_element *rle, s64 vcn)
1668 {
1669 	if (rle->length > 0 &&
1670 	    vcn >= rle->vcn && vcn < rle->vcn + rle->length)
1671 		return true;
1672 	else
1673 		return false;
1674 }
1675 
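/*
 * ntfs_rl_insert_range - insert one runlist into another at a given vcn
 * @dst_rl:	destination runlist, terminated by an LCN_ENOENT element
 * @dst_cnt:	number of elements in @dst_rl including the terminator
 * @src_rl:	runs to insert, starting at vcn src_rl[0].vcn
 * @src_cnt:	number of elements in @src_rl
 * @new_rl_cnt:	on success, the number of elements in the returned runlist
 *
 * Split @dst_rl at src_rl[0].vcn, insert the @src_rl runs there and shift
 * the vcns of all following runs up by the inserted length, merging
 * neighbouring runs whose lcns are contiguous or which are both holes.
 *
 * On success, return the newly allocated runlist and free both @dst_rl and
 * @src_rl.  On error, return ERR_PTR(-errno) and leave the inputs alone.
 */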
1676 struct runlist_element *ntfs_rl_insert_range(struct runlist_element *dst_rl, int dst_cnt,
1677 				      struct runlist_element *src_rl, int src_cnt,
1678 				      size_t *new_rl_cnt)
1679 {
1680 	struct runlist_element *i_rl, *new_rl, *src_rl_origin = src_rl;
1681 	struct runlist_element dst_rl_split;
1682 	s64 start_vcn;
1683 	int new_1st_cnt, new_2nd_cnt, new_3rd_cnt, new_cnt;
1684 
1685 	if (!dst_rl || !src_rl || !new_rl_cnt)
1686 		return ERR_PTR(-EINVAL);
1687 	if (dst_cnt <= 0 || src_cnt <= 0)
1688 		return ERR_PTR(-EINVAL);
1689 	if (!(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
1690 	      dst_rl[dst_cnt - 1].length == 0) ||
1691 	    src_rl[src_cnt - 1].lcn < LCN_HOLE)
1692 		return ERR_PTR(-EINVAL);
1693 
1694 	start_vcn = src_rl[0].vcn;
1695 
1696 	i_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
1697 	if (!i_rl ||
1698 	    (i_rl->lcn == LCN_ENOENT && i_rl->vcn != start_vcn) ||
1699 	    (i_rl->lcn != LCN_ENOENT && !ntfs_rle_contain(i_rl, start_vcn)))
1700 		return ERR_PTR(-EINVAL);
1701 
1702 	new_1st_cnt = (int)(i_rl - dst_rl);
1703 	if (new_1st_cnt > dst_cnt)
1704 		return ERR_PTR(-EINVAL);
1705 	new_3rd_cnt = dst_cnt - new_1st_cnt;
1706 	if (new_3rd_cnt < 1)
1707 		return ERR_PTR(-EINVAL);
1708 
1709 	if (i_rl[0].vcn != start_vcn) {
1710 		if (i_rl[0].lcn == LCN_HOLE && src_rl[0].lcn == LCN_HOLE)
1711 			goto merge_src_rle;
1712 
1713 		/* split @i_rl[0] and create @dst_rl_split */
1714 		dst_rl_split.vcn = i_rl[0].vcn;
1715 		dst_rl_split.length = start_vcn - i_rl[0].vcn;
1716 		dst_rl_split.lcn = i_rl[0].lcn;
1717 
1718 		i_rl[0].vcn = start_vcn;
1719 		i_rl[0].length -= dst_rl_split.length;
1720 		if (i_rl[0].lcn > LCN_HOLE)
			i_rl[0].lcn += dst_rl_split.length;
1721 	} else {
1722 		struct runlist_element *dst_rle, *src_rle;
1723 merge_src_rle:
1724 
1725 		/* @i_rl[0] is not split; mark @dst_rl_split unused */
1726 		dst_rl_split.lcn = LCN_ENOENT;
1727 
1728 		/* merge @src_rl's first run with the run preceding @i_rl[0] if possible */
1729 		dst_rle = new_1st_cnt > 0 ? &dst_rl[new_1st_cnt - 1] : NULL;
1730 		src_rle = &src_rl[0];
1731 		if (dst_rle && ntfs_rle_lcn_contiguous(dst_rle, src_rle)) {
1732 			WARN_ON(dst_rle->vcn + dst_rle->length != src_rle->vcn);
1733 			dst_rle->length += src_rle->length;
1734 			src_rl++;
1735 			src_cnt--;
1736 		} else {
1737 			/* merge @src_rl's last run with @i_rl[0] if possible */
1738 			dst_rle = &dst_rl[new_1st_cnt];
1739 			src_rle = &src_rl[src_cnt - 1];
1740 
1741 			if (ntfs_rle_lcn_contiguous(dst_rle, src_rle)) {
1742 				dst_rle->length += src_rle->length;
1743 				src_cnt--;
1744 			}
1745 		}
1746 	}
1747 
1748 	new_2nd_cnt = src_cnt;
1749 	new_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt;
1750 	new_cnt += dst_rl_split.lcn >= LCN_HOLE ? 1 : 0;
1751 	new_rl = kvcalloc(new_cnt, sizeof(*new_rl), GFP_NOFS);
1752 	if (!new_rl)
1753 		return ERR_PTR(-ENOMEM);
1754 
1755 	/* Copy the @dst_rl's first half to @new_rl */
1756 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
1757 	if (dst_rl_split.lcn >= LCN_HOLE) {
1758 		ntfs_rl_mc(new_rl, new_1st_cnt, &dst_rl_split, 0, 1);
1759 		new_1st_cnt++;
1760 	}
1761 	/* Copy the @src_rl to @new_rl */
1762 	ntfs_rl_mc(new_rl, new_1st_cnt, src_rl, 0, new_2nd_cnt);
1763 	/* Copy the @dst_rl's second half to @new_rl */
1764 	if (new_3rd_cnt >= 1) {
1765 		struct runlist_element *rl, *rl_3rd;
1766 		int dst_1st_cnt = dst_rl_split.lcn >= LCN_HOLE ?
1767 			new_1st_cnt - 1 : new_1st_cnt;
1768 
1769 		ntfs_rl_mc(new_rl, new_1st_cnt + new_2nd_cnt,
1770 			   dst_rl, dst_1st_cnt, new_3rd_cnt);
1771 		/* Update the vcns of @dst_rl's second-half runs to reflect
1772 		 * the inserted @src_rl runs.
1773 		 */
1774 		if (new_1st_cnt + new_2nd_cnt == 0) {
1775 			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt + 1];
1776 			rl = &new_rl[new_1st_cnt + new_2nd_cnt];
1777 		} else {
1778 			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt];
1779 			rl = &new_rl[new_1st_cnt + new_2nd_cnt - 1];
1780 		}
1781 		do {
1782 			rl_3rd->vcn = rl->vcn + rl->length;
1783 			if (rl_3rd->length <= 0)
1784 				break;
1785 			rl = rl_3rd;
1786 			rl_3rd++;
1787 		} while (1);
1788 	}
1789 	*new_rl_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt;
1790 
1791 	kvfree(dst_rl);
1792 	kvfree(src_rl_origin);
1793 	return new_rl;
1794 }
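
/*
 * Example of the splitting done above (illustrative values): inserting
 * src_rl = (vcn 4, lcn 500, len 2) into
 * dst_rl = (vcn 0, lcn 100, len 10), (vcn 10, LCN_ENOENT, len 0) yields
 *
 *	(vcn 0, lcn 100, len 4), (vcn 4, lcn 500, len 2),
 *	(vcn 6, lcn 104, len 6), (vcn 12, LCN_ENOENT, len 0)
 *
 * i.e. the tail keeps its clusters but its vcns move up by the two
 * inserted clusters.
 */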
1795 
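/*
 * ntfs_rl_punch_hole - replace a vcn range of a runlist with a sparse run
 * @dst_rl:	runlist to punch the hole into, LCN_ENOENT terminated
 * @dst_cnt:	number of elements in @dst_rl including the terminator
 * @start_vcn:	first vcn of the range to punch
 * @len:	length of the range in clusters
 * @punch_rl:	on success, the punched out extents, LCN_ENOENT terminated
 * @new_rl_cnt:	on success, the number of elements in the returned runlist
 *
 * Replace the vcn range [@start_vcn, @start_vcn + @len - 1], clamped to the
 * end of @dst_rl, with a single LCN_HOLE run, splitting the runs at either
 * boundary if needed.  The extents that used to map the range are returned
 * via @punch_rl so that the caller can free the underlying clusters.
 *
 * On success, return the newly allocated runlist and free @dst_rl.  On
 * error, return ERR_PTR(-errno) and leave @dst_rl alone.
 */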
1796 struct runlist_element *ntfs_rl_punch_hole(struct runlist_element *dst_rl, int dst_cnt,
1797 				    s64 start_vcn, s64 len,
1798 				    struct runlist_element **punch_rl,
1799 				    size_t *new_rl_cnt)
1800 {
1801 	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl, hole_rl[1];
1802 	s64 end_vcn;
1803 	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt;
1804 	bool begin_split, end_split, one_split_3;
1805 
1806 	if (dst_cnt < 2 ||
1807 	    !(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
1808 	      dst_rl[dst_cnt - 1].length == 0))
1809 		return ERR_PTR(-EINVAL);
1810 
1811 	end_vcn = min(start_vcn + len - 1,
1812 		      dst_rl[dst_cnt - 2].vcn + dst_rl[dst_cnt - 2].length - 1);
1813 
1814 	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
1815 	if (!s_rl ||
1816 	    s_rl->lcn <= LCN_ENOENT ||
1817 	    !ntfs_rle_contain(s_rl, start_vcn))
1818 		return ERR_PTR(-EINVAL);
1819 
1820 	begin_split = s_rl->vcn != start_vcn;
1821 
1822 	e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn);
1823 	if (!e_rl ||
1824 	    e_rl->lcn <= LCN_ENOENT ||
1825 	    !ntfs_rle_contain(e_rl, end_vcn))
1826 		return ERR_PTR(-EINVAL);
1827 
1828 	end_split = e_rl->vcn + e_rl->length - 1 != end_vcn;
1829 
1830 	/* @s_rl has to be split into left, punched hole, and right */
1831 	one_split_3 = e_rl == s_rl && begin_split && end_split;
1832 
1833 	punch_cnt = (int)(e_rl - s_rl) + 1;
1834 
1835 	*punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element),
1836 			GFP_NOFS);
1837 	if (!*punch_rl)
1838 		return ERR_PTR(-ENOMEM);
1839 
1840 	new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3;
1841 	new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS);
1842 	if (!new_rl) {
1843 		kvfree(*punch_rl);
1844 		*punch_rl = NULL;
1845 		return ERR_PTR(-ENOMEM);
1846 	}
1847 
1848 	new_1st_cnt = (int)(s_rl - dst_rl) + 1;
1849 	ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt);
1850 
1851 	(*punch_rl)[punch_cnt].lcn = LCN_ENOENT;
1852 	(*punch_rl)[punch_cnt].length = 0;
1853 
1854 	if (!begin_split)
1855 		new_1st_cnt--;
1856 	dst_3rd_rl = e_rl;
1857 	dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1;
1858 	if (!end_split) {
1859 		dst_3rd_rl++;
1860 		dst_3rd_cnt--;
1861 	}
1862 
1863 	/* Copy the 1st part of @dst_rl into @new_rl */
1864 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
1865 	if (begin_split) {
1866 		/* @s_rl has to be split and copied into the last of @new_rl
1867 		 * and the first of @punch_rl.
1868 		 */
1869 		s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn;
1870 
1871 		if (new_1st_cnt)
1872 			new_rl[new_1st_cnt - 1].length = first_cnt;
1873 
1874 		(*punch_rl)[0].vcn = start_vcn;
1875 		(*punch_rl)[0].length -= first_cnt;
1876 		if ((*punch_rl)[0].lcn > LCN_HOLE)
1877 			(*punch_rl)[0].lcn += first_cnt;
1878 	}
1879 
1880 	/* Copy a hole into @new_rl */
1881 	hole_rl[0].vcn = start_vcn;
1882 	hole_rl[0].length = (s64)len;
1883 	hole_rl[0].lcn = LCN_HOLE;
1884 	ntfs_rl_mc(new_rl, new_1st_cnt, hole_rl, 0, 1);
1885 
1886 	/* Copy the 3rd part of @dst_rl into @new_rl */
1887 	ntfs_rl_mc(new_rl, new_1st_cnt + 1, dst_3rd_rl, 0, dst_3rd_cnt);
1888 	if (end_split) {
1889 		/* @e_rl has to be split and copied into the first of the 3rd
1890 		 * part of @new_rl and the last of @punch_rl.
1891 		 */
1892 		s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1;
1893 
1894 		new_rl[new_1st_cnt + 1].vcn = end_vcn + 1;
1895 		new_rl[new_1st_cnt + 1].length -= first_cnt;
1896 		if (new_rl[new_1st_cnt + 1].lcn > LCN_HOLE)
1897 			new_rl[new_1st_cnt + 1].lcn += first_cnt;
1898 
1899 		if (one_split_3)
1900 			(*punch_rl)[punch_cnt - 1].length -=
1901 				new_rl[new_1st_cnt + 1].length;
1902 		else
1903 			(*punch_rl)[punch_cnt - 1].length = first_cnt;
1904 	}
1905 
1906 	/* Merge the new hole with its left and/or right neighbour in
1907 	 * @new_rl if that neighbour is itself a hole.
1908 	 */
1909 	merge_cnt = 0;
1910 	if (new_1st_cnt > 0 && new_rl[new_1st_cnt - 1].lcn == LCN_HOLE) {
1911 		/* Merge the new hole into the hole on its left. */
1912 		s_rl = &new_rl[new_1st_cnt - 1];
1913 		s_rl->length += s_rl[1].length;
1914 		merge_cnt = 1;
1915 		/* The run right of the hole is also a hole; merge it too. */
1916 		if (new_1st_cnt + 1 < new_cnt &&
1917 		    new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
1918 			s_rl->length += s_rl[2].length;
1919 			merge_cnt++;
1920 		}
1921 	} else if (new_1st_cnt + 1 < new_cnt &&
1922 		   new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
1923 		/* Merge the following hole into the new hole. */
1924 		s_rl = &new_rl[new_1st_cnt];
1925 		s_rl->length += s_rl[1].length;
1926 		merge_cnt = 1;
1927 	}
1928 	if (merge_cnt) {
1929 		struct runlist_element *d_rl, *src_rl;
1930 
1931 		d_rl = s_rl + 1;
1932 		src_rl = s_rl + 1 + merge_cnt;
1933 		ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl),
1934 			   (int)(&new_rl[new_cnt - 1] - src_rl) + 1);
1935 	}
1936 
1937 	(*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn +
1938 		(*punch_rl)[punch_cnt - 1].length;
1939 
1940 	/* punch_cnt elements of dst are replaced with one hole */
1941 	*new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) +
1942 		1 - merge_cnt;
1943 	kvfree(dst_rl);
1944 	return new_rl;
1945 }
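
/*
 * Example (illustrative values): punching vcns 4-5 out of
 * dst_rl = (vcn 0, lcn 100, len 10), (vcn 10, LCN_ENOENT, len 0) yields
 *
 *	(vcn 0, lcn 100, len 4), (vcn 4, LCN_HOLE, len 2),
 *	(vcn 6, lcn 106, len 4), (vcn 10, LCN_ENOENT, len 0)
 *
 * while *punch_rl receives the extent (vcn 4, lcn 104, len 2) whose
 * clusters the caller must release.
 */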
1946 
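/*
 * ntfs_rl_collapse_range - remove a vcn range from a runlist
 * @dst_rl:	runlist to collapse, LCN_ENOENT terminated
 * @dst_cnt:	number of elements in @dst_rl including the terminator
 * @start_vcn:	first vcn of the range to remove
 * @len:	length of the range in clusters
 * @punch_rl:	on success, the removed extents, LCN_ENOENT terminated
 * @new_rl_cnt:	on success, the number of elements in the returned runlist
 *
 * Unlike ntfs_rl_punch_hole(), the range is removed from the vcn space
 * entirely: the runs following it are shifted down by the removed length
 * and merged with the preceding run where possible.  The removed extents
 * are returned via @punch_rl so that the caller can free the underlying
 * clusters.
 *
 * On success, return the newly allocated runlist and free @dst_rl.  On
 * error, return ERR_PTR(-errno) and leave @dst_rl alone.
 */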
1947 struct runlist_element *ntfs_rl_collapse_range(struct runlist_element *dst_rl, int dst_cnt,
1948 					s64 start_vcn, s64 len,
1949 					struct runlist_element **punch_rl,
1950 					size_t *new_rl_cnt)
1951 {
1952 	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl;
1953 	s64 end_vcn;
1954 	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt, i;
1955 	bool begin_split, end_split, one_split_3;
1956 
1957 	if (dst_cnt < 2 ||
1958 	    !(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
1959 	      dst_rl[dst_cnt - 1].length == 0))
1960 		return ERR_PTR(-EINVAL);
1961 
1962 	end_vcn = min(start_vcn + len - 1,
1963 			dst_rl[dst_cnt - 1].vcn - 1);
1964 
1965 	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
1966 	if (!s_rl ||
1967 	    s_rl->lcn <= LCN_ENOENT ||
1968 	    !ntfs_rle_contain(s_rl, start_vcn))
1969 		return ERR_PTR(-EINVAL);
1970 
1971 	begin_split = s_rl->vcn != start_vcn;
1972 
1973 	e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn);
1974 	if (!e_rl ||
1975 	    e_rl->lcn <= LCN_ENOENT ||
1976 	    !ntfs_rle_contain(e_rl, end_vcn))
1977 		return ERR_PTR(-EINVAL);
1978 
1979 	end_split = e_rl->vcn + e_rl->length - 1 != end_vcn;
1980 
1981 	/* @s_rl has to be split into left, collapsed, and right */
1982 	one_split_3 = e_rl == s_rl && begin_split && end_split;
1983 
1984 	punch_cnt = (int)(e_rl - s_rl) + 1;
1985 	*punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element),
1986 			GFP_NOFS);
1987 	if (!*punch_rl)
1988 		return ERR_PTR(-ENOMEM);
1989 
1990 	new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3;
1991 	new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS);
1992 	if (!new_rl) {
1993 		kvfree(*punch_rl);
1994 		*punch_rl = NULL;
1995 		return ERR_PTR(-ENOMEM);
1996 	}
1997 
1998 	new_1st_cnt = (int)(s_rl - dst_rl) + 1;
1999 	ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt);
2000 	(*punch_rl)[punch_cnt].lcn = LCN_ENOENT;
2001 	(*punch_rl)[punch_cnt].length = 0;
2002 
2003 	if (!begin_split)
2004 		new_1st_cnt--;
2005 	dst_3rd_rl = e_rl;
2006 	dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1;
2007 	if (!end_split) {
2008 		dst_3rd_rl++;
2009 		dst_3rd_cnt--;
2010 	}
2011 
2012 	/* Copy the 1st part of @dst_rl into @new_rl */
2013 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
2014 	if (begin_split) {
2015 		/* @s_rl has to be split and copied into the last of @new_rl
2016 		 * and the first of @punch_rl.
2017 		 */
2018 		s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn;
2019 
2020 		new_rl[new_1st_cnt - 1].length = first_cnt;
2021 
2022 		(*punch_rl)[0].vcn = start_vcn;
2023 		(*punch_rl)[0].length -= first_cnt;
2024 		if ((*punch_rl)[0].lcn > LCN_HOLE)
2025 			(*punch_rl)[0].lcn += first_cnt;
2026 	}
2027 
2028 	/* Copy the 3rd part of @dst_rl into @new_rl */
2029 	ntfs_rl_mc(new_rl, new_1st_cnt, dst_3rd_rl, 0, dst_3rd_cnt);
2030 	if (end_split) {
2031 		/* @e_rl has to be split and copied into the first of the 3rd
2032 		 * part of @new_rl and the last of @punch_rl.
2033 		 */
2034 		s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1;
2035 
2036 		new_rl[new_1st_cnt].vcn = end_vcn + 1;
2037 		new_rl[new_1st_cnt].length -= first_cnt;
2038 		if (new_rl[new_1st_cnt].lcn > LCN_HOLE)
2039 			new_rl[new_1st_cnt].lcn += first_cnt;
2040 
2041 		if (one_split_3)
2042 			(*punch_rl)[punch_cnt - 1].length -=
2043 				new_rl[new_1st_cnt].length;
2044 		else
2045 			(*punch_rl)[punch_cnt - 1].length = first_cnt;
2046 	}
2047 
2048 	/* Recompute the vcns of the runs following the collapsed range. */
2049 	if (new_1st_cnt == 0)
2050 		new_rl[new_1st_cnt].vcn = 0;
2051 	for (i = new_1st_cnt == 0 ? 1 : new_1st_cnt; new_rl[i].length; i++)
2052 		new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length;
2053 	new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length;
2054 
2055 	/* Merge the runs on either side of the collapsed range in @new_rl
2056 	 * if their lcns are contiguous or both are holes.
2057 	 */
2058 	merge_cnt = 0;
2059 	i = new_1st_cnt == 0 ? 1 : new_1st_cnt;
2060 	if (ntfs_rle_lcn_contiguous(&new_rl[i - 1], &new_rl[i])) {
2061 		/* Merge the right run into the left one. */
2062 		s_rl = &new_rl[i - 1];
2063 		s_rl->length += s_rl[1].length;
2064 		merge_cnt = 1;
2065 	}
2066 	if (merge_cnt) {
2067 		struct runlist_element *d_rl, *src_rl;
2068 
2069 		d_rl = s_rl + 1;
2070 		src_rl = s_rl + 1 + merge_cnt;
2071 		ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl),
2072 			   (int)(&new_rl[new_cnt - 1] - src_rl) + 1);
2073 	}
2074 
2075 	(*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn +
2076 		(*punch_rl)[punch_cnt - 1].length;
2077 
2078 	/* punch_cnt elements of dst are extracted */
2079 	*new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) -
2080 		merge_cnt;
2081 
2082 	kvfree(dst_rl);
2083 	return new_rl;
2084 }
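
/*
 * Example (illustrative values): collapsing vcns 4-5 out of
 * dst_rl = (vcn 0, lcn 100, len 10), (vcn 10, LCN_ENOENT, len 0) yields
 *
 *	(vcn 0, lcn 100, len 4), (vcn 4, lcn 106, len 4),
 *	(vcn 8, LCN_ENOENT, len 0)
 *
 * with *punch_rl receiving (vcn 4, lcn 104, len 2); the tail keeps its
 * clusters but its vcns move down by the two removed clusters.
 */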
2085