xref: /linux/fs/ntfs/runlist.c (revision cdd4dc3aebeab43a72ce0bc2b5bab6f0a80b97a5)
11e9ea7e0SNamjae Jeon // SPDX-License-Identifier: GPL-2.0-or-later
21e9ea7e0SNamjae Jeon /*
311ccc910SNamjae Jeon  * NTFS runlist handling code.
41e9ea7e0SNamjae Jeon  *
51e9ea7e0SNamjae Jeon  * Copyright (c) 2001-2007 Anton Altaparmakov
61e9ea7e0SNamjae Jeon  * Copyright (c) 2002-2005 Richard Russon
711ccc910SNamjae Jeon  * Copyright (c) 2025 LG Electronics Co., Ltd.
811ccc910SNamjae Jeon  *
911ccc910SNamjae Jeon  * Part of this file is based on code from the NTFS-3G.
1011ccc910SNamjae Jeon  * and is copyrighted by the respective authors below:
1111ccc910SNamjae Jeon  * Copyright (c) 2002-2005 Anton Altaparmakov
1211ccc910SNamjae Jeon  * Copyright (c) 2002-2005 Richard Russon
1311ccc910SNamjae Jeon  * Copyright (c) 2002-2008 Szabolcs Szakacsits
1411ccc910SNamjae Jeon  * Copyright (c) 2004 Yura Pakhuchiy
1511ccc910SNamjae Jeon  * Copyright (c) 2007-2022 Jean-Pierre Andre
161e9ea7e0SNamjae Jeon  */
171e9ea7e0SNamjae Jeon 
181e9ea7e0SNamjae Jeon #include "ntfs.h"
1911ccc910SNamjae Jeon #include "attrib.h"
201e9ea7e0SNamjae Jeon 
2111ccc910SNamjae Jeon /*
221e9ea7e0SNamjae Jeon  * ntfs_rl_mm - runlist memmove
2311ccc910SNamjae Jeon  * @base: base runlist array
2411ccc910SNamjae Jeon  * @dst: destination index in @base
2511ccc910SNamjae Jeon  * @src: source index in @base
2611ccc910SNamjae Jeon  * @size: number of elements to move
271e9ea7e0SNamjae Jeon  *
281e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlist @base.
291e9ea7e0SNamjae Jeon  */
3011ccc910SNamjae Jeon static inline void ntfs_rl_mm(struct runlist_element *base, int dst, int src, int size)
311e9ea7e0SNamjae Jeon {
321e9ea7e0SNamjae Jeon 	if (likely((dst != src) && (size > 0)))
331e9ea7e0SNamjae Jeon 		memmove(base + dst, base + src, size * sizeof(*base));
341e9ea7e0SNamjae Jeon }
351e9ea7e0SNamjae Jeon 
3611ccc910SNamjae Jeon /*
371e9ea7e0SNamjae Jeon  * ntfs_rl_mc - runlist memory copy
3811ccc910SNamjae Jeon  * @dstbase: destination runlist array
3911ccc910SNamjae Jeon  * @dst: destination index in @dstbase
4011ccc910SNamjae Jeon  * @srcbase: source runlist array
4111ccc910SNamjae Jeon  * @src: source index in @srcbase
4211ccc910SNamjae Jeon  * @size: number of elements to copy
431e9ea7e0SNamjae Jeon  *
441e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dstbase and
451e9ea7e0SNamjae Jeon  * @srcbase.
461e9ea7e0SNamjae Jeon  */
4711ccc910SNamjae Jeon static inline void ntfs_rl_mc(struct runlist_element *dstbase, int dst,
4811ccc910SNamjae Jeon 		struct runlist_element *srcbase, int src, int size)
491e9ea7e0SNamjae Jeon {
501e9ea7e0SNamjae Jeon 	if (likely(size > 0))
511e9ea7e0SNamjae Jeon 		memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
521e9ea7e0SNamjae Jeon }
531e9ea7e0SNamjae Jeon 
5411ccc910SNamjae Jeon /*
551e9ea7e0SNamjae Jeon  * ntfs_rl_realloc - Reallocate memory for runlists
561e9ea7e0SNamjae Jeon  * @rl:		original runlist
571e9ea7e0SNamjae Jeon  * @old_size:	number of runlist elements in the original runlist @rl
581e9ea7e0SNamjae Jeon  * @new_size:	number of runlist elements we need space for
591e9ea7e0SNamjae Jeon  *
601e9ea7e0SNamjae Jeon  * As the runlists grow, more memory will be required.  To prevent the
611e9ea7e0SNamjae Jeon  * kernel having to allocate and reallocate large numbers of small bits of
621e9ea7e0SNamjae Jeon  * memory, this function returns an entire page of memory.
631e9ea7e0SNamjae Jeon  *
641e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlist @rl.
651e9ea7e0SNamjae Jeon  *
661e9ea7e0SNamjae Jeon  * N.B.  If the new allocation doesn't require a different number of pages in
671e9ea7e0SNamjae Jeon  *       memory, the function will return the original pointer.
681e9ea7e0SNamjae Jeon  *
691e9ea7e0SNamjae Jeon  * On success, return a pointer to the newly allocated, or recycled, memory.
7011ccc910SNamjae Jeon  * On error, return -errno.
711e9ea7e0SNamjae Jeon  */
7211ccc910SNamjae Jeon struct runlist_element *ntfs_rl_realloc(struct runlist_element *rl,
731e9ea7e0SNamjae Jeon 		int old_size, int new_size)
741e9ea7e0SNamjae Jeon {
7511ccc910SNamjae Jeon 	struct runlist_element *new_rl;
761e9ea7e0SNamjae Jeon 
7711ccc910SNamjae Jeon 	old_size = old_size * sizeof(*rl);
7811ccc910SNamjae Jeon 	new_size = new_size * sizeof(*rl);
791e9ea7e0SNamjae Jeon 	if (old_size == new_size)
801e9ea7e0SNamjae Jeon 		return rl;
811e9ea7e0SNamjae Jeon 
8211ccc910SNamjae Jeon 	new_rl = kvzalloc(new_size, GFP_NOFS);
831e9ea7e0SNamjae Jeon 	if (unlikely(!new_rl))
841e9ea7e0SNamjae Jeon 		return ERR_PTR(-ENOMEM);
851e9ea7e0SNamjae Jeon 
861e9ea7e0SNamjae Jeon 	if (likely(rl != NULL)) {
871e9ea7e0SNamjae Jeon 		if (unlikely(old_size > new_size))
881e9ea7e0SNamjae Jeon 			old_size = new_size;
891e9ea7e0SNamjae Jeon 		memcpy(new_rl, rl, old_size);
9011ccc910SNamjae Jeon 		kvfree(rl);
911e9ea7e0SNamjae Jeon 	}
921e9ea7e0SNamjae Jeon 	return new_rl;
931e9ea7e0SNamjae Jeon }
941e9ea7e0SNamjae Jeon 
9511ccc910SNamjae Jeon /*
961e9ea7e0SNamjae Jeon  * ntfs_rl_realloc_nofail - Reallocate memory for runlists
971e9ea7e0SNamjae Jeon  * @rl:		original runlist
981e9ea7e0SNamjae Jeon  * @old_size:	number of runlist elements in the original runlist @rl
991e9ea7e0SNamjae Jeon  * @new_size:	number of runlist elements we need space for
1001e9ea7e0SNamjae Jeon  *
1011e9ea7e0SNamjae Jeon  * As the runlists grow, more memory will be required.  To prevent the
1021e9ea7e0SNamjae Jeon  * kernel having to allocate and reallocate large numbers of small bits of
1031e9ea7e0SNamjae Jeon  * memory, this function returns an entire page of memory.
1041e9ea7e0SNamjae Jeon  *
1051e9ea7e0SNamjae Jeon  * This function guarantees that the allocation will succeed.  It will sleep
1061e9ea7e0SNamjae Jeon  * for as long as it takes to complete the allocation.
1071e9ea7e0SNamjae Jeon  *
1081e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlist @rl.
1091e9ea7e0SNamjae Jeon  *
1101e9ea7e0SNamjae Jeon  * N.B.  If the new allocation doesn't require a different number of pages in
1111e9ea7e0SNamjae Jeon  *       memory, the function will return the original pointer.
1121e9ea7e0SNamjae Jeon  *
1131e9ea7e0SNamjae Jeon  * On success, return a pointer to the newly allocated, or recycled, memory.
11411ccc910SNamjae Jeon  * On error, return -errno.
1151e9ea7e0SNamjae Jeon  */
11611ccc910SNamjae Jeon static inline struct runlist_element *ntfs_rl_realloc_nofail(struct runlist_element *rl,
1171e9ea7e0SNamjae Jeon 		int old_size, int new_size)
1181e9ea7e0SNamjae Jeon {
11911ccc910SNamjae Jeon 	struct runlist_element *new_rl;
1201e9ea7e0SNamjae Jeon 
12111ccc910SNamjae Jeon 	old_size = old_size * sizeof(*rl);
12211ccc910SNamjae Jeon 	new_size = new_size * sizeof(*rl);
1231e9ea7e0SNamjae Jeon 	if (old_size == new_size)
1241e9ea7e0SNamjae Jeon 		return rl;
1251e9ea7e0SNamjae Jeon 
12611ccc910SNamjae Jeon 	new_rl = kvmalloc(new_size, GFP_NOFS | __GFP_NOFAIL);
1271e9ea7e0SNamjae Jeon 	if (likely(rl != NULL)) {
1281e9ea7e0SNamjae Jeon 		if (unlikely(old_size > new_size))
1291e9ea7e0SNamjae Jeon 			old_size = new_size;
1301e9ea7e0SNamjae Jeon 		memcpy(new_rl, rl, old_size);
13111ccc910SNamjae Jeon 		kvfree(rl);
1321e9ea7e0SNamjae Jeon 	}
1331e9ea7e0SNamjae Jeon 	return new_rl;
1341e9ea7e0SNamjae Jeon }
1351e9ea7e0SNamjae Jeon 
13611ccc910SNamjae Jeon /*
1371e9ea7e0SNamjae Jeon  * ntfs_are_rl_mergeable - test if two runlists can be joined together
1381e9ea7e0SNamjae Jeon  * @dst:	original runlist
1391e9ea7e0SNamjae Jeon  * @src:	new runlist to test for mergeability with @dst
1401e9ea7e0SNamjae Jeon  *
1411e9ea7e0SNamjae Jeon  * Test if two runlists can be joined together. For this, their VCNs and LCNs
1421e9ea7e0SNamjae Jeon  * must be adjacent.
1431e9ea7e0SNamjae Jeon  *
1441e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
1451e9ea7e0SNamjae Jeon  *
1461e9ea7e0SNamjae Jeon  * Return: true   Success, the runlists can be merged.
1471e9ea7e0SNamjae Jeon  *	   false  Failure, the runlists cannot be merged.
1481e9ea7e0SNamjae Jeon  */
14911ccc910SNamjae Jeon static inline bool ntfs_are_rl_mergeable(struct runlist_element *dst,
15011ccc910SNamjae Jeon 		struct runlist_element *src)
1511e9ea7e0SNamjae Jeon {
1521e9ea7e0SNamjae Jeon 	/* We can merge unmapped regions even if they are misaligned. */
1531e9ea7e0SNamjae Jeon 	if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
1541e9ea7e0SNamjae Jeon 		return true;
1551e9ea7e0SNamjae Jeon 	/* If the runs are misaligned, we cannot merge them. */
1561e9ea7e0SNamjae Jeon 	if ((dst->vcn + dst->length) != src->vcn)
1571e9ea7e0SNamjae Jeon 		return false;
1581e9ea7e0SNamjae Jeon 	/* If both runs are non-sparse and contiguous, we can merge them. */
1591e9ea7e0SNamjae Jeon 	if ((dst->lcn >= 0) && (src->lcn >= 0) &&
1601e9ea7e0SNamjae Jeon 			((dst->lcn + dst->length) == src->lcn))
1611e9ea7e0SNamjae Jeon 		return true;
1621e9ea7e0SNamjae Jeon 	/* If we are merging two holes, we can merge them. */
1631e9ea7e0SNamjae Jeon 	if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
1641e9ea7e0SNamjae Jeon 		return true;
16511ccc910SNamjae Jeon 	/* If we are merging two dealloc, we can merge them. */
16611ccc910SNamjae Jeon 	if ((dst->lcn == LCN_DELALLOC) && (src->lcn == LCN_DELALLOC))
16711ccc910SNamjae Jeon 		return true;
1681e9ea7e0SNamjae Jeon 	/* Cannot merge. */
1691e9ea7e0SNamjae Jeon 	return false;
1701e9ea7e0SNamjae Jeon }
1711e9ea7e0SNamjae Jeon 
17211ccc910SNamjae Jeon /*
1731e9ea7e0SNamjae Jeon  * __ntfs_rl_merge - merge two runlists without testing if they can be merged
1741e9ea7e0SNamjae Jeon  * @dst:	original, destination runlist
1751e9ea7e0SNamjae Jeon  * @src:	new runlist to merge with @dst
1761e9ea7e0SNamjae Jeon  *
1771e9ea7e0SNamjae Jeon  * Merge the two runlists, writing into the destination runlist @dst. The
1781e9ea7e0SNamjae Jeon  * caller must make sure the runlists can be merged or this will corrupt the
1791e9ea7e0SNamjae Jeon  * destination runlist.
1801e9ea7e0SNamjae Jeon  *
1811e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
1821e9ea7e0SNamjae Jeon  */
18311ccc910SNamjae Jeon static inline void __ntfs_rl_merge(struct runlist_element *dst, struct runlist_element *src)
1841e9ea7e0SNamjae Jeon {
1851e9ea7e0SNamjae Jeon 	dst->length += src->length;
1861e9ea7e0SNamjae Jeon }
1871e9ea7e0SNamjae Jeon 
18811ccc910SNamjae Jeon /*
1891e9ea7e0SNamjae Jeon  * ntfs_rl_append - append a runlist after a given element
19011ccc910SNamjae Jeon  * @dst: destination runlist to append to
19111ccc910SNamjae Jeon  * @dsize: number of elements in @dst
19211ccc910SNamjae Jeon  * @src: source runlist to append from
19311ccc910SNamjae Jeon  * @ssize: number of elements in @src
19411ccc910SNamjae Jeon  * @loc: index in @dst after which to append @src
19511ccc910SNamjae Jeon  * @new_size: on success, set to the new combined size
1961e9ea7e0SNamjae Jeon  *
1971e9ea7e0SNamjae Jeon  * Append the runlist @src after element @loc in @dst.  Merge the right end of
1981e9ea7e0SNamjae Jeon  * the new runlist, if necessary. Adjust the size of the hole before the
1991e9ea7e0SNamjae Jeon  * appended runlist.
2001e9ea7e0SNamjae Jeon  *
2011e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
2021e9ea7e0SNamjae Jeon  *
2031e9ea7e0SNamjae Jeon  * On success, return a pointer to the new, combined, runlist. Note, both
2041e9ea7e0SNamjae Jeon  * runlists @dst and @src are deallocated before returning so you cannot use
2051e9ea7e0SNamjae Jeon  * the pointers for anything any more. (Strictly speaking the returned runlist
2061e9ea7e0SNamjae Jeon  * may be the same as @dst but this is irrelevant.)
2071e9ea7e0SNamjae Jeon  *
20811ccc910SNamjae Jeon  * On error, return -errno. Both runlists are left unmodified.
2091e9ea7e0SNamjae Jeon  */
21011ccc910SNamjae Jeon static inline struct runlist_element *ntfs_rl_append(struct runlist_element *dst,
21111ccc910SNamjae Jeon 		int dsize, struct runlist_element *src, int ssize, int loc,
21211ccc910SNamjae Jeon 		size_t *new_size)
2131e9ea7e0SNamjae Jeon {
2141e9ea7e0SNamjae Jeon 	bool right = false;	/* Right end of @src needs merging. */
2151e9ea7e0SNamjae Jeon 	int marker;		/* End of the inserted runs. */
2161e9ea7e0SNamjae Jeon 
2171e9ea7e0SNamjae Jeon 	/* First, check if the right hand end needs merging. */
2181e9ea7e0SNamjae Jeon 	if ((loc + 1) < dsize)
2191e9ea7e0SNamjae Jeon 		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
2201e9ea7e0SNamjae Jeon 
2211e9ea7e0SNamjae Jeon 	/* Space required: @dst size + @src size, less one if we merged. */
2221e9ea7e0SNamjae Jeon 	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
2231e9ea7e0SNamjae Jeon 	if (IS_ERR(dst))
2241e9ea7e0SNamjae Jeon 		return dst;
22511ccc910SNamjae Jeon 
22611ccc910SNamjae Jeon 	*new_size = dsize + ssize - right;
2271e9ea7e0SNamjae Jeon 	/*
2281e9ea7e0SNamjae Jeon 	 * We are guaranteed to succeed from here so can start modifying the
2291e9ea7e0SNamjae Jeon 	 * original runlists.
2301e9ea7e0SNamjae Jeon 	 */
2311e9ea7e0SNamjae Jeon 
2321e9ea7e0SNamjae Jeon 	/* First, merge the right hand end, if necessary. */
2331e9ea7e0SNamjae Jeon 	if (right)
2341e9ea7e0SNamjae Jeon 		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
2351e9ea7e0SNamjae Jeon 
2361e9ea7e0SNamjae Jeon 	/* First run after the @src runs that have been inserted. */
2371e9ea7e0SNamjae Jeon 	marker = loc + ssize + 1;
2381e9ea7e0SNamjae Jeon 
2391e9ea7e0SNamjae Jeon 	/* Move the tail of @dst out of the way, then copy in @src. */
2401e9ea7e0SNamjae Jeon 	ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
2411e9ea7e0SNamjae Jeon 	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
2421e9ea7e0SNamjae Jeon 
2431e9ea7e0SNamjae Jeon 	/* Adjust the size of the preceding hole. */
2441e9ea7e0SNamjae Jeon 	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
2451e9ea7e0SNamjae Jeon 
2461e9ea7e0SNamjae Jeon 	/* We may have changed the length of the file, so fix the end marker */
2471e9ea7e0SNamjae Jeon 	if (dst[marker].lcn == LCN_ENOENT)
2481e9ea7e0SNamjae Jeon 		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
2491e9ea7e0SNamjae Jeon 
2501e9ea7e0SNamjae Jeon 	return dst;
2511e9ea7e0SNamjae Jeon }
2521e9ea7e0SNamjae Jeon 
25311ccc910SNamjae Jeon /*
2541e9ea7e0SNamjae Jeon  * ntfs_rl_insert - insert a runlist into another
25511ccc910SNamjae Jeon  * @dst: destination runlist to insert into
25611ccc910SNamjae Jeon  * @dsize: number of elements in @dst
25711ccc910SNamjae Jeon  * @src: source runlist to insert from
25811ccc910SNamjae Jeon  * @ssize: number of elements in @src
25911ccc910SNamjae Jeon  * @loc: index in @dst at which to insert @src
26011ccc910SNamjae Jeon  * @new_size: on success, set to the new combined size
2611e9ea7e0SNamjae Jeon  *
2621e9ea7e0SNamjae Jeon  * Insert the runlist @src before element @loc in the runlist @dst. Merge the
2631e9ea7e0SNamjae Jeon  * left end of the new runlist, if necessary. Adjust the size of the hole
2641e9ea7e0SNamjae Jeon  * after the inserted runlist.
2651e9ea7e0SNamjae Jeon  *
2661e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
2671e9ea7e0SNamjae Jeon  *
2681e9ea7e0SNamjae Jeon  * On success, return a pointer to the new, combined, runlist. Note, both
2691e9ea7e0SNamjae Jeon  * runlists @dst and @src are deallocated before returning so you cannot use
2701e9ea7e0SNamjae Jeon  * the pointers for anything any more. (Strictly speaking the returned runlist
2711e9ea7e0SNamjae Jeon  * may be the same as @dst but this is irrelevant.)
2721e9ea7e0SNamjae Jeon  *
27311ccc910SNamjae Jeon  * On error, return -errno. Both runlists are left unmodified.
2741e9ea7e0SNamjae Jeon  */
27511ccc910SNamjae Jeon static inline struct runlist_element *ntfs_rl_insert(struct runlist_element *dst,
27611ccc910SNamjae Jeon 		int dsize, struct runlist_element *src, int ssize, int loc,
27711ccc910SNamjae Jeon 		size_t *new_size)
2781e9ea7e0SNamjae Jeon {
2791e9ea7e0SNamjae Jeon 	bool left = false;	/* Left end of @src needs merging. */
2801e9ea7e0SNamjae Jeon 	bool disc = false;	/* Discontinuity between @dst and @src. */
2811e9ea7e0SNamjae Jeon 	int marker;		/* End of the inserted runs. */
2821e9ea7e0SNamjae Jeon 
2831e9ea7e0SNamjae Jeon 	/*
2841e9ea7e0SNamjae Jeon 	 * disc => Discontinuity between the end of @dst and the start of @src.
2851e9ea7e0SNamjae Jeon 	 *	   This means we might need to insert a "not mapped" run.
2861e9ea7e0SNamjae Jeon 	 */
2871e9ea7e0SNamjae Jeon 	if (loc == 0)
2881e9ea7e0SNamjae Jeon 		disc = (src[0].vcn > 0);
2891e9ea7e0SNamjae Jeon 	else {
2901e9ea7e0SNamjae Jeon 		s64 merged_length;
2911e9ea7e0SNamjae Jeon 
2921e9ea7e0SNamjae Jeon 		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
2931e9ea7e0SNamjae Jeon 
2941e9ea7e0SNamjae Jeon 		merged_length = dst[loc - 1].length;
2951e9ea7e0SNamjae Jeon 		if (left)
2961e9ea7e0SNamjae Jeon 			merged_length += src->length;
2971e9ea7e0SNamjae Jeon 
2981e9ea7e0SNamjae Jeon 		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
2991e9ea7e0SNamjae Jeon 	}
3001e9ea7e0SNamjae Jeon 	/*
3011e9ea7e0SNamjae Jeon 	 * Space required: @dst size + @src size, less one if we merged, plus
3021e9ea7e0SNamjae Jeon 	 * one if there was a discontinuity.
3031e9ea7e0SNamjae Jeon 	 */
3041e9ea7e0SNamjae Jeon 	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
3051e9ea7e0SNamjae Jeon 	if (IS_ERR(dst))
3061e9ea7e0SNamjae Jeon 		return dst;
30711ccc910SNamjae Jeon 
30811ccc910SNamjae Jeon 	*new_size = dsize + ssize - left + disc;
3091e9ea7e0SNamjae Jeon 	/*
3101e9ea7e0SNamjae Jeon 	 * We are guaranteed to succeed from here so can start modifying the
3111e9ea7e0SNamjae Jeon 	 * original runlist.
3121e9ea7e0SNamjae Jeon 	 */
3131e9ea7e0SNamjae Jeon 	if (left)
3141e9ea7e0SNamjae Jeon 		__ntfs_rl_merge(dst + loc - 1, src);
3151e9ea7e0SNamjae Jeon 	/*
3161e9ea7e0SNamjae Jeon 	 * First run after the @src runs that have been inserted.
3171e9ea7e0SNamjae Jeon 	 * Nominally,  @marker equals @loc + @ssize, i.e. location + number of
3181e9ea7e0SNamjae Jeon 	 * runs in @src.  However, if @left, then the first run in @src has
3191e9ea7e0SNamjae Jeon 	 * been merged with one in @dst.  And if @disc, then @dst and @src do
3201e9ea7e0SNamjae Jeon 	 * not meet and we need an extra run to fill the gap.
3211e9ea7e0SNamjae Jeon 	 */
3221e9ea7e0SNamjae Jeon 	marker = loc + ssize - left + disc;
3231e9ea7e0SNamjae Jeon 
3241e9ea7e0SNamjae Jeon 	/* Move the tail of @dst out of the way, then copy in @src. */
3251e9ea7e0SNamjae Jeon 	ntfs_rl_mm(dst, marker, loc, dsize - loc);
3261e9ea7e0SNamjae Jeon 	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);
3271e9ea7e0SNamjae Jeon 
3281e9ea7e0SNamjae Jeon 	/* Adjust the VCN of the first run after the insertion... */
3291e9ea7e0SNamjae Jeon 	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
3301e9ea7e0SNamjae Jeon 	/* ... and the length. */
33111ccc910SNamjae Jeon 	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED ||
33211ccc910SNamjae Jeon 	    dst[marker].lcn == LCN_DELALLOC)
3331e9ea7e0SNamjae Jeon 		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;
3341e9ea7e0SNamjae Jeon 
3351e9ea7e0SNamjae Jeon 	/* Writing beyond the end of the file and there is a discontinuity. */
3361e9ea7e0SNamjae Jeon 	if (disc) {
3371e9ea7e0SNamjae Jeon 		if (loc > 0) {
3381e9ea7e0SNamjae Jeon 			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
3391e9ea7e0SNamjae Jeon 			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
3401e9ea7e0SNamjae Jeon 		} else {
3411e9ea7e0SNamjae Jeon 			dst[loc].vcn = 0;
3421e9ea7e0SNamjae Jeon 			dst[loc].length = dst[loc + 1].vcn;
3431e9ea7e0SNamjae Jeon 		}
3441e9ea7e0SNamjae Jeon 		dst[loc].lcn = LCN_RL_NOT_MAPPED;
3451e9ea7e0SNamjae Jeon 	}
3461e9ea7e0SNamjae Jeon 	return dst;
3471e9ea7e0SNamjae Jeon }
3481e9ea7e0SNamjae Jeon 
34911ccc910SNamjae Jeon /*
3501e9ea7e0SNamjae Jeon  * ntfs_rl_replace - overwrite a runlist element with another runlist
35111ccc910SNamjae Jeon  * @dst: destination runlist to replace in
35211ccc910SNamjae Jeon  * @dsize: number of elements in @dst
35311ccc910SNamjae Jeon  * @src: source runlist to replace with
35411ccc910SNamjae Jeon  * @ssize: number of elements in @src
35511ccc910SNamjae Jeon  * @loc: index in @dst to replace
35611ccc910SNamjae Jeon  * @new_size: on success, set to the new combined size
3571e9ea7e0SNamjae Jeon  *
3581e9ea7e0SNamjae Jeon  * Replace the runlist element @dst at @loc with @src. Merge the left and
3591e9ea7e0SNamjae Jeon  * right ends of the inserted runlist, if necessary.
3601e9ea7e0SNamjae Jeon  *
3611e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
3621e9ea7e0SNamjae Jeon  *
3631e9ea7e0SNamjae Jeon  * On success, return a pointer to the new, combined, runlist. Note, both
3641e9ea7e0SNamjae Jeon  * runlists @dst and @src are deallocated before returning so you cannot use
3651e9ea7e0SNamjae Jeon  * the pointers for anything any more. (Strictly speaking the returned runlist
3661e9ea7e0SNamjae Jeon  * may be the same as @dst but this is irrelevant.)
3671e9ea7e0SNamjae Jeon  *
36811ccc910SNamjae Jeon  * On error, return -errno. Both runlists are left unmodified.
3691e9ea7e0SNamjae Jeon  */
37011ccc910SNamjae Jeon static inline struct runlist_element *ntfs_rl_replace(struct runlist_element *dst,
37111ccc910SNamjae Jeon 		int dsize, struct runlist_element *src, int ssize, int loc,
37211ccc910SNamjae Jeon 		size_t *new_size)
3731e9ea7e0SNamjae Jeon {
37411ccc910SNamjae Jeon 	int delta;
3751e9ea7e0SNamjae Jeon 	bool left = false;	/* Left end of @src needs merging. */
3761e9ea7e0SNamjae Jeon 	bool right = false;	/* Right end of @src needs merging. */
3771e9ea7e0SNamjae Jeon 	int tail;		/* Start of tail of @dst. */
3781e9ea7e0SNamjae Jeon 	int marker;		/* End of the inserted runs. */
3791e9ea7e0SNamjae Jeon 
3801e9ea7e0SNamjae Jeon 	/* First, see if the left and right ends need merging. */
3811e9ea7e0SNamjae Jeon 	if ((loc + 1) < dsize)
3821e9ea7e0SNamjae Jeon 		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
3831e9ea7e0SNamjae Jeon 	if (loc > 0)
3841e9ea7e0SNamjae Jeon 		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
3851e9ea7e0SNamjae Jeon 	/*
3861e9ea7e0SNamjae Jeon 	 * Allocate some space.  We will need less if the left, right, or both
3871e9ea7e0SNamjae Jeon 	 * ends get merged.  The -1 accounts for the run being replaced.
3881e9ea7e0SNamjae Jeon 	 */
3891e9ea7e0SNamjae Jeon 	delta = ssize - 1 - left - right;
3901e9ea7e0SNamjae Jeon 	if (delta > 0) {
3911e9ea7e0SNamjae Jeon 		dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
3921e9ea7e0SNamjae Jeon 		if (IS_ERR(dst))
3931e9ea7e0SNamjae Jeon 			return dst;
3941e9ea7e0SNamjae Jeon 	}
39511ccc910SNamjae Jeon 
39611ccc910SNamjae Jeon 	*new_size = dsize + delta;
3971e9ea7e0SNamjae Jeon 	/*
3981e9ea7e0SNamjae Jeon 	 * We are guaranteed to succeed from here so can start modifying the
3991e9ea7e0SNamjae Jeon 	 * original runlists.
4001e9ea7e0SNamjae Jeon 	 */
4011e9ea7e0SNamjae Jeon 
4021e9ea7e0SNamjae Jeon 	/* First, merge the left and right ends, if necessary. */
4031e9ea7e0SNamjae Jeon 	if (right)
4041e9ea7e0SNamjae Jeon 		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
4051e9ea7e0SNamjae Jeon 	if (left)
4061e9ea7e0SNamjae Jeon 		__ntfs_rl_merge(dst + loc - 1, src);
4071e9ea7e0SNamjae Jeon 	/*
4081e9ea7e0SNamjae Jeon 	 * Offset of the tail of @dst.  This needs to be moved out of the way
4091e9ea7e0SNamjae Jeon 	 * to make space for the runs to be copied from @src, i.e. the first
4101e9ea7e0SNamjae Jeon 	 * run of the tail of @dst.
4111e9ea7e0SNamjae Jeon 	 * Nominally, @tail equals @loc + 1, i.e. location, skipping the
4121e9ea7e0SNamjae Jeon 	 * replaced run.  However, if @right, then one of @dst's runs is
4131e9ea7e0SNamjae Jeon 	 * already merged into @src.
4141e9ea7e0SNamjae Jeon 	 */
4151e9ea7e0SNamjae Jeon 	tail = loc + right + 1;
4161e9ea7e0SNamjae Jeon 	/*
4171e9ea7e0SNamjae Jeon 	 * First run after the @src runs that have been inserted, i.e. where
4181e9ea7e0SNamjae Jeon 	 * the tail of @dst needs to be moved to.
4191e9ea7e0SNamjae Jeon 	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
4201e9ea7e0SNamjae Jeon 	 * runs in @src.  However, if @left, then the first run in @src has
4211e9ea7e0SNamjae Jeon 	 * been merged with one in @dst.
4221e9ea7e0SNamjae Jeon 	 */
4231e9ea7e0SNamjae Jeon 	marker = loc + ssize - left;
4241e9ea7e0SNamjae Jeon 
4251e9ea7e0SNamjae Jeon 	/* Move the tail of @dst out of the way, then copy in @src. */
4261e9ea7e0SNamjae Jeon 	ntfs_rl_mm(dst, marker, tail, dsize - tail);
4271e9ea7e0SNamjae Jeon 	ntfs_rl_mc(dst, loc, src, left, ssize - left);
4281e9ea7e0SNamjae Jeon 
4291e9ea7e0SNamjae Jeon 	/* We may have changed the length of the file, so fix the end marker. */
4301e9ea7e0SNamjae Jeon 	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
4311e9ea7e0SNamjae Jeon 		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
4321e9ea7e0SNamjae Jeon 	return dst;
4331e9ea7e0SNamjae Jeon }
4341e9ea7e0SNamjae Jeon 
43511ccc910SNamjae Jeon /*
4361e9ea7e0SNamjae Jeon  * ntfs_rl_split - insert a runlist into the centre of a hole
43711ccc910SNamjae Jeon  * @dst: destination runlist with a hole
43811ccc910SNamjae Jeon  * @dsize: number of elements in @dst
43911ccc910SNamjae Jeon  * @src: source runlist to insert
44011ccc910SNamjae Jeon  * @ssize: number of elements in @src
44111ccc910SNamjae Jeon  * @loc: index in @dst of the hole to split
44211ccc910SNamjae Jeon  * @new_size: on success, set to the new combined size
4431e9ea7e0SNamjae Jeon  *
4441e9ea7e0SNamjae Jeon  * Split the runlist @dst at @loc into two and insert @new in between the two
4451e9ea7e0SNamjae Jeon  * fragments. No merging of runlists is necessary. Adjust the size of the
4461e9ea7e0SNamjae Jeon  * holes either side.
4471e9ea7e0SNamjae Jeon  *
4481e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @dst and @src.
4491e9ea7e0SNamjae Jeon  *
4501e9ea7e0SNamjae Jeon  * On success, return a pointer to the new, combined, runlist. Note, both
4511e9ea7e0SNamjae Jeon  * runlists @dst and @src are deallocated before returning so you cannot use
4521e9ea7e0SNamjae Jeon  * the pointers for anything any more. (Strictly speaking the returned runlist
4531e9ea7e0SNamjae Jeon  * may be the same as @dst but this is irrelevant.)
4541e9ea7e0SNamjae Jeon  *
45511ccc910SNamjae Jeon  * On error, return -errno. Both runlists are left unmodified.
4561e9ea7e0SNamjae Jeon  */
45711ccc910SNamjae Jeon static inline struct runlist_element *ntfs_rl_split(struct runlist_element *dst, int dsize,
45811ccc910SNamjae Jeon 		struct runlist_element *src, int ssize, int loc,
45911ccc910SNamjae Jeon 		size_t *new_size)
4601e9ea7e0SNamjae Jeon {
4611e9ea7e0SNamjae Jeon 	/* Space required: @dst size + @src size + one new hole. */
4621e9ea7e0SNamjae Jeon 	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
4631e9ea7e0SNamjae Jeon 	if (IS_ERR(dst))
4641e9ea7e0SNamjae Jeon 		return dst;
46511ccc910SNamjae Jeon 
46611ccc910SNamjae Jeon 	*new_size = dsize + ssize + 1;
4671e9ea7e0SNamjae Jeon 	/*
4681e9ea7e0SNamjae Jeon 	 * We are guaranteed to succeed from here so can start modifying the
4691e9ea7e0SNamjae Jeon 	 * original runlists.
4701e9ea7e0SNamjae Jeon 	 */
4711e9ea7e0SNamjae Jeon 
4721e9ea7e0SNamjae Jeon 	/* Move the tail of @dst out of the way, then copy in @src. */
4731e9ea7e0SNamjae Jeon 	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
4741e9ea7e0SNamjae Jeon 	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
4751e9ea7e0SNamjae Jeon 
4761e9ea7e0SNamjae Jeon 	/* Adjust the size of the holes either size of @src. */
4771e9ea7e0SNamjae Jeon 	dst[loc].length		= dst[loc+1].vcn       - dst[loc].vcn;
4781e9ea7e0SNamjae Jeon 	dst[loc+ssize+1].vcn    = dst[loc+ssize].vcn   + dst[loc+ssize].length;
4791e9ea7e0SNamjae Jeon 	dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
4801e9ea7e0SNamjae Jeon 
4811e9ea7e0SNamjae Jeon 	return dst;
4821e9ea7e0SNamjae Jeon }
4831e9ea7e0SNamjae Jeon 
48411ccc910SNamjae Jeon /*
4851e9ea7e0SNamjae Jeon  * ntfs_runlists_merge - merge two runlists into one
48611ccc910SNamjae Jeon  * @d_runlist: destination runlist structure to merge into
48711ccc910SNamjae Jeon  * @srl: source runlist to merge from
48811ccc910SNamjae Jeon  * @s_rl_count: number of elements in @srl (0 to auto-detect)
48911ccc910SNamjae Jeon  * @new_rl_count: on success, set to the new combined runlist size
4901e9ea7e0SNamjae Jeon  *
4911e9ea7e0SNamjae Jeon  * First we sanity check the two runlists @srl and @drl to make sure that they
4921e9ea7e0SNamjae Jeon  * are sensible and can be merged. The runlist @srl must be either after the
4931e9ea7e0SNamjae Jeon  * runlist @drl or completely within a hole (or unmapped region) in @drl.
4941e9ea7e0SNamjae Jeon  *
4951e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlists @drl and @srl.
4961e9ea7e0SNamjae Jeon  *
4971e9ea7e0SNamjae Jeon  * Merging of runlists is necessary in two cases:
4981e9ea7e0SNamjae Jeon  *   1. When attribute lists are used and a further extent is being mapped.
4991e9ea7e0SNamjae Jeon  *   2. When new clusters are allocated to fill a hole or extend a file.
5001e9ea7e0SNamjae Jeon  *
5011e9ea7e0SNamjae Jeon  * There are four possible ways @srl can be merged. It can:
5021e9ea7e0SNamjae Jeon  *	- be inserted at the beginning of a hole,
5031e9ea7e0SNamjae Jeon  *	- split the hole in two and be inserted between the two fragments,
5041e9ea7e0SNamjae Jeon  *	- be appended at the end of a hole, or it can
5051e9ea7e0SNamjae Jeon  *	- replace the whole hole.
5061e9ea7e0SNamjae Jeon  * It can also be appended to the end of the runlist, which is just a variant
5071e9ea7e0SNamjae Jeon  * of the insert case.
5081e9ea7e0SNamjae Jeon  *
5091e9ea7e0SNamjae Jeon  * On success, return a pointer to the new, combined, runlist. Note, both
5101e9ea7e0SNamjae Jeon  * runlists @drl and @srl are deallocated before returning so you cannot use
5111e9ea7e0SNamjae Jeon  * the pointers for anything any more. (Strictly speaking the returned runlist
5121e9ea7e0SNamjae Jeon  * may be the same as @dst but this is irrelevant.)
5131e9ea7e0SNamjae Jeon  *
51411ccc910SNamjae Jeon  * On error, return -errno. Both runlists are left unmodified.
5151e9ea7e0SNamjae Jeon  */
struct runlist_element *ntfs_runlists_merge(struct runlist *d_runlist,
				     struct runlist_element *srl, size_t s_rl_count,
				     size_t *new_rl_count)
{
	int di, si;		/* Current index into @[ds]rl. */
	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;		/* Index into @drl at which to insert @srl. */
	int dend, send;		/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with lcn >= LCN_HOLE. */
	int marker = 0;		/* Index of the LCN_ENOENT terminator in @srl, if any. */
	s64 marker_vcn = 0;	/* VCN of that terminator. */
	struct runlist_element *drl = d_runlist->rl, *rl;

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	/*
	 * A count of zero means "unknown": walk @srl up to and including its
	 * zero-length terminator element to establish the actual size.
	 */
	if (s_rl_count == 0) {
		for (; srl[s_rl_count].length; s_rl_count++)
			;
		s_rl_count++;
	}

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		/* No destination yet: @srl itself becomes the result. */
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Grow by one slot for the unmapped head element. */
			drl = ntfs_rl_realloc(drl, s_rl_count, s_rl_count + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, s_rl_count);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
			s_rl_count++;
		}

		*new_rl_count = s_rl_count;
		goto finished;
	}

	/* Need at least a destination element and source data + terminator. */
	if (d_runlist->count < 1 || s_rl_count < 2)
		return ERR_PTR(-EINVAL);

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	WARN_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs to
	 * be inserted. If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	rl = __ntfs_attr_find_vcn_nolock(d_runlist, srl[sstart].vcn);
	if (IS_ERR(rl))
		di = (int)d_runlist->count - 1;	/* Not found: append at end. */
	else
		di = (int)(rl - d_runlist->rl);
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Scan to the end of both runlists in order to know their sizes. */
	send = (int)s_rl_count - 1;
	dend = (int)d_runlist->count - 1;

	/* Remember an LCN_ENOENT terminator in @srl; re-created below. */
	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
		;

	{
	bool start;
	bool finish;
	int ds = dend + 1;		/* Number of elements in drl & srl */
	int ss = sfinal - sstart + 1;

	/*
	 * start:  @srl begins exactly on the boundary of the @drl element at
	 *	   the insertion point (or @drl has already ended there).
	 * finish: the @drl element at the insertion point is fully covered by
	 *	   @srl, so it can be replaced rather than split.
	 */
	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||    /* End of file   */
		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&    /* End of file   */
		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
		  (srl[send - 1].vcn + srl[send - 1].length)));

	/* Or we will lose an end marker. */
	if (finish && !drl[dins].length)
		ss++;
	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
		finish = false;

	/* Pick the merge strategy matching the (start, finish) combination. */
	if (start) {
		if (finish)
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins, new_rl_count);
		else
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins, new_rl_count);
	} else {
		if (finish)
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins, new_rl_count);
		else
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins, new_rl_count);
	}
	if (IS_ERR(drl)) {
		ntfs_error(NULL, "Merge failed.");
		return drl;
	}
	/* Source elements have been copied into @drl; free @srl now. */
	kvfree(srl);
	if (marker) {
		ntfs_debug("Triggering marker code.");
		/* Find the new terminator; it cannot be before old @dend. */
		for (ds = dend; drl[ds].length; ds++)
			;
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			int slots = 0;

			if (drl[ds].vcn == marker_vcn) {
				ntfs_debug("Old marker = 0x%llx, replacing with LCN_ENOENT.",
						drl[ds].lcn);
				drl[ds].lcn = LCN_ENOENT;
				goto finished;
			}
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == LCN_ENOENT) {
				/* Reuse the existing terminator slot. */
				ds--;
				slots = 1;
			}
			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds,
							ds + 2);
					slots = 2;
					*new_rl_count += 2;
				}
				ds++;
				/* Need to set vcn if it isn't set already. */
				if (slots != 1)
					drl[ds].vcn = drl[ds - 1].vcn +
							drl[ds - 1].length;
				drl[ds].lcn = LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
				slots--;
			}
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
			ds++;
			if (!slots) {
				drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
				*new_rl_count += 1;
			}
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = LCN_ENOENT;
			drl[ds].length = (s64)0;
		}
	}
	}

finished:
	/* The merge was completed successfully. */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}
7101e9ea7e0SNamjae Jeon 
/*
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol: ntfs volume
 * @attr: attribute record whose mapping pairs to decompress
 * @old_runlist: optional runlist to merge the decompressed runlist into
 * @new_rl_count: on success, set to the new runlist size
 *
 * It is up to the caller to serialize access to the runlist @old_runlist.
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist. On
 * success, return the decompressed runlist.
 *
 * If @old_runlist is not NULL, the decompressed runlist is inserted into the
 * appropriate place in @old_runlist and the resultant, combined runlist is
 * returned. The original @old_runlist runlist array is deallocated.
 *
 * On error, return -errno. @old_runlist is left unmodified in that case.
 */
struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume *vol,
		const struct attr_record *attr, struct runlist *old_runlist,
		size_t *new_rl_count)
{
	s64 vcn;		/* Current vcn. */
	s64 lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	struct runlist_element *rl, *new_rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of struct runlist_elements. */
	u8 b;			/* Current byte offset in buf. */

#ifdef DEBUG
	/* Make sure attr exists and is non-resident. */
	if (!attr || !attr->non_resident) {
		ntfs_error(vol->sb, "Invalid arguments.");
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = le64_to_cpu(attr->data.non_resident.lowest_vcn);
	lcn = 0;
	/* Get start of the mapping pairs array. */
	buf = (u8 *)attr +
		le16_to_cpu(attr->data.non_resident.mapping_pairs_offset);
	attr_end = (u8 *)attr + le32_to_cpu(attr->length);
	/* The mapping pairs array must lie within the attribute record. */
	if (unlikely(buf < (u8 *)attr || buf > attr_end)) {
		ntfs_error(vol->sb, "Corrupt attribute.");
		return ERR_PTR(-EIO);
	}

	/* Current position in runlist array. */
	rlpos = 0;
	/* Allocate first page and set current runlist size to one page. */
	rl = kvzalloc(rlsize = PAGE_SIZE, GFP_NOFS);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	/* Insert unmapped starting element if necessary. */
	if (vcn) {
		rl->vcn = 0;
		rl->lcn = LCN_RL_NOT_MAPPED;
		rl->length = vcn;
		rlpos++;
	}
	/* A zero header byte terminates the mapping pairs array. */
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements. kvzalloc()
		 * operates on whole pages only.
		 */
		if (((rlpos + 3) * sizeof(*rl)) > rlsize) {
			struct runlist_element *rl2;

			rl2 = kvzalloc(rlsize + PAGE_SIZE, GFP_NOFS);
			if (unlikely(!rl2)) {
				kvfree(rl);
				return ERR_PTR(-ENOMEM);
			}
			memcpy(rl2, rl, rlsize);
			kvfree(rl);
			rl = rl2;
			rlsize += PAGE_SIZE;
		}
		/* Enter the current vcn into the current runlist element. */
		rl[rlpos].vcn = vcn;
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we signextend negative values.
		 * A negative run length doesn't make any sense, but hey, I
		 * didn't make up the NTFS specs and Windows NT4 treats the run
		 * length as a signed value so that's how it is...
		 */
		b = *buf & 0xf;	/* Low nibble = byte count of length field. */
		if (b) {
			if (unlikely(buf + b > attr_end))
				goto io_error;
			/* Read the little-endian length, sign-extending. */
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			ntfs_error(vol->sb, "Missing length entry in mapping pairs array.");
			deltaxcn = (s64)-1;
		}
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean-up and return NULL.
		 */
		if (unlikely(deltaxcn < 0)) {
			ntfs_error(vol->sb, "Invalid length in mapping pairs array.");
			goto err_out;
		}
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/* Increment the current vcn by the current run length. */
		vcn += deltaxcn;
		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
		if (!(*buf & 0xf0))
			rl[rlpos].lcn = LCN_HOLE;
		else {
			/* Get the lcn change which really can be negative. */
			u8 b2 = *buf & 0xf;

			/* High nibble = byte count of the lcn delta field. */
			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
				goto io_error;
			/* Read the little-endian lcn delta, sign-extending. */
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to its new value. */
			lcn += deltaxcn;
#ifdef DEBUG
			/*
			 * On NTFS 1.2-, apparently can have lcn == -1 to
			 * indicate a hole. But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1. So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (unlikely(deltaxcn == -1))
					ntfs_error(vol->sb, "lcn delta == -1");
				if (unlikely(lcn == -1))
					ntfs_error(vol->sb, "lcn == -1");
			}
#endif
			/* Check lcn is not below -1. */
			if (unlikely(lcn < -1)) {
				ntfs_error(vol->sb, "Invalid s64 < -1 in mapping pairs array.");
				goto err_out;
			}

			/* chkdsk accepts zero-sized runs only for holes */
			if ((lcn != -1) && !rl[rlpos].length) {
				ntfs_error(vol->sb,
					   "Invalid zero-sized data run(lcn : %lld).\n",
					   lcn);
				goto err_out;
			}

			/* Enter the current lcn into the runlist element. */
			rl[rlpos].lcn = lcn;
		}
		/* Get to the next runlist element, skipping zero-sized holes */
		if (rl[rlpos].length)
			rlpos++;
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
	}
	/* The loop must have ended on the terminator, not by running out. */
	if (unlikely(buf >= attr_end))
		goto io_error;
	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = le64_to_cpu(attr->data.non_resident.highest_vcn);
	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
		ntfs_error(vol->sb, "Corrupt mapping pairs array in non-resident attribute.");
		goto err_out;
	}
	/* Setup not mapped runlist element if this is the base extent. */
	if (!attr->data.non_resident.lowest_vcn) {
		s64 max_cluster;

		/* Index of the last cluster covered by allocated_size. */
		max_cluster = ((le64_to_cpu(attr->data.non_resident.allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits) - 1;
		/*
		 * A highest_vcn of zero means this is a single extent
		 * attribute so simply terminate the runlist with LCN_ENOENT).
		 */
		if (deltaxcn) {
			/*
			 * If there is a difference between the highest_vcn and
			 * the highest cluster, the runlist is either corrupt
			 * or, more likely, there are more extents following
			 * this one.
			 */
			if (deltaxcn < max_cluster) {
				ntfs_debug("More extents to follow; deltaxcn = 0x%llx, max_cluster = 0x%llx",
						deltaxcn, max_cluster);
				rl[rlpos].vcn = vcn;
				vcn += rl[rlpos].length = max_cluster -
						deltaxcn;
				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
				rlpos++;
			} else if (unlikely(deltaxcn > max_cluster)) {
				ntfs_error(vol->sb,
					   "Corrupt attribute. deltaxcn = 0x%llx, max_cluster = 0x%llx",
					   deltaxcn, max_cluster);
				goto mpa_err;
			}
		}
		rl[rlpos].lcn = LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

	/* Setup terminating runlist element. */
	rl[rlpos].vcn = vcn;
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
	if (!old_runlist || !old_runlist->rl) {
		*new_rl_count = rlpos + 1;
		ntfs_debug("Mapping pairs array successfully decompressed:");
		ntfs_debug_dump_runlist(rl);
		return rl;
	}
	/* Now combine the new and old runlists checking for overlaps. */
	new_rl = ntfs_runlists_merge(old_runlist, rl, rlpos + 1, new_rl_count);
	if (!IS_ERR(new_rl))
		return new_rl;
	/* Merge failed: the freshly decompressed runlist is freed here. */
	kvfree(rl);
	ntfs_error(vol->sb, "Failed to merge runlists.");
	return new_rl;
io_error:
	ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
	kvfree(rl);
	return ERR_PTR(-EIO);
}
9561e9ea7e0SNamjae Jeon 
95711ccc910SNamjae Jeon /*
9581e9ea7e0SNamjae Jeon  * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
9591e9ea7e0SNamjae Jeon  * @rl:		runlist to use for conversion
9601e9ea7e0SNamjae Jeon  * @vcn:	vcn to convert
9611e9ea7e0SNamjae Jeon  *
9621e9ea7e0SNamjae Jeon  * Convert the virtual cluster number @vcn of an attribute into a logical
9631e9ea7e0SNamjae Jeon  * cluster number (lcn) of a device using the runlist @rl to map vcns to their
9641e9ea7e0SNamjae Jeon  * corresponding lcns.
9651e9ea7e0SNamjae Jeon  *
9661e9ea7e0SNamjae Jeon  * It is up to the caller to serialize access to the runlist @rl.
9671e9ea7e0SNamjae Jeon  *
9681e9ea7e0SNamjae Jeon  * Since lcns must be >= 0, we use negative return codes with special meaning:
9691e9ea7e0SNamjae Jeon  *
9701e9ea7e0SNamjae Jeon  * Return code		Meaning / Description
9711e9ea7e0SNamjae Jeon  * ==================================================
9721e9ea7e0SNamjae Jeon  *  LCN_HOLE		Hole / not allocated on disk.
9731e9ea7e0SNamjae Jeon  *  LCN_RL_NOT_MAPPED	This is part of the runlist which has not been
9741e9ea7e0SNamjae Jeon  *			inserted into the runlist yet.
9751e9ea7e0SNamjae Jeon  *  LCN_ENOENT		There is no such vcn in the attribute.
9761e9ea7e0SNamjae Jeon  *
9771e9ea7e0SNamjae Jeon  * Locking: - The caller must have locked the runlist (for reading or writing).
9781e9ea7e0SNamjae Jeon  *	    - This function does not touch the lock, nor does it modify the
9791e9ea7e0SNamjae Jeon  *	      runlist.
9801e9ea7e0SNamjae Jeon  */
98111ccc910SNamjae Jeon s64 ntfs_rl_vcn_to_lcn(const struct runlist_element *rl, const s64 vcn)
9821e9ea7e0SNamjae Jeon {
9831e9ea7e0SNamjae Jeon 	int i;
9841e9ea7e0SNamjae Jeon 
9851e9ea7e0SNamjae Jeon 	/*
9861e9ea7e0SNamjae Jeon 	 * If rl is NULL, assume that we have found an unmapped runlist. The
9871e9ea7e0SNamjae Jeon 	 * caller can then attempt to map it and fail appropriately if
9881e9ea7e0SNamjae Jeon 	 * necessary.
9891e9ea7e0SNamjae Jeon 	 */
9901e9ea7e0SNamjae Jeon 	if (unlikely(!rl))
9911e9ea7e0SNamjae Jeon 		return LCN_RL_NOT_MAPPED;
9921e9ea7e0SNamjae Jeon 
9931e9ea7e0SNamjae Jeon 	/* Catch out of lower bounds vcn. */
9941e9ea7e0SNamjae Jeon 	if (unlikely(vcn < rl[0].vcn))
9951e9ea7e0SNamjae Jeon 		return LCN_ENOENT;
9961e9ea7e0SNamjae Jeon 
9971e9ea7e0SNamjae Jeon 	for (i = 0; likely(rl[i].length); i++) {
99811ccc910SNamjae Jeon 		if (vcn < rl[i+1].vcn) {
99911ccc910SNamjae Jeon 			if (likely(rl[i].lcn >= 0))
10001e9ea7e0SNamjae Jeon 				return rl[i].lcn + (vcn - rl[i].vcn);
10011e9ea7e0SNamjae Jeon 			return rl[i].lcn;
10021e9ea7e0SNamjae Jeon 		}
10031e9ea7e0SNamjae Jeon 	}
10041e9ea7e0SNamjae Jeon 	/*
10051e9ea7e0SNamjae Jeon 	 * The terminator element is setup to the correct value, i.e. one of
10061e9ea7e0SNamjae Jeon 	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
10071e9ea7e0SNamjae Jeon 	 */
100811ccc910SNamjae Jeon 	if (likely(rl[i].lcn < 0))
10091e9ea7e0SNamjae Jeon 		return rl[i].lcn;
10101e9ea7e0SNamjae Jeon 	/* Just in case... We could replace this with BUG() some day. */
10111e9ea7e0SNamjae Jeon 	return LCN_ENOENT;
10121e9ea7e0SNamjae Jeon }
10131e9ea7e0SNamjae Jeon 
101411ccc910SNamjae Jeon /*
10151e9ea7e0SNamjae Jeon  * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
10161e9ea7e0SNamjae Jeon  * @rl:		runlist to search
10171e9ea7e0SNamjae Jeon  * @vcn:	vcn to find
10181e9ea7e0SNamjae Jeon  *
10191e9ea7e0SNamjae Jeon  * Find the virtual cluster number @vcn in the runlist @rl and return the
10201e9ea7e0SNamjae Jeon  * address of the runlist element containing the @vcn on success.
10211e9ea7e0SNamjae Jeon  *
10221e9ea7e0SNamjae Jeon  * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
10231e9ea7e0SNamjae Jeon  * the runlist.
10241e9ea7e0SNamjae Jeon  *
10251e9ea7e0SNamjae Jeon  * Locking: The runlist must be locked on entry.
10261e9ea7e0SNamjae Jeon  */
102711ccc910SNamjae Jeon struct runlist_element *ntfs_rl_find_vcn_nolock(struct runlist_element *rl, const s64 vcn)
10281e9ea7e0SNamjae Jeon {
10291e9ea7e0SNamjae Jeon 	if (unlikely(!rl || vcn < rl[0].vcn))
10301e9ea7e0SNamjae Jeon 		return NULL;
10311e9ea7e0SNamjae Jeon 	while (likely(rl->length)) {
10321e9ea7e0SNamjae Jeon 		if (unlikely(vcn < rl[1].vcn)) {
10331e9ea7e0SNamjae Jeon 			if (likely(rl->lcn >= LCN_HOLE))
10341e9ea7e0SNamjae Jeon 				return rl;
10351e9ea7e0SNamjae Jeon 			return NULL;
10361e9ea7e0SNamjae Jeon 		}
10371e9ea7e0SNamjae Jeon 		rl++;
10381e9ea7e0SNamjae Jeon 	}
10391e9ea7e0SNamjae Jeon 	if (likely(rl->lcn == LCN_ENOENT))
10401e9ea7e0SNamjae Jeon 		return rl;
10411e9ea7e0SNamjae Jeon 	return NULL;
10421e9ea7e0SNamjae Jeon }
10431e9ea7e0SNamjae Jeon 
104411ccc910SNamjae Jeon /*
10451e9ea7e0SNamjae Jeon  * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
10461e9ea7e0SNamjae Jeon  * @n:		number for which to get the number of bytes for
10471e9ea7e0SNamjae Jeon  *
10481e9ea7e0SNamjae Jeon  * Return the number of bytes required to store @n unambiguously as
10491e9ea7e0SNamjae Jeon  * a signed number.
10501e9ea7e0SNamjae Jeon  *
10511e9ea7e0SNamjae Jeon  * This is used in the context of the mapping pairs array to determine how
10521e9ea7e0SNamjae Jeon  * many bytes will be needed in the array to store a given logical cluster
10531e9ea7e0SNamjae Jeon  * number (lcn) or a specific run length.
10541e9ea7e0SNamjae Jeon  *
 * Return the number of bytes needed.  This function cannot fail.
10561e9ea7e0SNamjae Jeon  */
10571e9ea7e0SNamjae Jeon static inline int ntfs_get_nr_significant_bytes(const s64 n)
10581e9ea7e0SNamjae Jeon {
10591e9ea7e0SNamjae Jeon 	s64 l = n;
10601e9ea7e0SNamjae Jeon 	int i;
10611e9ea7e0SNamjae Jeon 	s8 j;
10621e9ea7e0SNamjae Jeon 
10631e9ea7e0SNamjae Jeon 	i = 0;
10641e9ea7e0SNamjae Jeon 	do {
10651e9ea7e0SNamjae Jeon 		l >>= 8;
10661e9ea7e0SNamjae Jeon 		i++;
10671e9ea7e0SNamjae Jeon 	} while (l != 0 && l != -1);
10681e9ea7e0SNamjae Jeon 	j = (n >> 8 * (i - 1)) & 0xff;
10691e9ea7e0SNamjae Jeon 	/* If the sign bit is wrong, we need an extra byte. */
10701e9ea7e0SNamjae Jeon 	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
10711e9ea7e0SNamjae Jeon 		i++;
10721e9ea7e0SNamjae Jeon 	return i;
10731e9ea7e0SNamjae Jeon }
10741e9ea7e0SNamjae Jeon 
107511ccc910SNamjae Jeon /*
10761e9ea7e0SNamjae Jeon  * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
107711ccc910SNamjae Jeon  * @vol: ntfs volume
107811ccc910SNamjae Jeon  * @rl: runlist to calculate the mapping pairs array size for
10791e9ea7e0SNamjae Jeon  * @first_vcn: first vcn which to include in the mapping pairs array
10801e9ea7e0SNamjae Jeon  * @last_vcn: last vcn which to include in the mapping pairs array
108111ccc910SNamjae Jeon  * @max_mp_size: maximum size to return (0 or less means unlimited)
10821e9ea7e0SNamjae Jeon  *
10831e9ea7e0SNamjae Jeon  * Walk the locked runlist @rl and calculate the size in bytes of the mapping
10841e9ea7e0SNamjae Jeon  * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
10851e9ea7e0SNamjae Jeon  * finishing with vcn @last_vcn.
10861e9ea7e0SNamjae Jeon  *
10871e9ea7e0SNamjae Jeon  * A @last_vcn of -1 means end of runlist and in that case the size of the
10881e9ea7e0SNamjae Jeon  * mapping pairs array corresponding to the runlist starting at vcn @first_vcn
10891e9ea7e0SNamjae Jeon  * and finishing at the end of the runlist is determined.
10901e9ea7e0SNamjae Jeon  *
10911e9ea7e0SNamjae Jeon  * This for example allows us to allocate a buffer of the right size when
10921e9ea7e0SNamjae Jeon  * building the mapping pairs array.
10931e9ea7e0SNamjae Jeon  *
10941e9ea7e0SNamjae Jeon  * If @rl is NULL, just return 1 (for the single terminator byte).
10951e9ea7e0SNamjae Jeon  *
10961e9ea7e0SNamjae Jeon  * Return the calculated size in bytes on success.  On error, return -errno.
10971e9ea7e0SNamjae Jeon  */
int ntfs_get_size_for_mapping_pairs(const struct ntfs_volume *vol,
		const struct runlist_element *rl, const s64 first_vcn,
		const s64 last_vcn, int max_mp_size)
{
	s64 prev_lcn;	/* lcn of the previously encoded run (deltas are stored). */
	int rls;	/* Running byte total for the mapping pairs array. */
	bool the_end = false;	/* Set once @last_vcn is reached inside a run. */

	/* A negative @first_vcn or a @last_vcn below -1 makes no sense. */
	if (first_vcn < 0 || last_vcn < -1)
		return -EINVAL;

	/* An explicit @last_vcn must not precede @first_vcn. */
	if (last_vcn >= 0 && first_vcn > last_vcn)
		return -EINVAL;

	if (!rl) {
		/* An empty runlist encodes as the single terminator byte. */
		WARN_ON(first_vcn);
		WARN_ON(last_vcn > 0);
		return 1;
	}
	/* A non-positive @max_mp_size means "no limit". */
	if (max_mp_size <= 0)
		max_mp_size = INT_MAX;
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	/* @first_vcn must lie inside the mapped part of the runlist. */
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	prev_lcn = 0;
	/* Always need the terminating zero byte. */
	rls = 1;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* @delta is how far into this run @first_vcn lies. */
		delta = first_vcn - rl->vcn;
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(prev_lcn);
		}
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(rl->lcn -
					prev_lcn);
			prev_lcn = rl->lcn;
		}

		/*
		 * Stop sizing once the running total exceeds @max_mp_size;
		 * the caller only needs to know the limit was reached.
		 */
		if (rls > max_mp_size)
			break;
	}
	return rls;
err_out:
	/*
	 * LCN_RL_NOT_MAPPED means the caller passed a partially mapped
	 * runlist; any other lcn below LCN_HOLE indicates corruption.
	 */
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		rls = -EINVAL;
	else
		rls = -EIO;
	return rls;
}
12101e9ea7e0SNamjae Jeon 
121111ccc910SNamjae Jeon /*
12121e9ea7e0SNamjae Jeon  * ntfs_write_significant_bytes - write the significant bytes of a number
12131e9ea7e0SNamjae Jeon  * @dst:	destination buffer to write to
12141e9ea7e0SNamjae Jeon  * @dst_max:	pointer to last byte of destination buffer for bounds checking
12151e9ea7e0SNamjae Jeon  * @n:		number whose significant bytes to write
12161e9ea7e0SNamjae Jeon  *
12171e9ea7e0SNamjae Jeon  * Store in @dst, the minimum bytes of the number @n which are required to
12181e9ea7e0SNamjae Jeon  * identify @n unambiguously as a signed number, taking care not to exceed
12191e9ea7e0SNamjae Jeon  * @dest_max, the maximum position within @dst to which we are allowed to
12201e9ea7e0SNamjae Jeon  * write.
12211e9ea7e0SNamjae Jeon  *
12221e9ea7e0SNamjae Jeon  * This is used when building the mapping pairs array of a runlist to compress
12231e9ea7e0SNamjae Jeon  * a given logical cluster number (lcn) or a specific run length to the minimum
12241e9ea7e0SNamjae Jeon  * size possible.
12251e9ea7e0SNamjae Jeon  *
12261e9ea7e0SNamjae Jeon  * Return the number of bytes written on success.  On error, i.e. the
12271e9ea7e0SNamjae Jeon  * destination buffer @dst is too small, return -ENOSPC.
12281e9ea7e0SNamjae Jeon  */
12291e9ea7e0SNamjae Jeon static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
12301e9ea7e0SNamjae Jeon 		const s64 n)
12311e9ea7e0SNamjae Jeon {
12321e9ea7e0SNamjae Jeon 	s64 l = n;
12331e9ea7e0SNamjae Jeon 	int i;
12341e9ea7e0SNamjae Jeon 	s8 j;
12351e9ea7e0SNamjae Jeon 
12361e9ea7e0SNamjae Jeon 	i = 0;
12371e9ea7e0SNamjae Jeon 	do {
12381e9ea7e0SNamjae Jeon 		if (unlikely(dst > dst_max))
12391e9ea7e0SNamjae Jeon 			goto err_out;
12401e9ea7e0SNamjae Jeon 		*dst++ = l & 0xffll;
12411e9ea7e0SNamjae Jeon 		l >>= 8;
12421e9ea7e0SNamjae Jeon 		i++;
12431e9ea7e0SNamjae Jeon 	} while (l != 0 && l != -1);
12441e9ea7e0SNamjae Jeon 	j = (n >> 8 * (i - 1)) & 0xff;
12451e9ea7e0SNamjae Jeon 	/* If the sign bit is wrong, we need an extra byte. */
12461e9ea7e0SNamjae Jeon 	if (n < 0 && j >= 0) {
12471e9ea7e0SNamjae Jeon 		if (unlikely(dst > dst_max))
12481e9ea7e0SNamjae Jeon 			goto err_out;
12491e9ea7e0SNamjae Jeon 		i++;
12501e9ea7e0SNamjae Jeon 		*dst = (s8)-1;
12511e9ea7e0SNamjae Jeon 	} else if (n > 0 && j < 0) {
12521e9ea7e0SNamjae Jeon 		if (unlikely(dst > dst_max))
12531e9ea7e0SNamjae Jeon 			goto err_out;
12541e9ea7e0SNamjae Jeon 		i++;
12551e9ea7e0SNamjae Jeon 		*dst = (s8)0;
12561e9ea7e0SNamjae Jeon 	}
12571e9ea7e0SNamjae Jeon 	return i;
12581e9ea7e0SNamjae Jeon err_out:
12591e9ea7e0SNamjae Jeon 	return -ENOSPC;
12601e9ea7e0SNamjae Jeon }
12611e9ea7e0SNamjae Jeon 
126211ccc910SNamjae Jeon /*
12631e9ea7e0SNamjae Jeon  * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
126411ccc910SNamjae Jeon  * @vol: ntfs volume
126511ccc910SNamjae Jeon  * @dst: destination buffer to build mapping pairs array into
126611ccc910SNamjae Jeon  * @dst_len: size of @dst in bytes
126711ccc910SNamjae Jeon  * @rl: runlist to build the mapping pairs array from
12681e9ea7e0SNamjae Jeon  * @first_vcn: first vcn which to include in the mapping pairs array
12691e9ea7e0SNamjae Jeon  * @last_vcn: last vcn which to include in the mapping pairs array
127011ccc910SNamjae Jeon  * @stop_vcn: on return, set to the first vcn outside the destination buffer
127111ccc910SNamjae Jeon  * @stop_rl: on return, set to the runlist element where encoding stopped
127211ccc910SNamjae Jeon  * @de_cluster_count: on return, set to the number of clusters encoded
12731e9ea7e0SNamjae Jeon  *
12741e9ea7e0SNamjae Jeon  * Create the mapping pairs array from the locked runlist @rl, starting at vcn
12751e9ea7e0SNamjae Jeon  * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
12761e9ea7e0SNamjae Jeon  * @dst_len is the size of @dst in bytes and it should be at least equal to the
12771e9ea7e0SNamjae Jeon  * value obtained by calling ntfs_get_size_for_mapping_pairs().
12781e9ea7e0SNamjae Jeon  *
12791e9ea7e0SNamjae Jeon  * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
12801e9ea7e0SNamjae Jeon  * array corresponding to the runlist starting at vcn @first_vcn and finishing
12811e9ea7e0SNamjae Jeon  * at the end of the runlist is created.
12821e9ea7e0SNamjae Jeon  *
12831e9ea7e0SNamjae Jeon  * If @rl is NULL, just write a single terminator byte to @dst.
12841e9ea7e0SNamjae Jeon  *
12851e9ea7e0SNamjae Jeon  * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
12861e9ea7e0SNamjae Jeon  * the first vcn outside the destination buffer.  Note that on error, @dst has
12871e9ea7e0SNamjae Jeon  * been filled with all the mapping pairs that will fit, thus it can be treated
12881e9ea7e0SNamjae Jeon  * as partial success, in that a new attribute extent needs to be created or
12891e9ea7e0SNamjae Jeon  * the next extent has to be used and the mapping pairs build has to be
12901e9ea7e0SNamjae Jeon  * continued with @first_vcn set to *@stop_vcn.
12911e9ea7e0SNamjae Jeon  *
12921e9ea7e0SNamjae Jeon  * Return 0 on success and -errno on error.  The following error codes are
12931e9ea7e0SNamjae Jeon  * defined:
12941e9ea7e0SNamjae Jeon  *	-EINVAL	- Run list contains unmapped elements.  Make sure to only pass
12951e9ea7e0SNamjae Jeon  *		  fully mapped runlists to this function.
12961e9ea7e0SNamjae Jeon  *	-EIO	- The runlist is corrupt.
12971e9ea7e0SNamjae Jeon  *	-ENOSPC	- The destination buffer is too small.
12981e9ea7e0SNamjae Jeon  *
12991e9ea7e0SNamjae Jeon  * Locking: @rl must be locked on entry (either for reading or writing), it
13001e9ea7e0SNamjae Jeon  *	    remains locked throughout, and is left locked upon return.
13011e9ea7e0SNamjae Jeon  */
int ntfs_mapping_pairs_build(const struct ntfs_volume *vol, s8 *dst,
		const int dst_len, const struct runlist_element *rl,
		const s64 first_vcn, const s64 last_vcn, s64 *const stop_vcn,
		struct runlist_element **stop_rl, unsigned int *de_cluster_count)
{
	s64 prev_lcn;	/* lcn of the previously encoded run (deltas are stored). */
	s8 *dst_max, *dst_next;
	int err = -ENOSPC;
	bool the_end = false;	/* Set once @last_vcn is reached inside a run. */
	s8 len_len, lcn_len;	/* Encoded sizes of the length and lcn fields. */
	unsigned int de_cnt = 0;	/* Delayed-allocation clusters seen. */

	/* Sanity check the vcn range and the destination buffer size. */
	if (first_vcn < 0 || last_vcn < -1 || dst_len < 1)
		return -EINVAL;
	if (last_vcn >= 0 && first_vcn > last_vcn)
		return -EINVAL;

	if (!rl) {
		/* No runlist: emit only the terminator byte. */
		WARN_ON(first_vcn || last_vcn > 0);
		if (stop_vcn)
			*stop_vcn = 0;
		/* Terminator byte. */
		*dst = 0;
		return 0;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	/* @first_vcn must lie inside the mapped part of the runlist. */
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	/*
	 * @dst_max is used for bounds checking in
	 * ntfs_write_significant_bytes().
	 */
	dst_max = dst + dst_len - 1;
	prev_lcn = 0;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* @delta is how far into this run @first_vcn lies. */
		delta = first_vcn - rl->vcn;
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length - delta);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space.  On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, rl->lcn - prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
			prev_lcn = rl->lcn;
		} else {
			/*
			 * NOTE(review): @de_cnt only accumulates full
			 * delalloc runs reached here -- a delalloc first
			 * partial run above is not counted, and the uncapped
			 * rl->length is added even when @length was capped
			 * at @last_vcn.  Confirm this is intended.
			 */
			if (rl->lcn == LCN_DELALLOC)
				de_cnt += rl->length;
			lcn_len = 0;
		}
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
	}
	/* Success. */
	if (de_cluster_count)
		*de_cluster_count = de_cnt;
	err = 0;
size_err:
	/* Set stop vcn.  On -ENOSPC this tells the caller where to resume. */
	if (stop_vcn)
		*stop_vcn = rl->vcn;
	if (stop_rl)
		*stop_rl = (struct runlist_element *)rl;
	/* Add terminator byte. */
	*dst = 0;
	return err;
err_out:
	/*
	 * LCN_RL_NOT_MAPPED means the caller passed a partially mapped
	 * runlist; any other lcn below LCN_HOLE indicates corruption.
	 */
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		err = -EINVAL;
	else
		err = -EIO;
	return err;
}
14581e9ea7e0SNamjae Jeon 
145911ccc910SNamjae Jeon /*
14601e9ea7e0SNamjae Jeon  * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
14611e9ea7e0SNamjae Jeon  * @vol:	ntfs volume (needed for error output)
14621e9ea7e0SNamjae Jeon  * @runlist:	runlist to truncate
14631e9ea7e0SNamjae Jeon  * @new_length:	the new length of the runlist in VCNs
14641e9ea7e0SNamjae Jeon  *
14651e9ea7e0SNamjae Jeon  * Truncate the runlist described by @runlist as well as the memory buffer
14661e9ea7e0SNamjae Jeon  * holding the runlist elements to a length of @new_length VCNs.
14671e9ea7e0SNamjae Jeon  *
14681e9ea7e0SNamjae Jeon  * If @new_length lies within the runlist, the runlist elements with VCNs of
14691e9ea7e0SNamjae Jeon  * @new_length and above are discarded.  As a special case if @new_length is
14701e9ea7e0SNamjae Jeon  * zero, the runlist is discarded and set to NULL.
14711e9ea7e0SNamjae Jeon  *
14721e9ea7e0SNamjae Jeon  * If @new_length lies beyond the runlist, a sparse runlist element is added to
14731e9ea7e0SNamjae Jeon  * the end of the runlist @runlist or if the last runlist element is a sparse
14741e9ea7e0SNamjae Jeon  * one already, this is extended.
14751e9ea7e0SNamjae Jeon  *
14761e9ea7e0SNamjae Jeon  * Note, no checking is done for unmapped runlist elements.  It is assumed that
14771e9ea7e0SNamjae Jeon  * the caller has mapped any elements that need to be mapped already.
14781e9ea7e0SNamjae Jeon  *
14791e9ea7e0SNamjae Jeon  * Return 0 on success and -errno on error.
14801e9ea7e0SNamjae Jeon  *
14811e9ea7e0SNamjae Jeon  * Locking: The caller must hold @runlist->lock for writing.
14821e9ea7e0SNamjae Jeon  */
int ntfs_rl_truncate_nolock(const struct ntfs_volume *vol, struct runlist *const runlist,
		const s64 new_length)
{
	struct runlist_element *rl;
	int old_size;	/* Element count of the runlist before truncation. */

	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);

	if (!runlist || new_length < 0)
		return -EINVAL;

	rl = runlist->rl;
	/* Cannot truncate to before the start of the runlist. */
	if (new_length < rl->vcn)
		return -EINVAL;

	/* Find @new_length in the runlist. */
	while (likely(rl->length && new_length >= rl[1].vcn))
		rl++;
	/*
	 * If not at the end of the runlist we need to shrink it.
	 * If at the end of the runlist we need to expand it.
	 */
	if (rl->length) {
		struct runlist_element *trl;
		bool is_end;	/* True if the terminator was reused in place. */

		ntfs_debug("Shrinking runlist.");
		/* Determine the runlist size. */
		trl = rl + 1;
		while (likely(trl->length))
			trl++;
		old_size = trl - runlist->rl + 1;
		/* Truncate the run. */
		rl->length = new_length - rl->vcn;
		/*
		 * If a run was partially truncated, make the following runlist
		 * element a terminator.
		 */
		is_end = false;
		if (rl->length) {
			rl++;
			if (!rl->length)
				is_end = true;
			rl->vcn = new_length;
			rl->length = 0;
		}
		rl->lcn = LCN_ENOENT;
		/* Keep the cached element count in sync with the new end. */
		runlist->count = rl - runlist->rl + 1;
		/* Reallocate memory if necessary. */
		if (!is_end) {
			int new_size = rl - runlist->rl + 1;

			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
			if (IS_ERR(rl))
				ntfs_warning(vol->sb,
					"Failed to shrink runlist buffer.  This just wastes a bit of memory temporarily so we ignore it and return success.");
			else
				runlist->rl = rl;
		}
	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
		ntfs_debug("Expanding runlist.");
		/*
		 * If there is a previous runlist element and it is a sparse
		 * one, extend it.  Otherwise need to add a new, sparse runlist
		 * element.
		 */
		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
			(rl - 1)->length = new_length - (rl - 1)->vcn;
		else {
			/* Determine the runlist size. */
			old_size = rl - runlist->rl + 1;
			/* Reallocate memory if necessary. */
			rl = ntfs_rl_realloc(runlist->rl, old_size,
					old_size + 1);
			if (IS_ERR(rl)) {
				ntfs_error(vol->sb, "Failed to expand runlist buffer, aborting.");
				return PTR_ERR(rl);
			}
			runlist->rl = rl;
			/*
			 * Set @rl to the same runlist element in the new
			 * runlist as before in the old runlist.
			 */
			rl += old_size - 1;
			/* Add a new, sparse runlist element. */
			rl->lcn = LCN_HOLE;
			rl->length = new_length - rl->vcn;
			/* Add a new terminator runlist element. */
			rl++;
			rl->length = 0;
			runlist->count = old_size + 1;
		}
		rl->vcn = new_length;
		rl->lcn = LCN_ENOENT;
	} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
		/* Runlist already has same size as requested. */
		rl->lcn = LCN_ENOENT;
	}
	ntfs_debug("Done.");
	return 0;
}
15841e9ea7e0SNamjae Jeon 
158511ccc910SNamjae Jeon /*
158611ccc910SNamjae Jeon  * ntfs_rl_sparse - check whether runlist have sparse regions or not.
158711ccc910SNamjae Jeon  * @rl:         runlist to check
15881e9ea7e0SNamjae Jeon  *
158911ccc910SNamjae Jeon  * Return 1 if have, 0 if not, -errno on error.
15901e9ea7e0SNamjae Jeon  */
159111ccc910SNamjae Jeon int ntfs_rl_sparse(struct runlist_element *rl)
15921e9ea7e0SNamjae Jeon {
159311ccc910SNamjae Jeon 	struct runlist_element *rlc;
15941e9ea7e0SNamjae Jeon 
159511ccc910SNamjae Jeon 	if (!rl)
15961e9ea7e0SNamjae Jeon 		return -EINVAL;
159711ccc910SNamjae Jeon 
159811ccc910SNamjae Jeon 	for (rlc = rl; rlc->length; rlc++)
159911ccc910SNamjae Jeon 		if (rlc->lcn < 0) {
160011ccc910SNamjae Jeon 			if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) {
1601ea3566a3SWoody Suwalski 				pr_err("%s: bad runlist\n", __func__);
16021e9ea7e0SNamjae Jeon 				return -EINVAL;
160311ccc910SNamjae Jeon 			}
160411ccc910SNamjae Jeon 			return 1;
160511ccc910SNamjae Jeon 		}
16061e9ea7e0SNamjae Jeon 	return 0;
160711ccc910SNamjae Jeon }
160811ccc910SNamjae Jeon 
16091e9ea7e0SNamjae Jeon /*
161011ccc910SNamjae Jeon  * ntfs_rl_get_compressed_size - calculate length of non sparse regions
161111ccc910SNamjae Jeon  * @vol:        ntfs volume (need for cluster size)
161211ccc910SNamjae Jeon  * @rl:         runlist to calculate for
161311ccc910SNamjae Jeon  *
161411ccc910SNamjae Jeon  * Return compressed size or -errno on error.
16151e9ea7e0SNamjae Jeon  */
161611ccc910SNamjae Jeon s64 ntfs_rl_get_compressed_size(struct ntfs_volume *vol, struct runlist_element *rl)
161711ccc910SNamjae Jeon {
161811ccc910SNamjae Jeon 	struct runlist_element *rlc;
161911ccc910SNamjae Jeon 	s64 ret = 0;
162011ccc910SNamjae Jeon 
162111ccc910SNamjae Jeon 	if (!rl)
162211ccc910SNamjae Jeon 		return -EINVAL;
162311ccc910SNamjae Jeon 
162411ccc910SNamjae Jeon 	for (rlc = rl; rlc->length; rlc++) {
162511ccc910SNamjae Jeon 		if (rlc->lcn < 0) {
162611ccc910SNamjae Jeon 			if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) {
162711ccc910SNamjae Jeon 				ntfs_error(vol->sb, "%s: bad runlist, rlc->lcn : %lld",
162811ccc910SNamjae Jeon 						__func__, rlc->lcn);
162911ccc910SNamjae Jeon 				return -EINVAL;
16301e9ea7e0SNamjae Jeon 			}
163111ccc910SNamjae Jeon 		} else
163211ccc910SNamjae Jeon 			ret += rlc->length;
16331e9ea7e0SNamjae Jeon 	}
163411ccc910SNamjae Jeon 	return NTFS_CLU_TO_B(vol, ret);
16351e9ea7e0SNamjae Jeon }
163611ccc910SNamjae Jeon 
163711ccc910SNamjae Jeon static inline bool ntfs_rle_lcn_contiguous(struct runlist_element *left_rle,
163811ccc910SNamjae Jeon 					   struct runlist_element *right_rle)
163911ccc910SNamjae Jeon {
164011ccc910SNamjae Jeon 	if (left_rle->lcn > LCN_HOLE &&
164111ccc910SNamjae Jeon 	    left_rle->lcn + left_rle->length == right_rle->lcn)
164211ccc910SNamjae Jeon 		return true;
164311ccc910SNamjae Jeon 	else if (left_rle->lcn == LCN_HOLE && right_rle->lcn == LCN_HOLE)
164411ccc910SNamjae Jeon 		return true;
16451e9ea7e0SNamjae Jeon 	else
164611ccc910SNamjae Jeon 		return false;
16471e9ea7e0SNamjae Jeon }
16481e9ea7e0SNamjae Jeon 
164911ccc910SNamjae Jeon static inline bool ntfs_rle_contain(struct runlist_element *rle, s64 vcn)
165011ccc910SNamjae Jeon {
165111ccc910SNamjae Jeon 	if (rle->length > 0 &&
165211ccc910SNamjae Jeon 	    vcn >= rle->vcn && vcn < rle->vcn + rle->length)
165311ccc910SNamjae Jeon 		return true;
165411ccc910SNamjae Jeon 	else
165511ccc910SNamjae Jeon 		return false;
165611ccc910SNamjae Jeon }
165711ccc910SNamjae Jeon 
165811ccc910SNamjae Jeon struct runlist_element *ntfs_rl_insert_range(struct runlist_element *dst_rl, int dst_cnt,
165911ccc910SNamjae Jeon 				      struct runlist_element *src_rl, int src_cnt,
166011ccc910SNamjae Jeon 				      size_t *new_rl_cnt)
166111ccc910SNamjae Jeon {
166211ccc910SNamjae Jeon 	struct runlist_element *i_rl, *new_rl, *src_rl_origin = src_rl;
166311ccc910SNamjae Jeon 	struct runlist_element dst_rl_split;
1664*4e59f8a1SHyunchul Lee 	s64 start_vcn;
166511ccc910SNamjae Jeon 	int new_1st_cnt, new_2nd_cnt, new_3rd_cnt, new_cnt;
166611ccc910SNamjae Jeon 
166711ccc910SNamjae Jeon 	if (!dst_rl || !src_rl || !new_rl_cnt)
166811ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
166911ccc910SNamjae Jeon 	if (dst_cnt <= 0 || src_cnt <= 0)
167011ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
167111ccc910SNamjae Jeon 	if (!(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
167211ccc910SNamjae Jeon 	      dst_rl[dst_cnt - 1].length == 0) ||
167311ccc910SNamjae Jeon 	    src_rl[src_cnt - 1].lcn < LCN_HOLE)
167411ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
167511ccc910SNamjae Jeon 
167611ccc910SNamjae Jeon 	start_vcn = src_rl[0].vcn;
167711ccc910SNamjae Jeon 
167811ccc910SNamjae Jeon 	i_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
167911ccc910SNamjae Jeon 	if (!i_rl ||
168011ccc910SNamjae Jeon 	    (i_rl->lcn == LCN_ENOENT && i_rl->vcn != start_vcn) ||
168111ccc910SNamjae Jeon 	    (i_rl->lcn != LCN_ENOENT && !ntfs_rle_contain(i_rl, start_vcn)))
168211ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
168311ccc910SNamjae Jeon 
168411ccc910SNamjae Jeon 	new_1st_cnt = (int)(i_rl - dst_rl);
168511ccc910SNamjae Jeon 	if (new_1st_cnt > dst_cnt)
168611ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
168711ccc910SNamjae Jeon 	new_3rd_cnt = dst_cnt - new_1st_cnt;
168811ccc910SNamjae Jeon 	if (new_3rd_cnt < 1)
168911ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
169011ccc910SNamjae Jeon 
169111ccc910SNamjae Jeon 	if (i_rl[0].vcn != start_vcn) {
169211ccc910SNamjae Jeon 		if (i_rl[0].lcn == LCN_HOLE && src_rl[0].lcn == LCN_HOLE)
169311ccc910SNamjae Jeon 			goto merge_src_rle;
169411ccc910SNamjae Jeon 
169511ccc910SNamjae Jeon 		/* split @i_rl[0] and create @dst_rl_split */
169611ccc910SNamjae Jeon 		dst_rl_split.vcn = i_rl[0].vcn;
169711ccc910SNamjae Jeon 		dst_rl_split.length = start_vcn - i_rl[0].vcn;
169811ccc910SNamjae Jeon 		dst_rl_split.lcn = i_rl[0].lcn;
169911ccc910SNamjae Jeon 
170011ccc910SNamjae Jeon 		i_rl[0].vcn = start_vcn;
170111ccc910SNamjae Jeon 		i_rl[0].length -= dst_rl_split.length;
170211ccc910SNamjae Jeon 		i_rl[0].lcn += dst_rl_split.length;
170311ccc910SNamjae Jeon 	} else {
170411ccc910SNamjae Jeon 		struct runlist_element *dst_rle, *src_rle;
170511ccc910SNamjae Jeon merge_src_rle:
170611ccc910SNamjae Jeon 
170711ccc910SNamjae Jeon 		/* not split @i_rl[0] */
170811ccc910SNamjae Jeon 		dst_rl_split.lcn = LCN_ENOENT;
170911ccc910SNamjae Jeon 
171011ccc910SNamjae Jeon 		/* merge @src_rl's first run and @i_rl[0]'s left run if possible */
171111ccc910SNamjae Jeon 		dst_rle = &dst_rl[new_1st_cnt - 1];
171211ccc910SNamjae Jeon 		src_rle = &src_rl[0];
171311ccc910SNamjae Jeon 		if (new_1st_cnt > 0 && ntfs_rle_lcn_contiguous(dst_rle, src_rle)) {
171411ccc910SNamjae Jeon 			WARN_ON(dst_rle->vcn + dst_rle->length != src_rle->vcn);
171511ccc910SNamjae Jeon 			dst_rle->length += src_rle->length;
171611ccc910SNamjae Jeon 			src_rl++;
171711ccc910SNamjae Jeon 			src_cnt--;
171811ccc910SNamjae Jeon 		} else {
171911ccc910SNamjae Jeon 			/* merge @src_rl's last run and @i_rl[0]'s right if possible */
172011ccc910SNamjae Jeon 			dst_rle = &dst_rl[new_1st_cnt];
172111ccc910SNamjae Jeon 			src_rle = &src_rl[src_cnt - 1];
172211ccc910SNamjae Jeon 
172311ccc910SNamjae Jeon 			if (ntfs_rle_lcn_contiguous(dst_rle, src_rle)) {
172411ccc910SNamjae Jeon 				dst_rle->length += src_rle->length;
172511ccc910SNamjae Jeon 				src_cnt--;
172611ccc910SNamjae Jeon 			}
172711ccc910SNamjae Jeon 		}
172811ccc910SNamjae Jeon 	}
172911ccc910SNamjae Jeon 
173011ccc910SNamjae Jeon 	new_2nd_cnt = src_cnt;
173111ccc910SNamjae Jeon 	new_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt;
173211ccc910SNamjae Jeon 	new_cnt += dst_rl_split.lcn >= LCN_HOLE ? 1 : 0;
173311ccc910SNamjae Jeon 	new_rl = kvcalloc(new_cnt, sizeof(*new_rl), GFP_NOFS);
173411ccc910SNamjae Jeon 	if (!new_rl)
173511ccc910SNamjae Jeon 		return ERR_PTR(-ENOMEM);
173611ccc910SNamjae Jeon 
173711ccc910SNamjae Jeon 	/* Copy the @dst_rl's first half to @new_rl */
173811ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
173911ccc910SNamjae Jeon 	if (dst_rl_split.lcn >= LCN_HOLE) {
174011ccc910SNamjae Jeon 		ntfs_rl_mc(new_rl, new_1st_cnt, &dst_rl_split, 0, 1);
174111ccc910SNamjae Jeon 		new_1st_cnt++;
174211ccc910SNamjae Jeon 	}
174311ccc910SNamjae Jeon 	/* Copy the @src_rl to @new_rl */
174411ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, new_1st_cnt, src_rl, 0, new_2nd_cnt);
174511ccc910SNamjae Jeon 	/* Copy the @dst_rl's second half to @new_rl */
174611ccc910SNamjae Jeon 	if (new_3rd_cnt >= 1) {
174711ccc910SNamjae Jeon 		struct runlist_element *rl, *rl_3rd;
174811ccc910SNamjae Jeon 		int dst_1st_cnt = dst_rl_split.lcn >= LCN_HOLE ?
174911ccc910SNamjae Jeon 			new_1st_cnt - 1 : new_1st_cnt;
175011ccc910SNamjae Jeon 
175111ccc910SNamjae Jeon 		ntfs_rl_mc(new_rl, new_1st_cnt + new_2nd_cnt,
175211ccc910SNamjae Jeon 			   dst_rl, dst_1st_cnt, new_3rd_cnt);
175311ccc910SNamjae Jeon 		/* Update vcn of the @dst_rl's second half runs to reflect
175411ccc910SNamjae Jeon 		 * appended @src_rl.
175511ccc910SNamjae Jeon 		 */
175611ccc910SNamjae Jeon 		if (new_1st_cnt + new_2nd_cnt == 0) {
175711ccc910SNamjae Jeon 			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt + 1];
175811ccc910SNamjae Jeon 			rl = &new_rl[new_1st_cnt + new_2nd_cnt];
175911ccc910SNamjae Jeon 		} else {
176011ccc910SNamjae Jeon 			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt];
176111ccc910SNamjae Jeon 			rl = &new_rl[new_1st_cnt + new_2nd_cnt - 1];
176211ccc910SNamjae Jeon 		}
176311ccc910SNamjae Jeon 		do {
176411ccc910SNamjae Jeon 			rl_3rd->vcn = rl->vcn + rl->length;
176511ccc910SNamjae Jeon 			if (rl_3rd->length <= 0)
176611ccc910SNamjae Jeon 				break;
176711ccc910SNamjae Jeon 			rl = rl_3rd;
176811ccc910SNamjae Jeon 			rl_3rd++;
176911ccc910SNamjae Jeon 		} while (1);
177011ccc910SNamjae Jeon 	}
177111ccc910SNamjae Jeon 	*new_rl_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt;
177211ccc910SNamjae Jeon 
177311ccc910SNamjae Jeon 	kvfree(dst_rl);
177411ccc910SNamjae Jeon 	kvfree(src_rl_origin);
177511ccc910SNamjae Jeon 	return new_rl;
177611ccc910SNamjae Jeon }
177711ccc910SNamjae Jeon 
177811ccc910SNamjae Jeon struct runlist_element *ntfs_rl_punch_hole(struct runlist_element *dst_rl, int dst_cnt,
177911ccc910SNamjae Jeon 				    s64 start_vcn, s64 len,
178011ccc910SNamjae Jeon 				    struct runlist_element **punch_rl,
178111ccc910SNamjae Jeon 				    size_t *new_rl_cnt)
178211ccc910SNamjae Jeon {
178311ccc910SNamjae Jeon 	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl, hole_rl[1];
178411ccc910SNamjae Jeon 	s64 end_vcn;
178511ccc910SNamjae Jeon 	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt;
178611ccc910SNamjae Jeon 	bool begin_split, end_split, one_split_3;
178711ccc910SNamjae Jeon 
178811ccc910SNamjae Jeon 	if (dst_cnt < 2 ||
178911ccc910SNamjae Jeon 	    !(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
179011ccc910SNamjae Jeon 	      dst_rl[dst_cnt - 1].length == 0))
179111ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
179211ccc910SNamjae Jeon 
179311ccc910SNamjae Jeon 	end_vcn = min(start_vcn + len - 1,
179411ccc910SNamjae Jeon 		      dst_rl[dst_cnt - 2].vcn + dst_rl[dst_cnt - 2].length - 1);
179511ccc910SNamjae Jeon 
179611ccc910SNamjae Jeon 	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
179711ccc910SNamjae Jeon 	if (!s_rl ||
179811ccc910SNamjae Jeon 	    s_rl->lcn <= LCN_ENOENT ||
179911ccc910SNamjae Jeon 	    !ntfs_rle_contain(s_rl, start_vcn))
180011ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
180111ccc910SNamjae Jeon 
180211ccc910SNamjae Jeon 	begin_split = s_rl->vcn != start_vcn ? true : false;
180311ccc910SNamjae Jeon 
180411ccc910SNamjae Jeon 	e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn);
180511ccc910SNamjae Jeon 	if (!e_rl ||
180611ccc910SNamjae Jeon 	    e_rl->lcn <= LCN_ENOENT ||
180711ccc910SNamjae Jeon 	    !ntfs_rle_contain(e_rl, end_vcn))
180811ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
180911ccc910SNamjae Jeon 
181011ccc910SNamjae Jeon 	end_split = e_rl->vcn + e_rl->length - 1 != end_vcn ? true : false;
181111ccc910SNamjae Jeon 
181211ccc910SNamjae Jeon 	/* @s_rl has to be split into left, punched hole, and right */
181311ccc910SNamjae Jeon 	one_split_3 = e_rl == s_rl && begin_split && end_split ? true : false;
181411ccc910SNamjae Jeon 
181511ccc910SNamjae Jeon 	punch_cnt = (int)(e_rl - s_rl) + 1;
181611ccc910SNamjae Jeon 
181711ccc910SNamjae Jeon 	*punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element),
181811ccc910SNamjae Jeon 			GFP_NOFS);
181911ccc910SNamjae Jeon 	if (!*punch_rl)
182011ccc910SNamjae Jeon 		return ERR_PTR(-ENOMEM);
182111ccc910SNamjae Jeon 
182211ccc910SNamjae Jeon 	new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3;
182311ccc910SNamjae Jeon 	new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS);
182411ccc910SNamjae Jeon 	if (!new_rl) {
182511ccc910SNamjae Jeon 		kvfree(*punch_rl);
182611ccc910SNamjae Jeon 		*punch_rl = NULL;
182711ccc910SNamjae Jeon 		return ERR_PTR(-ENOMEM);
182811ccc910SNamjae Jeon 	}
182911ccc910SNamjae Jeon 
183011ccc910SNamjae Jeon 	new_1st_cnt = (int)(s_rl - dst_rl) + 1;
183111ccc910SNamjae Jeon 	ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt);
183211ccc910SNamjae Jeon 
183311ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].lcn = LCN_ENOENT;
183411ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].length = 0;
183511ccc910SNamjae Jeon 
183611ccc910SNamjae Jeon 	if (!begin_split)
183711ccc910SNamjae Jeon 		new_1st_cnt--;
183811ccc910SNamjae Jeon 	dst_3rd_rl = e_rl;
183911ccc910SNamjae Jeon 	dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1;
184011ccc910SNamjae Jeon 	if (!end_split) {
184111ccc910SNamjae Jeon 		dst_3rd_rl++;
184211ccc910SNamjae Jeon 		dst_3rd_cnt--;
184311ccc910SNamjae Jeon 	}
184411ccc910SNamjae Jeon 
184511ccc910SNamjae Jeon 	/* Copy the 1st part of @dst_rl into @new_rl */
184611ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
184711ccc910SNamjae Jeon 	if (begin_split) {
184811ccc910SNamjae Jeon 		/* the @e_rl has to be splited and copied into the last of @new_rl
184911ccc910SNamjae Jeon 		 * and the first of @punch_rl
185011ccc910SNamjae Jeon 		 */
185111ccc910SNamjae Jeon 		s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn;
185211ccc910SNamjae Jeon 
185311ccc910SNamjae Jeon 		if (new_1st_cnt)
185411ccc910SNamjae Jeon 			new_rl[new_1st_cnt - 1].length = first_cnt;
185511ccc910SNamjae Jeon 
185611ccc910SNamjae Jeon 		(*punch_rl)[0].vcn = start_vcn;
185711ccc910SNamjae Jeon 		(*punch_rl)[0].length -= first_cnt;
185811ccc910SNamjae Jeon 		if ((*punch_rl)[0].lcn > LCN_HOLE)
185911ccc910SNamjae Jeon 			(*punch_rl)[0].lcn += first_cnt;
186011ccc910SNamjae Jeon 	}
186111ccc910SNamjae Jeon 
186211ccc910SNamjae Jeon 	/* Copy a hole into @new_rl */
186311ccc910SNamjae Jeon 	hole_rl[0].vcn = start_vcn;
186411ccc910SNamjae Jeon 	hole_rl[0].length = (s64)len;
186511ccc910SNamjae Jeon 	hole_rl[0].lcn = LCN_HOLE;
186611ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, new_1st_cnt, hole_rl, 0, 1);
186711ccc910SNamjae Jeon 
186811ccc910SNamjae Jeon 	/* Copy the 3rd part of @dst_rl into @new_rl */
186911ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, new_1st_cnt + 1, dst_3rd_rl, 0, dst_3rd_cnt);
187011ccc910SNamjae Jeon 	if (end_split) {
187111ccc910SNamjae Jeon 		/* the @e_rl has to be splited and copied into the first of
187211ccc910SNamjae Jeon 		 * @new_rl and the last of @punch_rl
187311ccc910SNamjae Jeon 		 */
187411ccc910SNamjae Jeon 		s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1;
187511ccc910SNamjae Jeon 
187611ccc910SNamjae Jeon 		new_rl[new_1st_cnt + 1].vcn = end_vcn + 1;
187711ccc910SNamjae Jeon 		new_rl[new_1st_cnt + 1].length -= first_cnt;
187811ccc910SNamjae Jeon 		if (new_rl[new_1st_cnt + 1].lcn > LCN_HOLE)
187911ccc910SNamjae Jeon 			new_rl[new_1st_cnt + 1].lcn += first_cnt;
188011ccc910SNamjae Jeon 
188111ccc910SNamjae Jeon 		if (one_split_3)
188211ccc910SNamjae Jeon 			(*punch_rl)[punch_cnt - 1].length -=
188311ccc910SNamjae Jeon 				new_rl[new_1st_cnt + 1].length;
188411ccc910SNamjae Jeon 		else
188511ccc910SNamjae Jeon 			(*punch_rl)[punch_cnt - 1].length = first_cnt;
188611ccc910SNamjae Jeon 	}
188711ccc910SNamjae Jeon 
188811ccc910SNamjae Jeon 	/* Merge left and hole, or hole and right in @new_rl, if left or right
188911ccc910SNamjae Jeon 	 * consists of holes.
189011ccc910SNamjae Jeon 	 */
189111ccc910SNamjae Jeon 	merge_cnt = 0;
189211ccc910SNamjae Jeon 	if (new_1st_cnt > 0 && new_rl[new_1st_cnt - 1].lcn == LCN_HOLE) {
189311ccc910SNamjae Jeon 		/* Merge right and hole */
189411ccc910SNamjae Jeon 		s_rl =  &new_rl[new_1st_cnt - 1];
189511ccc910SNamjae Jeon 		s_rl->length += s_rl[1].length;
189611ccc910SNamjae Jeon 		merge_cnt = 1;
189711ccc910SNamjae Jeon 		/* Merge left and right */
189811ccc910SNamjae Jeon 		if (new_1st_cnt + 1 < new_cnt &&
189911ccc910SNamjae Jeon 		    new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
190011ccc910SNamjae Jeon 			s_rl->length += s_rl[2].length;
190111ccc910SNamjae Jeon 			merge_cnt++;
190211ccc910SNamjae Jeon 		}
190311ccc910SNamjae Jeon 	} else if (new_1st_cnt + 1 < new_cnt &&
190411ccc910SNamjae Jeon 		   new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
190511ccc910SNamjae Jeon 		/* Merge left and hole */
190611ccc910SNamjae Jeon 		s_rl = &new_rl[new_1st_cnt];
190711ccc910SNamjae Jeon 		s_rl->length += s_rl[1].length;
190811ccc910SNamjae Jeon 		merge_cnt = 1;
190911ccc910SNamjae Jeon 	}
191011ccc910SNamjae Jeon 	if (merge_cnt) {
191111ccc910SNamjae Jeon 		struct runlist_element *d_rl, *src_rl;
191211ccc910SNamjae Jeon 
191311ccc910SNamjae Jeon 		d_rl = s_rl + 1;
191411ccc910SNamjae Jeon 		src_rl = s_rl + 1 + merge_cnt;
191511ccc910SNamjae Jeon 		ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl),
191611ccc910SNamjae Jeon 			   (int)(&new_rl[new_cnt - 1] - src_rl) + 1);
191711ccc910SNamjae Jeon 	}
191811ccc910SNamjae Jeon 
191911ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn +
192011ccc910SNamjae Jeon 		(*punch_rl)[punch_cnt - 1].length;
192111ccc910SNamjae Jeon 
192211ccc910SNamjae Jeon 	/* punch_cnt elements of dst are replaced with one hole */
192311ccc910SNamjae Jeon 	*new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) +
192411ccc910SNamjae Jeon 		1 - merge_cnt;
192511ccc910SNamjae Jeon 	kvfree(dst_rl);
192611ccc910SNamjae Jeon 	return new_rl;
192711ccc910SNamjae Jeon }
192811ccc910SNamjae Jeon 
192911ccc910SNamjae Jeon struct runlist_element *ntfs_rl_collapse_range(struct runlist_element *dst_rl, int dst_cnt,
193011ccc910SNamjae Jeon 					s64 start_vcn, s64 len,
193111ccc910SNamjae Jeon 					struct runlist_element **punch_rl,
193211ccc910SNamjae Jeon 					size_t *new_rl_cnt)
193311ccc910SNamjae Jeon {
193411ccc910SNamjae Jeon 	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl;
193511ccc910SNamjae Jeon 	s64 end_vcn;
193611ccc910SNamjae Jeon 	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt, i;
193711ccc910SNamjae Jeon 	bool begin_split, end_split, one_split_3;
193811ccc910SNamjae Jeon 
193911ccc910SNamjae Jeon 	if (dst_cnt < 2 ||
194011ccc910SNamjae Jeon 	    !(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
194111ccc910SNamjae Jeon 	      dst_rl[dst_cnt - 1].length == 0))
194211ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
194311ccc910SNamjae Jeon 
194411ccc910SNamjae Jeon 	end_vcn = min(start_vcn + len - 1,
194511ccc910SNamjae Jeon 			dst_rl[dst_cnt - 1].vcn - 1);
194611ccc910SNamjae Jeon 
194711ccc910SNamjae Jeon 	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
194811ccc910SNamjae Jeon 	if (!s_rl ||
194911ccc910SNamjae Jeon 	    s_rl->lcn <= LCN_ENOENT ||
195011ccc910SNamjae Jeon 	    !ntfs_rle_contain(s_rl, start_vcn))
195111ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
195211ccc910SNamjae Jeon 
195311ccc910SNamjae Jeon 	begin_split = s_rl->vcn != start_vcn ? true : false;
195411ccc910SNamjae Jeon 
195511ccc910SNamjae Jeon 	e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn);
195611ccc910SNamjae Jeon 	if (!e_rl ||
195711ccc910SNamjae Jeon 	    e_rl->lcn <= LCN_ENOENT ||
195811ccc910SNamjae Jeon 	    !ntfs_rle_contain(e_rl, end_vcn))
195911ccc910SNamjae Jeon 		return ERR_PTR(-EINVAL);
196011ccc910SNamjae Jeon 
196111ccc910SNamjae Jeon 	end_split = e_rl->vcn + e_rl->length - 1 != end_vcn ? true : false;
196211ccc910SNamjae Jeon 
196311ccc910SNamjae Jeon 	/* @s_rl has to be split into left, collapsed, and right */
196411ccc910SNamjae Jeon 	one_split_3 = e_rl == s_rl && begin_split && end_split ? true : false;
196511ccc910SNamjae Jeon 
196611ccc910SNamjae Jeon 	punch_cnt = (int)(e_rl - s_rl) + 1;
196711ccc910SNamjae Jeon 	*punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element),
196811ccc910SNamjae Jeon 			GFP_NOFS);
196911ccc910SNamjae Jeon 	if (!*punch_rl)
197011ccc910SNamjae Jeon 		return ERR_PTR(-ENOMEM);
197111ccc910SNamjae Jeon 
197211ccc910SNamjae Jeon 	new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3;
197311ccc910SNamjae Jeon 	new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS);
197411ccc910SNamjae Jeon 	if (!new_rl) {
197511ccc910SNamjae Jeon 		kvfree(*punch_rl);
197611ccc910SNamjae Jeon 		*punch_rl = NULL;
197711ccc910SNamjae Jeon 		return ERR_PTR(-ENOMEM);
197811ccc910SNamjae Jeon 	}
197911ccc910SNamjae Jeon 
198011ccc910SNamjae Jeon 	new_1st_cnt = (int)(s_rl - dst_rl) + 1;
198111ccc910SNamjae Jeon 	ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt);
198211ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].lcn = LCN_ENOENT;
198311ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].length = 0;
198411ccc910SNamjae Jeon 
198511ccc910SNamjae Jeon 	if (!begin_split)
198611ccc910SNamjae Jeon 		new_1st_cnt--;
198711ccc910SNamjae Jeon 	dst_3rd_rl = e_rl;
198811ccc910SNamjae Jeon 	dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1;
198911ccc910SNamjae Jeon 	if (!end_split) {
199011ccc910SNamjae Jeon 		dst_3rd_rl++;
199111ccc910SNamjae Jeon 		dst_3rd_cnt--;
199211ccc910SNamjae Jeon 	}
199311ccc910SNamjae Jeon 
199411ccc910SNamjae Jeon 	/* Copy the 1st part of @dst_rl into @new_rl */
199511ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
199611ccc910SNamjae Jeon 	if (begin_split) {
199711ccc910SNamjae Jeon 		/* the @e_rl has to be splited and copied into the last of @new_rl
199811ccc910SNamjae Jeon 		 * and the first of @punch_rl
199911ccc910SNamjae Jeon 		 */
200011ccc910SNamjae Jeon 		s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn;
200111ccc910SNamjae Jeon 
200211ccc910SNamjae Jeon 		new_rl[new_1st_cnt - 1].length = first_cnt;
200311ccc910SNamjae Jeon 
200411ccc910SNamjae Jeon 		(*punch_rl)[0].vcn = start_vcn;
200511ccc910SNamjae Jeon 		(*punch_rl)[0].length -= first_cnt;
200611ccc910SNamjae Jeon 		if ((*punch_rl)[0].lcn > LCN_HOLE)
200711ccc910SNamjae Jeon 			(*punch_rl)[0].lcn += first_cnt;
200811ccc910SNamjae Jeon 	}
200911ccc910SNamjae Jeon 
201011ccc910SNamjae Jeon 	/* Copy the 3rd part of @dst_rl into @new_rl */
201111ccc910SNamjae Jeon 	ntfs_rl_mc(new_rl, new_1st_cnt, dst_3rd_rl, 0, dst_3rd_cnt);
201211ccc910SNamjae Jeon 	if (end_split) {
201311ccc910SNamjae Jeon 		/* the @e_rl has to be splited and copied into the first of
201411ccc910SNamjae Jeon 		 * @new_rl and the last of @punch_rl
201511ccc910SNamjae Jeon 		 */
201611ccc910SNamjae Jeon 		s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1;
201711ccc910SNamjae Jeon 
201811ccc910SNamjae Jeon 		new_rl[new_1st_cnt].vcn = end_vcn + 1;
201911ccc910SNamjae Jeon 		new_rl[new_1st_cnt].length -= first_cnt;
202011ccc910SNamjae Jeon 		if (new_rl[new_1st_cnt].lcn > LCN_HOLE)
202111ccc910SNamjae Jeon 			new_rl[new_1st_cnt].lcn += first_cnt;
202211ccc910SNamjae Jeon 
202311ccc910SNamjae Jeon 		if (one_split_3)
202411ccc910SNamjae Jeon 			(*punch_rl)[punch_cnt - 1].length -=
202511ccc910SNamjae Jeon 				new_rl[new_1st_cnt].length;
202611ccc910SNamjae Jeon 		else
202711ccc910SNamjae Jeon 			(*punch_rl)[punch_cnt - 1].length = first_cnt;
202811ccc910SNamjae Jeon 	}
202911ccc910SNamjae Jeon 
203011ccc910SNamjae Jeon 	/* Adjust vcn */
203111ccc910SNamjae Jeon 	if (new_1st_cnt == 0)
203211ccc910SNamjae Jeon 		new_rl[new_1st_cnt].vcn = 0;
203311ccc910SNamjae Jeon 	for (i = new_1st_cnt == 0 ? 1 : new_1st_cnt; new_rl[i].length; i++)
203411ccc910SNamjae Jeon 		new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length;
203511ccc910SNamjae Jeon 	new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length;
203611ccc910SNamjae Jeon 
203711ccc910SNamjae Jeon 	/* Merge left and hole, or hole and right in @new_rl, if left or right
203811ccc910SNamjae Jeon 	 * consists of holes.
203911ccc910SNamjae Jeon 	 */
204011ccc910SNamjae Jeon 	merge_cnt = 0;
204111ccc910SNamjae Jeon 	i = new_1st_cnt == 0 ? 1 : new_1st_cnt;
204211ccc910SNamjae Jeon 	if (ntfs_rle_lcn_contiguous(&new_rl[i - 1], &new_rl[i])) {
204311ccc910SNamjae Jeon 		/* Merge right and left */
204411ccc910SNamjae Jeon 		s_rl =  &new_rl[new_1st_cnt - 1];
204511ccc910SNamjae Jeon 		s_rl->length += s_rl[1].length;
204611ccc910SNamjae Jeon 		merge_cnt = 1;
204711ccc910SNamjae Jeon 	}
204811ccc910SNamjae Jeon 	if (merge_cnt) {
204911ccc910SNamjae Jeon 		struct runlist_element *d_rl, *src_rl;
205011ccc910SNamjae Jeon 
205111ccc910SNamjae Jeon 		d_rl = s_rl + 1;
205211ccc910SNamjae Jeon 		src_rl = s_rl + 1 + merge_cnt;
205311ccc910SNamjae Jeon 		ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl),
205411ccc910SNamjae Jeon 			   (int)(&new_rl[new_cnt - 1] - src_rl) + 1);
205511ccc910SNamjae Jeon 	}
205611ccc910SNamjae Jeon 
205711ccc910SNamjae Jeon 	(*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn +
205811ccc910SNamjae Jeon 		(*punch_rl)[punch_cnt - 1].length;
205911ccc910SNamjae Jeon 
206011ccc910SNamjae Jeon 	/* punch_cnt elements of dst are extracted */
206111ccc910SNamjae Jeon 	*new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) -
206211ccc910SNamjae Jeon 		merge_cnt;
206311ccc910SNamjae Jeon 
206411ccc910SNamjae Jeon 	kvfree(dst_rl);
206511ccc910SNamjae Jeon 	return new_rl;
206611ccc910SNamjae Jeon }
2067