1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NTFS runlist handling code. 4 * 5 * Copyright (c) 2001-2007 Anton Altaparmakov 6 * Copyright (c) 2002-2005 Richard Russon 7 * Copyright (c) 2025 LG Electronics Co., Ltd. 8 * 9 * Part of this file is based on code from the NTFS-3G. 10 * and is copyrighted by the respective authors below: 11 * Copyright (c) 2002-2005 Anton Altaparmakov 12 * Copyright (c) 2002-2005 Richard Russon 13 * Copyright (c) 2002-2008 Szabolcs Szakacsits 14 * Copyright (c) 2004 Yura Pakhuchiy 15 * Copyright (c) 2007-2022 Jean-Pierre Andre 16 */ 17 18 #include "ntfs.h" 19 #include "attrib.h" 20 21 /* 22 * ntfs_rl_mm - runlist memmove 23 * @base: base runlist array 24 * @dst: destination index in @base 25 * @src: source index in @base 26 * @size: number of elements to move 27 * 28 * It is up to the caller to serialize access to the runlist @base. 29 */ 30 static inline void ntfs_rl_mm(struct runlist_element *base, int dst, int src, int size) 31 { 32 if (likely((dst != src) && (size > 0))) 33 memmove(base + dst, base + src, size * sizeof(*base)); 34 } 35 36 /* 37 * ntfs_rl_mc - runlist memory copy 38 * @dstbase: destination runlist array 39 * @dst: destination index in @dstbase 40 * @srcbase: source runlist array 41 * @src: source index in @srcbase 42 * @size: number of elements to copy 43 * 44 * It is up to the caller to serialize access to the runlists @dstbase and 45 * @srcbase. 46 */ 47 static inline void ntfs_rl_mc(struct runlist_element *dstbase, int dst, 48 struct runlist_element *srcbase, int src, int size) 49 { 50 if (likely(size > 0)) 51 memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase)); 52 } 53 54 /* 55 * ntfs_rl_realloc - Reallocate memory for runlists 56 * @rl: original runlist 57 * @old_size: number of runlist elements in the original runlist @rl 58 * @new_size: number of runlist elements we need space for 59 * 60 * As the runlists grow, more memory will be required. 
To prevent the 61 * kernel having to allocate and reallocate large numbers of small bits of 62 * memory, this function returns an entire page of memory. 63 * 64 * It is up to the caller to serialize access to the runlist @rl. 65 * 66 * N.B. If the new allocation doesn't require a different number of pages in 67 * memory, the function will return the original pointer. 68 * 69 * On success, return a pointer to the newly allocated, or recycled, memory. 70 * On error, return -errno. 71 */ 72 struct runlist_element *ntfs_rl_realloc(struct runlist_element *rl, 73 int old_size, int new_size) 74 { 75 struct runlist_element *new_rl; 76 77 old_size = old_size * sizeof(*rl); 78 new_size = new_size * sizeof(*rl); 79 if (old_size == new_size) 80 return rl; 81 82 new_rl = kvzalloc(new_size, GFP_NOFS); 83 if (unlikely(!new_rl)) 84 return ERR_PTR(-ENOMEM); 85 86 if (likely(rl != NULL)) { 87 if (unlikely(old_size > new_size)) 88 old_size = new_size; 89 memcpy(new_rl, rl, old_size); 90 kvfree(rl); 91 } 92 return new_rl; 93 } 94 95 /* 96 * ntfs_rl_realloc_nofail - Reallocate memory for runlists 97 * @rl: original runlist 98 * @old_size: number of runlist elements in the original runlist @rl 99 * @new_size: number of runlist elements we need space for 100 * 101 * As the runlists grow, more memory will be required. To prevent the 102 * kernel having to allocate and reallocate large numbers of small bits of 103 * memory, this function returns an entire page of memory. 104 * 105 * This function guarantees that the allocation will succeed. It will sleep 106 * for as long as it takes to complete the allocation. 107 * 108 * It is up to the caller to serialize access to the runlist @rl. 109 * 110 * N.B. If the new allocation doesn't require a different number of pages in 111 * memory, the function will return the original pointer. 112 * 113 * On success, return a pointer to the newly allocated, or recycled, memory. 114 * On error, return -errno. 
115 */ 116 static inline struct runlist_element *ntfs_rl_realloc_nofail(struct runlist_element *rl, 117 int old_size, int new_size) 118 { 119 struct runlist_element *new_rl; 120 121 old_size = old_size * sizeof(*rl); 122 new_size = new_size * sizeof(*rl); 123 if (old_size == new_size) 124 return rl; 125 126 new_rl = kvmalloc(new_size, GFP_NOFS | __GFP_NOFAIL); 127 if (likely(rl != NULL)) { 128 if (unlikely(old_size > new_size)) 129 old_size = new_size; 130 memcpy(new_rl, rl, old_size); 131 kvfree(rl); 132 } 133 return new_rl; 134 } 135 136 /* 137 * ntfs_are_rl_mergeable - test if two runlists can be joined together 138 * @dst: original runlist 139 * @src: new runlist to test for mergeability with @dst 140 * 141 * Test if two runlists can be joined together. For this, their VCNs and LCNs 142 * must be adjacent. 143 * 144 * It is up to the caller to serialize access to the runlists @dst and @src. 145 * 146 * Return: true Success, the runlists can be merged. 147 * false Failure, the runlists cannot be merged. 148 */ 149 static inline bool ntfs_are_rl_mergeable(struct runlist_element *dst, 150 struct runlist_element *src) 151 { 152 /* We can merge unmapped regions even if they are misaligned. */ 153 if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) 154 return true; 155 /* If the runs are misaligned, we cannot merge them. */ 156 if ((dst->vcn + dst->length) != src->vcn) 157 return false; 158 /* If both runs are non-sparse and contiguous, we can merge them. */ 159 if ((dst->lcn >= 0) && (src->lcn >= 0) && 160 ((dst->lcn + dst->length) == src->lcn)) 161 return true; 162 /* If we are merging two holes, we can merge them. */ 163 if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) 164 return true; 165 /* If we are merging two dealloc, we can merge them. */ 166 if ((dst->lcn == LCN_DELALLOC) && (src->lcn == LCN_DELALLOC)) 167 return true; 168 /* Cannot merge. 
*/ 169 return false; 170 } 171 172 /* 173 * __ntfs_rl_merge - merge two runlists without testing if they can be merged 174 * @dst: original, destination runlist 175 * @src: new runlist to merge with @dst 176 * 177 * Merge the two runlists, writing into the destination runlist @dst. The 178 * caller must make sure the runlists can be merged or this will corrupt the 179 * destination runlist. 180 * 181 * It is up to the caller to serialize access to the runlists @dst and @src. 182 */ 183 static inline void __ntfs_rl_merge(struct runlist_element *dst, struct runlist_element *src) 184 { 185 dst->length += src->length; 186 } 187 188 /* 189 * ntfs_rl_append - append a runlist after a given element 190 * @dst: destination runlist to append to 191 * @dsize: number of elements in @dst 192 * @src: source runlist to append from 193 * @ssize: number of elements in @src 194 * @loc: index in @dst after which to append @src 195 * @new_size: on success, set to the new combined size 196 * 197 * Append the runlist @src after element @loc in @dst. Merge the right end of 198 * the new runlist, if necessary. Adjust the size of the hole before the 199 * appended runlist. 200 * 201 * It is up to the caller to serialize access to the runlists @dst and @src. 202 * 203 * On success, return a pointer to the new, combined, runlist. Note, both 204 * runlists @dst and @src are deallocated before returning so you cannot use 205 * the pointers for anything any more. (Strictly speaking the returned runlist 206 * may be the same as @dst but this is irrelevant.) 207 * 208 * On error, return -errno. Both runlists are left unmodified. 209 */ 210 static inline struct runlist_element *ntfs_rl_append(struct runlist_element *dst, 211 int dsize, struct runlist_element *src, int ssize, int loc, 212 size_t *new_size) 213 { 214 bool right = false; /* Right end of @src needs merging. */ 215 int marker; /* End of the inserted runs. */ 216 217 /* First, check if the right hand end needs merging. 
*/ 218 if ((loc + 1) < dsize) 219 right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); 220 221 /* Space required: @dst size + @src size, less one if we merged. */ 222 dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); 223 if (IS_ERR(dst)) 224 return dst; 225 226 *new_size = dsize + ssize - right; 227 /* 228 * We are guaranteed to succeed from here so can start modifying the 229 * original runlists. 230 */ 231 232 /* First, merge the right hand end, if necessary. */ 233 if (right) 234 __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); 235 236 /* First run after the @src runs that have been inserted. */ 237 marker = loc + ssize + 1; 238 239 /* Move the tail of @dst out of the way, then copy in @src. */ 240 ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); 241 ntfs_rl_mc(dst, loc + 1, src, 0, ssize); 242 243 /* Adjust the size of the preceding hole. */ 244 dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; 245 246 /* We may have changed the length of the file, so fix the end marker */ 247 if (dst[marker].lcn == LCN_ENOENT) 248 dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; 249 250 return dst; 251 } 252 253 /* 254 * ntfs_rl_insert - insert a runlist into another 255 * @dst: destination runlist to insert into 256 * @dsize: number of elements in @dst 257 * @src: source runlist to insert from 258 * @ssize: number of elements in @src 259 * @loc: index in @dst at which to insert @src 260 * @new_size: on success, set to the new combined size 261 * 262 * Insert the runlist @src before element @loc in the runlist @dst. Merge the 263 * left end of the new runlist, if necessary. Adjust the size of the hole 264 * after the inserted runlist. 265 * 266 * It is up to the caller to serialize access to the runlists @dst and @src. 267 * 268 * On success, return a pointer to the new, combined, runlist. Note, both 269 * runlists @dst and @src are deallocated before returning so you cannot use 270 * the pointers for anything any more. 
 * (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_insert(struct runlist_element *dst,
		int dsize, struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	bool left = false;	/* Left end of @src needs merging. */
	bool disc = false;	/* Discontinuity between @dst and @src. */
	int marker;		/* End of the inserted runs. */

	/*
	 * disc => Discontinuity between the end of @dst and the start of @src.
	 * This means we might need to insert a "not mapped" run.
	 */
	if (loc == 0)
		disc = (src[0].vcn > 0);
	else {
		s64 merged_length;

		left = ntfs_are_rl_mergeable(dst + loc - 1, src);

		/*
		 * If the left ends merge, the merged run covers the combined
		 * length; use that to decide whether a gap remains.
		 */
		merged_length = dst[loc - 1].length;
		if (left)
			merged_length += src->length;

		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
	}
	/*
	 * Space required: @dst size + @src size, less one if we merged, plus
	 * one if there was a discontinuity.
	 */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
	if (IS_ERR(dst))
		return dst;

	*new_size = dsize + ssize - left + disc;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlist.
	 */
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * First run after the @src runs that have been inserted.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.  And if @disc, then @dst and @src do
	 * not meet and we need an extra run to fill the gap.
	 */
	marker = loc + ssize - left + disc;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);

	/* Adjust the VCN of the first run after the insertion... */
	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	/* ... and the length, but only for runs that are not real data runs
	 * (hole, unmapped, delalloc), whose extent is defined by their
	 * neighbours rather than by on-disk clusters. */
	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED ||
	    dst[marker].lcn == LCN_DELALLOC)
		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;

	/* Writing beyond the end of the file and there is a discontinuity. */
	if (disc) {
		/* Synthesize the "not mapped" filler run covering the gap. */
		if (loc > 0) {
			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
		} else {
			dst[loc].vcn = 0;
			dst[loc].length = dst[loc + 1].vcn;
		}
		dst[loc].lcn = LCN_RL_NOT_MAPPED;
	}
	return dst;
}

/*
 * ntfs_rl_replace - overwrite a runlist element with another runlist
 * @dst:	destination runlist to replace in
 * @dsize:	number of elements in @dst
 * @src:	source runlist to replace with
 * @ssize:	number of elements in @src
 * @loc:	index in @dst to replace
 * @new_size:	on success, set to the new combined size
 *
 * Replace the runlist element @dst at @loc with @src.  Merge the left and
 * right ends of the inserted runlist, if necessary.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_replace(struct runlist_element *dst,
		int dsize, struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	int delta;
	bool left = false;	/* Left end of @src needs merging. */
	bool right = false;	/* Right end of @src needs merging. */
	int tail;		/* Start of tail of @dst. */
	int marker;		/* End of the inserted runs. */

	/* First, see if the left and right ends need merging. */
	if ((loc + 1) < dsize)
		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
	if (loc > 0)
		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
	/*
	 * Allocate some space.  We will need less if the left, right, or both
	 * ends get merged.  The -1 accounts for the run being replaced.
	 */
	delta = ssize - 1 - left - right;
	if (delta > 0) {
		/* Growing is the only case that can fail; shrink is in-place. */
		dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
		if (IS_ERR(dst))
			return dst;
	}

	*new_size = dsize + delta;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* First, merge the left and right ends, if necessary. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * Offset of the tail of @dst.  This needs to be moved out of the way
	 * to make space for the runs to be copied from @src, i.e. the first
	 * run of the tail of @dst.
	 * Nominally, @tail equals @loc + 1, i.e. location, skipping the
	 * replaced run.  However, if @right, then one of @dst's runs is
	 * already merged into @src.
	 */
	tail = loc + right + 1;
	/*
	 * First run after the @src runs that have been inserted, i.e. where
	 * the tail of @dst needs to be moved to.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.
	 */
	marker = loc + ssize - left;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, tail, dsize - tail);
	ntfs_rl_mc(dst, loc, src, left, ssize - left);

	/* We may have changed the length of the file, so fix the end marker. */
	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	return dst;
}

/*
 * ntfs_rl_split - insert a runlist into the centre of a hole
 * @dst:	destination runlist with a hole
 * @dsize:	number of elements in @dst
 * @src:	source runlist to insert
 * @ssize:	number of elements in @src
 * @loc:	index in @dst of the hole to split
 * @new_size:	on success, set to the new combined size
 *
 * Split the runlist @dst at @loc into two and insert @src in between the two
 * fragments.  No merging of runlists is necessary.  Adjust the size of the
 * holes either side.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.
 */
static inline struct runlist_element *ntfs_rl_split(struct runlist_element *dst, int dsize,
		struct runlist_element *src, int ssize, int loc,
		size_t *new_size)
{
	/* Space required: @dst size + @src size + one new hole. */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
	if (IS_ERR(dst))
		return dst;

	*new_size = dsize + ssize + 1;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);

	/* Adjust the size of the holes either side of @src. */
	dst[loc].length = dst[loc+1].vcn - dst[loc].vcn;
	dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length;
	dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;

	return dst;
}

/*
 * ntfs_runlists_merge - merge two runlists into one
 * @d_runlist:	destination runlist structure to merge into
 * @srl:	source runlist to merge from
 * @s_rl_count:	number of elements in @srl (0 to auto-detect)
 * @new_rl_count: on success, set to the new combined runlist size
 *
 * First we sanity check the two runlists @srl and @drl to make sure that
 * they are sensible and can be merged.  The runlist @srl must be either
 * after the runlist @drl or completely within a hole (or unmapped region)
 * in @drl.
 *
 * It is up to the caller to serialize access to the runlists @drl and @srl.
 *
 * Merging of runlists is necessary in two cases:
 *   1. When attribute lists are used and a further extent is being mapped.
 *   2. When new clusters are allocated to fill a hole or extend a file.
 *
 * There are four possible ways @srl can be merged.  It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a variant
 * of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @drl but this is irrelevant.)
 *
 * On error, return -errno.  Both runlists are left unmodified.
 */
struct runlist_element *ntfs_runlists_merge(struct runlist *d_runlist,
		struct runlist_element *srl, size_t s_rl_count,
		size_t *new_rl_count)
{
	int di, si;		/* Current index into @[ds]rl. */
	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;		/* Index into @drl at which to insert @srl. */
	int dend, send;		/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with lcn >= LCN_HOLE. */
	int marker = 0;
	s64 marker_vcn = 0;
	struct runlist_element *drl = d_runlist->rl, *rl;

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	/* Auto-detect the source length: count up to and including the
	 * zero-length terminator element. */
	if (s_rl_count == 0) {
		for (; srl[s_rl_count].length; s_rl_count++)
			;
		s_rl_count++;
	}

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Scan to the end of the source runlist. */
			drl = ntfs_rl_realloc(drl, s_rl_count, s_rl_count + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, s_rl_count);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
			s_rl_count++;
		}

		*new_rl_count = s_rl_count;
		goto finished;
	}

	if (d_runlist->count < 1 || s_rl_count < 2)
		return ERR_PTR(-EINVAL);

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	WARN_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs
	 * to be inserted.  If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	rl = __ntfs_attr_find_vcn_nolock(d_runlist, srl[sstart].vcn);
	if (IS_ERR(rl))
		di = (int)d_runlist->count - 1;
	else
		di = (int)(rl - d_runlist->rl);
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
	    (srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Scan to the end of both runlists in order to know their sizes. */
	send = (int)s_rl_count - 1;
	dend = (int)d_runlist->count - 1;

	/* Remember the source's end marker (if any) so it can be restored
	 * after the merge. */
	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
		;

	{
		bool start;
		bool finish;
		int ds = dend + 1;		/* Number of elements in drl & srl */
		int ss = sfinal - sstart + 1;

		start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) ||    /* End of file */
			 (drl[dins].vcn == srl[sstart].vcn));	   /* Start of hole */
		finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&  /* End of file */
			  ((drl[dins].vcn + drl[dins].length) <=   /* End of hole */
			   (srl[send - 1].vcn + srl[send - 1].length)));

		/* Or we will lose an end marker. */
		if (finish && !drl[dins].length)
			ss++;
		if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
			finish = false;

		if (start) {
			if (finish)
				drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins, new_rl_count);
			else
				drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins, new_rl_count);
		} else {
			if (finish)
				drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins, new_rl_count);
			else
				drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins, new_rl_count);
		}
		if (IS_ERR(drl)) {
			ntfs_error(NULL, "Merge failed.");
			return drl;
		}
		kvfree(srl);
		if (marker) {
			ntfs_debug("Triggering marker code.");
			for (ds = dend; drl[ds].length; ds++)
				;
			/* We only need to care if @srl ended after @drl. */
			if (drl[ds].vcn <= marker_vcn) {
				int slots = 0;

				if (drl[ds].vcn == marker_vcn) {
					ntfs_debug("Old marker = 0x%llx, replacing with LCN_ENOENT.",
							drl[ds].lcn);
					drl[ds].lcn = LCN_ENOENT;
					goto finished;
				}
				/*
				 * We need to create an unmapped runlist element in
				 * @drl or extend an existing one before adding the
				 * ENOENT terminator.
				 */
				if (drl[ds].lcn == LCN_ENOENT) {
					ds--;
					slots = 1;
				}
				if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
					/* Add an unmapped runlist element. */
					if (!slots) {
						/*
						 * NOTE(review): @drl holds ds + 1 live
						 * elements here (indices 0..ds, including
						 * the terminator at index ds), but
						 * old_size is passed as ds, so the
						 * terminator is not copied by the
						 * realloc.  The drl[ds - 1] read after
						 * the ds++ below then sees that
						 * uncopied element.  The legacy driver
						 * page-aligned both realloc sizes, which
						 * masked this; verify whether old_size
						 * should be ds + 1 here.  Also confirm
						 * that with slots == 2 the final
						 * terminator store below does not land
						 * at index ds + 2, one past the ds + 2
						 * elements allocated.
						 */
						drl = ntfs_rl_realloc_nofail(drl, ds,
								ds + 2);
						slots = 2;
						*new_rl_count += 2;
					}
					ds++;
					/* Need to set vcn if it isn't set already. */
					if (slots != 1)
						drl[ds].vcn = drl[ds - 1].vcn +
								drl[ds - 1].length;
					drl[ds].lcn = LCN_RL_NOT_MAPPED;
					/* We now used up a slot. */
					slots--;
				}
				drl[ds].length = marker_vcn - drl[ds].vcn;
				/* Finally add the ENOENT terminator. */
				ds++;
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
					*new_rl_count += 1;
				}
				drl[ds].vcn = marker_vcn;
				drl[ds].lcn = LCN_ENOENT;
				drl[ds].length = (s64)0;
			}
		}
	}

finished:
	/* The merge was completed successfully. */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}

/*
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol:	ntfs volume
 * @attr:	attribute record whose mapping pairs to decompress
 * @old_runlist: optional runlist to merge the decompressed runlist into
 * @new_rl_count: on success, set to the new runlist size
 *
 * It is up to the caller to serialize access to the runlist @old_runlist.
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist.  On
 * success, return the decompressed runlist.
 *
 * If @old_runlist is not NULL, the decompressed runlist is inserted into the
 * appropriate place in @old_runlist->rl and the resultant, combined runlist
 * is returned.  The original @old_runlist->rl is deallocated.
 *
 * On error, return -errno.  @old_runlist is left unmodified in that case.
 */
struct runlist_element *ntfs_mapping_pairs_decompress(const struct ntfs_volume *vol,
		const struct attr_record *attr, struct runlist *old_runlist,
		size_t *new_rl_count)
{
	s64 vcn;		/* Current vcn. */
	s64 lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	struct runlist_element *rl, *new_rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of struct runlist_elements. */
	u8 b;			/* Current byte offset in buf. */

#ifdef DEBUG
	/* Make sure attr exists and is non-resident. */
	if (!attr || !attr->non_resident) {
		ntfs_error(vol->sb, "Invalid arguments.");
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = le64_to_cpu(attr->data.non_resident.lowest_vcn);
	lcn = 0;
	/* Get start of the mapping pairs array. */
	buf = (u8 *)attr +
			le16_to_cpu(attr->data.non_resident.mapping_pairs_offset);
	attr_end = (u8 *)attr + le32_to_cpu(attr->length);
	if (unlikely(buf < (u8 *)attr || buf > attr_end)) {
		ntfs_error(vol->sb, "Corrupt attribute.");
		return ERR_PTR(-EIO);
	}

	/* Current position in runlist array. */
	rlpos = 0;
	/* Allocate first page and set current runlist size to one page. */
	rl = kvzalloc(rlsize = PAGE_SIZE, GFP_NOFS);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	/* Insert unmapped starting element if necessary. */
	if (vcn) {
		rl->vcn = 0;
		rl->lcn = LCN_RL_NOT_MAPPED;
		rl->length = vcn;
		rlpos++;
	}
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements.  kvzalloc()
		 * operates on whole pages only.
		 */
		if (((rlpos + 3) * sizeof(*rl)) > rlsize) {
			struct runlist_element *rl2;

			rl2 = kvzalloc(rlsize + PAGE_SIZE, GFP_NOFS);
			if (unlikely(!rl2)) {
				kvfree(rl);
				return ERR_PTR(-ENOMEM);
			}
			memcpy(rl2, rl, rlsize);
			kvfree(rl);
			rl = rl2;
			rlsize += PAGE_SIZE;
		}
		/* Enter the current vcn into the current runlist element. */
		rl[rlpos].vcn = vcn;
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we signextend negative values.
		 * A negative run length doesn't make any sense, but hey, I
		 * didn't make up the NTFS specs and Windows NT4 treats the run
		 * length as a signed value so that's how it is...
		 */
		b = *buf & 0xf;
		if (b) {
			if (unlikely(buf + b > attr_end))
				goto io_error;
			/* Little-endian: start from the most significant byte
			 * so sign extension from (s8) propagates correctly. */
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			ntfs_error(vol->sb, "Missing length entry in mapping pairs array.");
			deltaxcn = (s64)-1;
		}
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean-up and return NULL.
		 */
		if (unlikely(deltaxcn < 0)) {
			ntfs_error(vol->sb, "Invalid length in mapping pairs array.");
			goto err_out;
		}
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/* Increment the current vcn by the current run length. */
		vcn += deltaxcn;
		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
		if (!(*buf & 0xf0))
			rl[rlpos].lcn = LCN_HOLE;
		else {
			/* Get the lcn change which really can be negative. */
			u8 b2 = *buf & 0xf;

			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to its new value. */
			lcn += deltaxcn;
#ifdef DEBUG
			/*
			 * On NTFS 1.2-, apparently can have lcn == -1 to
			 * indicate a hole.  But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1.  So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (unlikely(deltaxcn == -1))
					ntfs_error(vol->sb, "lcn delta == -1");
				if (unlikely(lcn == -1))
					ntfs_error(vol->sb, "lcn == -1");
			}
#endif
			/* Check lcn is not below -1. */
			if (unlikely(lcn < -1)) {
				ntfs_error(vol->sb, "Invalid s64 < -1 in mapping pairs array.");
				goto err_out;
			}

			/* chkdsk accepts zero-sized runs only for holes */
			if ((lcn != -1) && !rl[rlpos].length) {
				ntfs_error(vol->sb,
					"Invalid zero-sized data run(lcn : %lld).\n",
					lcn);
				goto err_out;
			}

			/* Enter the current lcn into the runlist element. */
			rl[rlpos].lcn = lcn;
		}
		/* Get to the next runlist element, skipping zero-sized holes */
		if (rl[rlpos].length)
			rlpos++;
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
	}
	/* Leaving the loop without seeing the 0x00 terminator byte means the
	 * mapping pairs array overran the attribute. */
	if (unlikely(buf >= attr_end))
		goto io_error;
	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = le64_to_cpu(attr->data.non_resident.highest_vcn);
	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
		ntfs_error(vol->sb, "Corrupt mapping pairs array in non-resident attribute.");
		goto err_out;
	}
	/* Setup not mapped runlist element if this is the base extent. */
	if (!attr->data.non_resident.lowest_vcn) {
		s64 max_cluster;

		max_cluster = ((le64_to_cpu(attr->data.non_resident.allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits) - 1;
		/*
		 * A highest_vcn of zero means this is a single extent
		 * attribute so simply terminate the runlist with LCN_ENOENT).
		 */
		if (deltaxcn) {
			/*
			 * If there is a difference between the highest_vcn and
			 * the highest cluster, the runlist is either corrupt
			 * or, more likely, there are more extents following
			 * this one.
			 */
			if (deltaxcn < max_cluster) {
				ntfs_debug("More extents to follow; deltaxcn = 0x%llx, max_cluster = 0x%llx",
						deltaxcn, max_cluster);
				rl[rlpos].vcn = vcn;
				vcn += rl[rlpos].length = max_cluster -
						deltaxcn;
				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
				rlpos++;
			} else if (unlikely(deltaxcn > max_cluster)) {
				ntfs_error(vol->sb,
					"Corrupt attribute. deltaxcn = 0x%llx, max_cluster = 0x%llx",
					deltaxcn, max_cluster);
				goto mpa_err;
			}
		}
		rl[rlpos].lcn = LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

	/* Setup terminating runlist element. */
	rl[rlpos].vcn = vcn;
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
	if (!old_runlist || !old_runlist->rl) {
		*new_rl_count = rlpos + 1;
		ntfs_debug("Mapping pairs array successfully decompressed:");
		ntfs_debug_dump_runlist(rl);
		return rl;
	}
	/* Now combine the new and old runlists checking for overlaps. */
	new_rl = ntfs_runlists_merge(old_runlist, rl, rlpos + 1, new_rl_count);
	if (!IS_ERR(new_rl))
		return new_rl;
	kvfree(rl);
	ntfs_error(vol->sb, "Failed to merge runlists.");
	return new_rl;
io_error:
	ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
	kvfree(rl);
	return ERR_PTR(-EIO);
}

/*
 * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
 * @rl:		runlist to use for conversion
 * @vcn:	vcn to convert
 *
 * Convert the virtual cluster number @vcn of an attribute into a logical
 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
 * corresponding lcns.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code		Meaning / Description
 * ==================================================
 *  LCN_HOLE		Hole / not allocated on disk.
 *  LCN_RL_NOT_MAPPED	This is part of the runlist which has not been
 *			inserted into the runlist yet.
 *  LCN_ENOENT		There is no such vcn in the attribute.
 *
 * Locking: - The caller must have locked the runlist (for reading or writing).
 *	    - This function does not touch the lock, nor does it modify the
 *	      runlist.
 */
s64 ntfs_rl_vcn_to_lcn(const struct runlist_element *rl, const s64 vcn)
{
	int i;

	/*
	 * If rl is NULL, assume that we have found an unmapped runlist. The
	 * caller can then attempt to map it and fail appropriately if
	 * necessary.
	 */
	if (unlikely(!rl))
		return LCN_RL_NOT_MAPPED;

	/* Catch out of lower bounds vcn. */
	if (unlikely(vcn < rl[0].vcn))
		return LCN_ENOENT;

	/*
	 * Walk the runs; @vcn lies in run i when it is below the start vcn of
	 * run i + 1 (the array is terminated by a zero-length element, so
	 * rl[i + 1] is always a valid access while rl[i].length != 0).
	 */
	for (i = 0; likely(rl[i].length); i++) {
		if (vcn < rl[i+1].vcn) {
			/* Real lcn: add the offset of @vcn within the run. */
			if (likely(rl[i].lcn >= 0))
				return rl[i].lcn + (vcn - rl[i].vcn);
			/* Negative lcns (holes etc.) are returned as-is. */
			return rl[i].lcn;
		}
	}
	/*
	 * The terminator element is setup to the correct value, i.e. one of
	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
	 */
	if (likely(rl[i].lcn < 0))
		return rl[i].lcn;
	/* Just in case... We could replace this with BUG() some day. */
	return LCN_ENOENT;
}

/*
 * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
 * @rl:		runlist to search
 * @vcn:	vcn to find
 *
 * Find the virtual cluster number @vcn in the runlist @rl and return the
 * address of the runlist element containing the @vcn on success.
 *
 * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
 * the runlist.
 *
 * Locking: The runlist must be locked on entry.
 */
struct runlist_element *ntfs_rl_find_vcn_nolock(struct runlist_element *rl, const s64 vcn)
{
	if (unlikely(!rl || vcn < rl[0].vcn))
		return NULL;
	while (likely(rl->length)) {
		if (unlikely(vcn < rl[1].vcn)) {
			/*
			 * Found the containing run. Only return it if it is
			 * mapped: a hole (LCN_HOLE) or a real lcn qualifies,
			 * lcns below LCN_HOLE (not mapped etc.) do not.
			 */
			if (likely(rl->lcn >= LCN_HOLE))
				return rl;
			return NULL;
		}
		rl++;
	}
	/* @vcn is past the last run; only a clean end-of-runlist terminator
	 * is acceptable to return.
	 */
	if (likely(rl->lcn == LCN_ENOENT))
		return rl;
	return NULL;
}

/*
 * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
 * @n:		number for which to get the number of bytes for
 *
 * Return the number of bytes required to store @n unambiguously as
 * a signed number.
 *
 * This is used in the context of the mapping pairs array to determine how
 * many bytes will be needed in the array to store a given logical cluster
 * number (lcn) or a specific run length.
 *
 * Return the number of bytes needed. This function cannot fail.
 */
static inline int ntfs_get_nr_significant_bytes(const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	/* Count bytes until only the sign extension (0 or -1) remains. */
	i = 0;
	do {
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	/* If the sign bit is wrong, we need an extra byte. */
	j = (n >> 8 * (i - 1)) & 0xff;
	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
		i++;
	return i;
}

/*
 * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
 * @vol:	ntfs volume
 * @rl:		runlist to calculate the mapping pairs array size for
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 * @max_mp_size: maximum size to return (0 or less means unlimited)
 *
 * Walk the locked runlist @rl and calculate the size in bytes of the mapping
 * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
 * finishing with vcn @last_vcn.
1086 * 1087 * A @last_vcn of -1 means end of runlist and in that case the size of the 1088 * mapping pairs array corresponding to the runlist starting at vcn @first_vcn 1089 * and finishing at the end of the runlist is determined. 1090 * 1091 * This for example allows us to allocate a buffer of the right size when 1092 * building the mapping pairs array. 1093 * 1094 * If @rl is NULL, just return 1 (for the single terminator byte). 1095 * 1096 * Return the calculated size in bytes on success. On error, return -errno. 1097 */ 1098 int ntfs_get_size_for_mapping_pairs(const struct ntfs_volume *vol, 1099 const struct runlist_element *rl, const s64 first_vcn, 1100 const s64 last_vcn, int max_mp_size) 1101 { 1102 s64 prev_lcn; 1103 int rls; 1104 bool the_end = false; 1105 1106 if (first_vcn < 0 || last_vcn < -1) 1107 return -EINVAL; 1108 1109 if (last_vcn >= 0 && first_vcn > last_vcn) 1110 return -EINVAL; 1111 1112 if (!rl) { 1113 WARN_ON(first_vcn); 1114 WARN_ON(last_vcn > 0); 1115 return 1; 1116 } 1117 if (max_mp_size <= 0) 1118 max_mp_size = INT_MAX; 1119 /* Skip to runlist element containing @first_vcn. */ 1120 while (rl->length && first_vcn >= rl[1].vcn) 1121 rl++; 1122 if (unlikely((!rl->length && first_vcn > rl->vcn) || 1123 first_vcn < rl->vcn)) 1124 return -EINVAL; 1125 prev_lcn = 0; 1126 /* Always need the termining zero byte. */ 1127 rls = 1; 1128 /* Do the first partial run if present. */ 1129 if (first_vcn > rl->vcn) { 1130 s64 delta, length = rl->length; 1131 1132 /* We know rl->length != 0 already. */ 1133 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) 1134 goto err_out; 1135 /* 1136 * If @stop_vcn is given and finishes inside this run, cap the 1137 * run length. 1138 */ 1139 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { 1140 s64 s1 = last_vcn + 1; 1141 1142 if (unlikely(rl[1].vcn > s1)) 1143 length = s1 - rl->vcn; 1144 the_end = true; 1145 } 1146 delta = first_vcn - rl->vcn; 1147 /* Header byte + length. 
*/ 1148 rls += 1 + ntfs_get_nr_significant_bytes(length - delta); 1149 /* 1150 * If the logical cluster number (lcn) denotes a hole and we 1151 * are on NTFS 3.0+, we don't store it at all, i.e. we need 1152 * zero space. On earlier NTFS versions we just store the lcn. 1153 * Note: this assumes that on NTFS 1.2-, holes are stored with 1154 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1). 1155 */ 1156 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { 1157 prev_lcn = rl->lcn; 1158 if (likely(rl->lcn >= 0)) 1159 prev_lcn += delta; 1160 /* Change in lcn. */ 1161 rls += ntfs_get_nr_significant_bytes(prev_lcn); 1162 } 1163 /* Go to next runlist element. */ 1164 rl++; 1165 } 1166 /* Do the full runs. */ 1167 for (; rl->length && !the_end; rl++) { 1168 s64 length = rl->length; 1169 1170 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) 1171 goto err_out; 1172 /* 1173 * If @stop_vcn is given and finishes inside this run, cap the 1174 * run length. 1175 */ 1176 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { 1177 s64 s1 = last_vcn + 1; 1178 1179 if (unlikely(rl[1].vcn > s1)) 1180 length = s1 - rl->vcn; 1181 the_end = true; 1182 } 1183 /* Header byte + length. */ 1184 rls += 1 + ntfs_get_nr_significant_bytes(length); 1185 /* 1186 * If the logical cluster number (lcn) denotes a hole and we 1187 * are on NTFS 3.0+, we don't store it at all, i.e. we need 1188 * zero space. On earlier NTFS versions we just store the lcn. 1189 * Note: this assumes that on NTFS 1.2-, holes are stored with 1190 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1). 1191 */ 1192 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { 1193 /* Change in lcn. 
			 */
			rls += ntfs_get_nr_significant_bytes(rl->lcn -
					prev_lcn);
			prev_lcn = rl->lcn;
		}

		/* Stop early once the caller-imposed size limit is exceeded. */
		if (rls > max_mp_size)
			break;
	}
	return rls;
err_out:
	/* An unmapped element is a caller error; anything else is corruption. */
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		rls = -EINVAL;
	else
		rls = -EIO;
	return rls;
}

/*
 * ntfs_write_significant_bytes - write the significant bytes of a number
 * @dst:	destination buffer to write to
 * @dst_max:	pointer to last byte of destination buffer for bounds checking
 * @n:		number whose significant bytes to write
 *
 * Store in @dst, the minimum bytes of the number @n which are required to
 * identify @n unambiguously as a signed number, taking care not to exceed
 * @dst_max, the maximum position within @dst to which we are allowed to
 * write.
 *
 * This is used when building the mapping pairs array of a runlist to compress
 * a given logical cluster number (lcn) or a specific run length to the minimum
 * size possible.
 *
 * Return the number of bytes written on success. On error, i.e. the
 * destination buffer @dst is too small, return -ENOSPC.
 */
static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
		const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	/* Emit little-endian bytes until only the sign extension remains. */
	i = 0;
	do {
		if (unlikely(dst > dst_max))
			goto err_out;
		*dst++ = l & 0xffll;
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if (n < 0 && j >= 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)-1;
	} else if (n > 0 && j < 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)0;
	}
	return i;
err_out:
	return -ENOSPC;
}

/*
 * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
 * @vol:	ntfs volume
 * @dst:	destination buffer to build mapping pairs array into
 * @dst_len:	size of @dst in bytes
 * @rl:		runlist to build the mapping pairs array from
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 * @stop_vcn:	on return, set to the first vcn outside the destination buffer
 * @stop_rl:	on return, set to the runlist element where encoding stopped
 * @de_cluster_count: on return, set to the number of delalloc clusters seen
 *
 * Create the mapping pairs array from the locked runlist @rl, starting at vcn
 * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
 * @dst_len is the size of @dst in bytes and it should be at least equal to the
 * value obtained by calling ntfs_get_size_for_mapping_pairs().
 *
 * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
 * array corresponding to the runlist starting at vcn @first_vcn and finishing
 * at the end of the runlist is created.
 *
 * If @rl is NULL, just write a single terminator byte to @dst.
 *
 * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
 * the first vcn outside the destination buffer.
 * Note that on error, @dst has
 * been filled with all the mapping pairs that will fit, thus it can be treated
 * as partial success, in that a new attribute extent needs to be created or
 * the next extent has to be used and the mapping pairs build has to be
 * continued with @first_vcn set to *@stop_vcn.
 *
 * Return 0 on success and -errno on error. The following error codes are
 * defined:
 *	-EINVAL	- Run list contains unmapped elements. Make sure to only pass
 *		  fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *	-ENOSPC	- The destination buffer is too small.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_mapping_pairs_build(const struct ntfs_volume *vol, s8 *dst,
		const int dst_len, const struct runlist_element *rl,
		const s64 first_vcn, const s64 last_vcn, s64 *const stop_vcn,
		struct runlist_element **stop_rl, unsigned int *de_cluster_count)
{
	s64 prev_lcn;
	s8 *dst_max, *dst_next;
	int err = -ENOSPC;
	bool the_end = false;
	s8 len_len, lcn_len;
	unsigned int de_cnt = 0;

	if (first_vcn < 0 || last_vcn < -1 || dst_len < 1)
		return -EINVAL;
	if (last_vcn >= 0 && first_vcn > last_vcn)
		return -EINVAL;

	if (!rl) {
		WARN_ON(first_vcn || last_vcn > 0);
		if (stop_vcn)
			*stop_vcn = 0;
		/* Terminator byte. */
		*dst = 0;
		return 0;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	/*
	 * @dst_max is used for bounds checking in
	 * ntfs_write_significant_bytes().
	 */
	dst_max = dst + dst_len - 1;
	prev_lcn = 0;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length - delta);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @last_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;

			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just write the lcn
		 * change.
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, rl->lcn - prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
			prev_lcn = rl->lcn;
		} else {
			/* Count delayed-allocation clusters for the caller. */
			if (rl->lcn == LCN_DELALLOC)
				de_cnt += rl->length;
			lcn_len = 0;
		}
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
	}
	/* Success. */
	if (de_cluster_count)
		*de_cluster_count = de_cnt;
	err = 0;
size_err:
	/* Set stop vcn. */
	if (stop_vcn)
		*stop_vcn = rl->vcn;
	if (stop_rl)
		*stop_rl = (struct runlist_element *)rl;
	/* Add terminator byte.
	 */
	*dst = 0;
	return err;
err_out:
	/* Unmapped element is a caller error; anything else is corruption. */
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		err = -EINVAL;
	else
		err = -EIO;
	return err;
}

/*
 * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
 * @vol:	ntfs volume (needed for error output)
 * @runlist:	runlist to truncate
 * @new_length:	the new length of the runlist in VCNs
 *
 * Truncate the runlist described by @runlist as well as the memory buffer
 * holding the runlist elements to a length of @new_length VCNs.
 *
 * If @new_length lies within the runlist, the runlist elements with VCNs of
 * @new_length and above are discarded. As a special case if @new_length is
 * zero, the runlist is discarded and set to NULL.
 *
 * If @new_length lies beyond the runlist, a sparse runlist element is added to
 * the end of the runlist @runlist or if the last runlist element is a sparse
 * one already, this is extended.
 *
 * Note, no checking is done for unmapped runlist elements. It is assumed that
 * the caller has mapped any elements that need to be mapped already.
 *
 * Return 0 on success and -errno on error.
 *
 * Locking: The caller must hold @runlist->lock for writing.
 */
int ntfs_rl_truncate_nolock(const struct ntfs_volume *vol, struct runlist *const runlist,
		const s64 new_length)
{
	struct runlist_element *rl;
	int old_size;

	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);

	if (!runlist || new_length < 0)
		return -EINVAL;

	rl = runlist->rl;
	if (new_length < rl->vcn)
		return -EINVAL;

	/* Find @new_length in the runlist. */
	while (likely(rl->length && new_length >= rl[1].vcn))
		rl++;
	/*
	 * If not at the end of the runlist we need to shrink it.
	 * If at the end of the runlist we need to expand it.
	 */
	if (rl->length) {
		struct runlist_element *trl;
		bool is_end;

		ntfs_debug("Shrinking runlist.");
		/* Determine the runlist size. */
		trl = rl + 1;
		while (likely(trl->length))
			trl++;
		old_size = trl - runlist->rl + 1;
		/* Truncate the run. */
		rl->length = new_length - rl->vcn;
		/*
		 * If a run was partially truncated, make the following runlist
		 * element a terminator.
		 */
		is_end = false;
		if (rl->length) {
			rl++;
			if (!rl->length)
				is_end = true;
			rl->vcn = new_length;
			rl->length = 0;
		}
		rl->lcn = LCN_ENOENT;
		runlist->count = rl - runlist->rl + 1;
		/* Reallocate memory if necessary. */
		if (!is_end) {
			int new_size = rl - runlist->rl + 1;

			/* A failed shrink only wastes memory; not fatal. */
			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
			if (IS_ERR(rl))
				ntfs_warning(vol->sb,
					"Failed to shrink runlist buffer. This just wastes a bit of memory temporarily so we ignore it and return success.");
			else
				runlist->rl = rl;
		}
	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
		ntfs_debug("Expanding runlist.");
		/*
		 * If there is a previous runlist element and it is a sparse
		 * one, extend it. Otherwise need to add a new, sparse runlist
		 * element.
		 */
		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
			(rl - 1)->length = new_length - (rl - 1)->vcn;
		else {
			/* Determine the runlist size. */
			old_size = rl - runlist->rl + 1;
			/* Reallocate memory if necessary. */
			rl = ntfs_rl_realloc(runlist->rl, old_size,
					old_size + 1);
			if (IS_ERR(rl)) {
				ntfs_error(vol->sb, "Failed to expand runlist buffer, aborting.");
				return PTR_ERR(rl);
			}
			runlist->rl = rl;
			/*
			 * Set @rl to the same runlist element in the new
			 * runlist as before in the old runlist.
			 */
			rl += old_size - 1;
			/* Add a new, sparse runlist element. */
			rl->lcn = LCN_HOLE;
			rl->length = new_length - rl->vcn;
			/* Add a new terminator runlist element. */
			rl++;
			rl->length = 0;
			runlist->count = old_size + 1;
		}
		rl->vcn = new_length;
		rl->lcn = LCN_ENOENT;
	} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
		/* Runlist already has same size as requested. */
		rl->lcn = LCN_ENOENT;
	}
	ntfs_debug("Done.");
	return 0;
}

/*
 * ntfs_rl_sparse - check whether a runlist has sparse regions or not.
 * @rl:		runlist to check
 *
 * Return 1 if it has, 0 if not, -errno on error.
 */
int ntfs_rl_sparse(struct runlist_element *rl)
{
	struct runlist_element *rlc;

	if (!rl)
		return -EINVAL;

	for (rlc = rl; rlc->length; rlc++)
		if (rlc->lcn < 0) {
			/* Only holes and delalloc runs are legal here. */
			if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) {
				pr_err("%s: bad runlist\n", __func__);
				return -EINVAL;
			}
			return 1;
		}
	return 0;
}

/*
 * ntfs_rl_get_compressed_size - calculate length of non sparse regions
 * @vol:	ntfs volume (need for cluster size)
 * @rl:		runlist to calculate for
 *
 * Return compressed size or -errno on error.
1615 */ 1616 s64 ntfs_rl_get_compressed_size(struct ntfs_volume *vol, struct runlist_element *rl) 1617 { 1618 struct runlist_element *rlc; 1619 s64 ret = 0; 1620 1621 if (!rl) 1622 return -EINVAL; 1623 1624 for (rlc = rl; rlc->length; rlc++) { 1625 if (rlc->lcn < 0) { 1626 if (rlc->lcn != LCN_HOLE && rlc->lcn != LCN_DELALLOC) { 1627 ntfs_error(vol->sb, "%s: bad runlist, rlc->lcn : %lld", 1628 __func__, rlc->lcn); 1629 return -EINVAL; 1630 } 1631 } else 1632 ret += rlc->length; 1633 } 1634 return NTFS_CLU_TO_B(vol, ret); 1635 } 1636 1637 static inline bool ntfs_rle_lcn_contiguous(struct runlist_element *left_rle, 1638 struct runlist_element *right_rle) 1639 { 1640 if (left_rle->lcn > LCN_HOLE && 1641 left_rle->lcn + left_rle->length == right_rle->lcn) 1642 return true; 1643 else if (left_rle->lcn == LCN_HOLE && right_rle->lcn == LCN_HOLE) 1644 return true; 1645 else 1646 return false; 1647 } 1648 1649 static inline bool ntfs_rle_contain(struct runlist_element *rle, s64 vcn) 1650 { 1651 if (rle->length > 0 && 1652 vcn >= rle->vcn && vcn < rle->vcn + rle->length) 1653 return true; 1654 else 1655 return false; 1656 } 1657 1658 struct runlist_element *ntfs_rl_insert_range(struct runlist_element *dst_rl, int dst_cnt, 1659 struct runlist_element *src_rl, int src_cnt, 1660 size_t *new_rl_cnt) 1661 { 1662 struct runlist_element *i_rl, *new_rl, *src_rl_origin = src_rl; 1663 struct runlist_element dst_rl_split; 1664 s64 start_vcn; 1665 int new_1st_cnt, new_2nd_cnt, new_3rd_cnt, new_cnt; 1666 1667 if (!dst_rl || !src_rl || !new_rl_cnt) 1668 return ERR_PTR(-EINVAL); 1669 if (dst_cnt <= 0 || src_cnt <= 0) 1670 return ERR_PTR(-EINVAL); 1671 if (!(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT && 1672 dst_rl[dst_cnt - 1].length == 0) || 1673 src_rl[src_cnt - 1].lcn < LCN_HOLE) 1674 return ERR_PTR(-EINVAL); 1675 1676 start_vcn = src_rl[0].vcn; 1677 1678 i_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn); 1679 if (!i_rl || 1680 (i_rl->lcn == LCN_ENOENT && i_rl->vcn != start_vcn) || 1681 
(i_rl->lcn != LCN_ENOENT && !ntfs_rle_contain(i_rl, start_vcn))) 1682 return ERR_PTR(-EINVAL); 1683 1684 new_1st_cnt = (int)(i_rl - dst_rl); 1685 if (new_1st_cnt > dst_cnt) 1686 return ERR_PTR(-EINVAL); 1687 new_3rd_cnt = dst_cnt - new_1st_cnt; 1688 if (new_3rd_cnt < 1) 1689 return ERR_PTR(-EINVAL); 1690 1691 if (i_rl[0].vcn != start_vcn) { 1692 if (i_rl[0].lcn == LCN_HOLE && src_rl[0].lcn == LCN_HOLE) 1693 goto merge_src_rle; 1694 1695 /* split @i_rl[0] and create @dst_rl_split */ 1696 dst_rl_split.vcn = i_rl[0].vcn; 1697 dst_rl_split.length = start_vcn - i_rl[0].vcn; 1698 dst_rl_split.lcn = i_rl[0].lcn; 1699 1700 i_rl[0].vcn = start_vcn; 1701 i_rl[0].length -= dst_rl_split.length; 1702 i_rl[0].lcn += dst_rl_split.length; 1703 } else { 1704 struct runlist_element *dst_rle, *src_rle; 1705 merge_src_rle: 1706 1707 /* not split @i_rl[0] */ 1708 dst_rl_split.lcn = LCN_ENOENT; 1709 1710 /* merge @src_rl's first run and @i_rl[0]'s left run if possible */ 1711 dst_rle = &dst_rl[new_1st_cnt - 1]; 1712 src_rle = &src_rl[0]; 1713 if (new_1st_cnt > 0 && ntfs_rle_lcn_contiguous(dst_rle, src_rle)) { 1714 WARN_ON(dst_rle->vcn + dst_rle->length != src_rle->vcn); 1715 dst_rle->length += src_rle->length; 1716 src_rl++; 1717 src_cnt--; 1718 } else { 1719 /* merge @src_rl's last run and @i_rl[0]'s right if possible */ 1720 dst_rle = &dst_rl[new_1st_cnt]; 1721 src_rle = &src_rl[src_cnt - 1]; 1722 1723 if (ntfs_rle_lcn_contiguous(dst_rle, src_rle)) { 1724 dst_rle->length += src_rle->length; 1725 src_cnt--; 1726 } 1727 } 1728 } 1729 1730 new_2nd_cnt = src_cnt; 1731 new_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt; 1732 new_cnt += dst_rl_split.lcn >= LCN_HOLE ? 
			1 : 0;
	new_rl = kvcalloc(new_cnt, sizeof(*new_rl), GFP_NOFS);
	if (!new_rl)
		return ERR_PTR(-ENOMEM);

	/* Copy the @dst_rl's first half to @new_rl */
	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
	if (dst_rl_split.lcn >= LCN_HOLE) {
		/* Append the left half of the split run. */
		ntfs_rl_mc(new_rl, new_1st_cnt, &dst_rl_split, 0, 1);
		new_1st_cnt++;
	}
	/* Copy the @src_rl to @new_rl */
	ntfs_rl_mc(new_rl, new_1st_cnt, src_rl, 0, new_2nd_cnt);
	/* Copy the @dst_rl's second half to @new_rl */
	if (new_3rd_cnt >= 1) {
		struct runlist_element *rl, *rl_3rd;
		int dst_1st_cnt = dst_rl_split.lcn >= LCN_HOLE ?
				new_1st_cnt - 1 : new_1st_cnt;

		ntfs_rl_mc(new_rl, new_1st_cnt + new_2nd_cnt,
				dst_rl, dst_1st_cnt, new_3rd_cnt);
		/* Update vcn of the @dst_rl's second half runs to reflect
		 * appended @src_rl.
		 */
		if (new_1st_cnt + new_2nd_cnt == 0) {
			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt + 1];
			rl = &new_rl[new_1st_cnt + new_2nd_cnt];
		} else {
			rl_3rd = &new_rl[new_1st_cnt + new_2nd_cnt];
			rl = &new_rl[new_1st_cnt + new_2nd_cnt - 1];
		}
		/* Re-chain vcns so each run starts where its predecessor ends. */
		do {
			rl_3rd->vcn = rl->vcn + rl->length;
			if (rl_3rd->length <= 0)
				break;
			rl = rl_3rd;
			rl_3rd++;
		} while (1);
	}
	*new_rl_cnt = new_1st_cnt + new_2nd_cnt + new_3rd_cnt;

	/* Ownership of both input arrays is taken; free them. */
	kvfree(dst_rl);
	kvfree(src_rl_origin);
	return new_rl;
}

/*
 * ntfs_rl_punch_hole - replace a vcn range of a runlist with a sparse hole
 * @dst_rl:	runlist to punch the hole into (must be LCN_ENOENT terminated)
 * @dst_cnt:	number of elements in @dst_rl including the terminator
 * @start_vcn:	first vcn of the range to punch
 * @len:	length of the range in clusters
 * @punch_rl:	on success, set to a newly allocated, terminated runlist
 *		describing the runs that were punched out (for the caller to
 *		free the underlying clusters)
 * @new_rl_cnt:	on success, set to the element count of the returned runlist
 *
 * The runs covering [@start_vcn, @start_vcn + @len - 1] (clamped to the end
 * of @dst_rl) are replaced by a single LCN_HOLE run; boundary runs are split
 * as needed and adjacent holes are merged.
 *
 * On success, @dst_rl is freed and the new runlist is returned. On error an
 * ERR_PTR is returned and *@punch_rl is NULL (or untouched).
 */
struct runlist_element *ntfs_rl_punch_hole(struct runlist_element *dst_rl, int dst_cnt,
		s64 start_vcn, s64 len,
		struct runlist_element **punch_rl,
		size_t *new_rl_cnt)
{
	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl, hole_rl[1];
	s64 end_vcn;
	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt;
	bool begin_split, end_split, one_split_3;

	if (dst_cnt < 2 ||
			!(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
			dst_rl[dst_cnt - 1].length == 0))
		return ERR_PTR(-EINVAL);

	/* Clamp the end of the range to the last real run of @dst_rl. */
	end_vcn = min(start_vcn + len - 1,
			dst_rl[dst_cnt - 2].vcn + dst_rl[dst_cnt - 2].length - 1);

	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
	if (!s_rl ||
			s_rl->lcn <= LCN_ENOENT ||
			!ntfs_rle_contain(s_rl, start_vcn))
		return ERR_PTR(-EINVAL);

	/* Does the hole start inside @s_rl (rather than at its first vcn)? */
	begin_split = s_rl->vcn != start_vcn ? true : false;

	e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn);
	if (!e_rl ||
			e_rl->lcn <= LCN_ENOENT ||
			!ntfs_rle_contain(e_rl, end_vcn))
		return ERR_PTR(-EINVAL);

	/* Does the hole end inside @e_rl (rather than at its last vcn)? */
	end_split = e_rl->vcn + e_rl->length - 1 != end_vcn ? true : false;

	/* @s_rl has to be split into left, punched hole, and right */
	one_split_3 = e_rl == s_rl && begin_split && end_split ? true : false;

	punch_cnt = (int)(e_rl - s_rl) + 1;

	/* +1 for the terminator element of the punched-out runlist. */
	*punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element),
			GFP_NOFS);
	if (!*punch_rl)
		return ERR_PTR(-ENOMEM);

	/* Worst case: both boundary runs split plus the new hole element. */
	new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3;
	new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS);
	if (!new_rl) {
		kvfree(*punch_rl);
		*punch_rl = NULL;
		return ERR_PTR(-ENOMEM);
	}

	new_1st_cnt = (int)(s_rl - dst_rl) + 1;
	ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt);

	(*punch_rl)[punch_cnt].lcn = LCN_ENOENT;
	(*punch_rl)[punch_cnt].length = 0;

	if (!begin_split)
		new_1st_cnt--;
	dst_3rd_rl = e_rl;
	dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1;
	if (!end_split) {
		dst_3rd_rl++;
		dst_3rd_cnt--;
	}

	/* Copy the 1st part of @dst_rl into @new_rl */
	ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt);
	if (begin_split) {
		/* the @s_rl has to be split and copied into the last of
		 * @new_rl and the first of @punch_rl
		 */
		s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn;

		if (new_1st_cnt)
			new_rl[new_1st_cnt - 1].length = first_cnt;

		(*punch_rl)[0].vcn = start_vcn;
		(*punch_rl)[0].length -= first_cnt;
		/* Only real lcns advance; sentinel lcns stay unchanged. */
		if ((*punch_rl)[0].lcn > LCN_HOLE)
			(*punch_rl)[0].lcn += first_cnt;
	}

	/* Copy a hole into @new_rl */
	hole_rl[0].vcn = start_vcn;
	hole_rl[0].length = (s64)len;
	hole_rl[0].lcn = LCN_HOLE;
	ntfs_rl_mc(new_rl, new_1st_cnt, hole_rl, 0, 1);

	/* Copy the 3rd part of @dst_rl into @new_rl */
	ntfs_rl_mc(new_rl, new_1st_cnt + 1, dst_3rd_rl, 0, dst_3rd_cnt);
	if (end_split) {
		/* the @e_rl has to be split and copied into the first of
		 * @new_rl and the last of @punch_rl
		 */
		s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1;

		new_rl[new_1st_cnt + 1].vcn = end_vcn + 1;
		new_rl[new_1st_cnt + 1].length -= first_cnt;
		/* Only real lcns advance; sentinel lcns stay unchanged. */
		if (new_rl[new_1st_cnt + 1].lcn > LCN_HOLE)
			new_rl[new_1st_cnt + 1].lcn += first_cnt;

		if (one_split_3)
			(*punch_rl)[punch_cnt - 1].length -=
				new_rl[new_1st_cnt + 1].length;
		else
			(*punch_rl)[punch_cnt - 1].length = first_cnt;
	}

	/* Merge left and hole, or hole and right in @new_rl, if left or right
	 * consists of holes.
	 */
	merge_cnt = 0;
	if (new_1st_cnt > 0 && new_rl[new_1st_cnt - 1].lcn == LCN_HOLE) {
		/* Merge right and hole */
		s_rl = &new_rl[new_1st_cnt - 1];
		s_rl->length += s_rl[1].length;
		merge_cnt = 1;
		/* Merge left and right */
		if (new_1st_cnt + 1 < new_cnt &&
				new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
			s_rl->length += s_rl[2].length;
			merge_cnt++;
		}
	} else if (new_1st_cnt + 1 < new_cnt &&
			new_rl[new_1st_cnt + 1].lcn == LCN_HOLE) {
		/* Merge left and hole */
		s_rl = &new_rl[new_1st_cnt];
		s_rl->length += s_rl[1].length;
		merge_cnt = 1;
	}
	if (merge_cnt) {
		struct runlist_element *d_rl, *src_rl;

		/* Close the gap left by the merged element(s). */
		d_rl = s_rl + 1;
		src_rl = s_rl + 1 + merge_cnt;
		ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl),
				(int)(&new_rl[new_cnt - 1] - src_rl) + 1);
	}

	/* Fix up the terminator vcn of the punched-out runlist. */
	(*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn +
			(*punch_rl)[punch_cnt - 1].length;

	/* punch_cnt elements of dst are replaced with one hole */
	*new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) +
			1 - merge_cnt;
	kvfree(dst_rl);
	return new_rl;
}

/*
 * ntfs_rl_collapse_range - remove a vcn range from a runlist, shifting the
 *			    following runs down
 * @dst_rl:	runlist to collapse (must be LCN_ENOENT terminated)
 * @dst_cnt:	number of elements in @dst_rl including the terminator
 * @start_vcn:	first vcn of the range to remove
 * @len:	length of the range in clusters
 * @punch_rl:	on success, set to a newly allocated, terminated runlist
 *		describing the removed runs
 * @new_rl_cnt:	on success, set to the element count of the returned runlist
 */
struct runlist_element *ntfs_rl_collapse_range(struct runlist_element *dst_rl, int dst_cnt,
		s64 start_vcn, s64 len,
		struct runlist_element **punch_rl,
		size_t *new_rl_cnt)
{
	struct runlist_element *s_rl, *e_rl, *new_rl, *dst_3rd_rl;
	s64 end_vcn;
	int new_1st_cnt, dst_3rd_cnt, new_cnt, punch_cnt, merge_cnt, i;
	bool begin_split, end_split, one_split_3;

	if (dst_cnt < 2 ||
			!(dst_rl[dst_cnt - 1].lcn == LCN_ENOENT &&
			dst_rl[dst_cnt - 1].length == 0))
		return ERR_PTR(-EINVAL);

	/* Clamp the end of the range to the terminator vcn of @dst_rl. */
	end_vcn = min(start_vcn + len - 1,
			dst_rl[dst_cnt - 1].vcn - 1);

	s_rl = ntfs_rl_find_vcn_nolock(dst_rl, start_vcn);
	if (!s_rl ||
			s_rl->lcn <= LCN_ENOENT ||
			!ntfs_rle_contain(s_rl, start_vcn))
return ERR_PTR(-EINVAL); 1952 1953 begin_split = s_rl->vcn != start_vcn ? true : false; 1954 1955 e_rl = ntfs_rl_find_vcn_nolock(dst_rl, end_vcn); 1956 if (!e_rl || 1957 e_rl->lcn <= LCN_ENOENT || 1958 !ntfs_rle_contain(e_rl, end_vcn)) 1959 return ERR_PTR(-EINVAL); 1960 1961 end_split = e_rl->vcn + e_rl->length - 1 != end_vcn ? true : false; 1962 1963 /* @s_rl has to be split into left, collapsed, and right */ 1964 one_split_3 = e_rl == s_rl && begin_split && end_split ? true : false; 1965 1966 punch_cnt = (int)(e_rl - s_rl) + 1; 1967 *punch_rl = kvcalloc(punch_cnt + 1, sizeof(struct runlist_element), 1968 GFP_NOFS); 1969 if (!*punch_rl) 1970 return ERR_PTR(-ENOMEM); 1971 1972 new_cnt = dst_cnt - (int)(e_rl - s_rl + 1) + 3; 1973 new_rl = kvcalloc(new_cnt, sizeof(struct runlist_element), GFP_NOFS); 1974 if (!new_rl) { 1975 kvfree(*punch_rl); 1976 *punch_rl = NULL; 1977 return ERR_PTR(-ENOMEM); 1978 } 1979 1980 new_1st_cnt = (int)(s_rl - dst_rl) + 1; 1981 ntfs_rl_mc(*punch_rl, 0, dst_rl, new_1st_cnt - 1, punch_cnt); 1982 (*punch_rl)[punch_cnt].lcn = LCN_ENOENT; 1983 (*punch_rl)[punch_cnt].length = 0; 1984 1985 if (!begin_split) 1986 new_1st_cnt--; 1987 dst_3rd_rl = e_rl; 1988 dst_3rd_cnt = (int)(&dst_rl[dst_cnt - 1] - e_rl) + 1; 1989 if (!end_split) { 1990 dst_3rd_rl++; 1991 dst_3rd_cnt--; 1992 } 1993 1994 /* Copy the 1st part of @dst_rl into @new_rl */ 1995 ntfs_rl_mc(new_rl, 0, dst_rl, 0, new_1st_cnt); 1996 if (begin_split) { 1997 /* the @e_rl has to be splited and copied into the last of @new_rl 1998 * and the first of @punch_rl 1999 */ 2000 s64 first_cnt = start_vcn - dst_rl[new_1st_cnt - 1].vcn; 2001 2002 new_rl[new_1st_cnt - 1].length = first_cnt; 2003 2004 (*punch_rl)[0].vcn = start_vcn; 2005 (*punch_rl)[0].length -= first_cnt; 2006 if ((*punch_rl)[0].lcn > LCN_HOLE) 2007 (*punch_rl)[0].lcn += first_cnt; 2008 } 2009 2010 /* Copy the 3rd part of @dst_rl into @new_rl */ 2011 ntfs_rl_mc(new_rl, new_1st_cnt, dst_3rd_rl, 0, dst_3rd_cnt); 2012 if (end_split) { 2013 
/* the @e_rl has to be splited and copied into the first of 2014 * @new_rl and the last of @punch_rl 2015 */ 2016 s64 first_cnt = end_vcn - dst_3rd_rl[0].vcn + 1; 2017 2018 new_rl[new_1st_cnt].vcn = end_vcn + 1; 2019 new_rl[new_1st_cnt].length -= first_cnt; 2020 if (new_rl[new_1st_cnt].lcn > LCN_HOLE) 2021 new_rl[new_1st_cnt].lcn += first_cnt; 2022 2023 if (one_split_3) 2024 (*punch_rl)[punch_cnt - 1].length -= 2025 new_rl[new_1st_cnt].length; 2026 else 2027 (*punch_rl)[punch_cnt - 1].length = first_cnt; 2028 } 2029 2030 /* Adjust vcn */ 2031 if (new_1st_cnt == 0) 2032 new_rl[new_1st_cnt].vcn = 0; 2033 for (i = new_1st_cnt == 0 ? 1 : new_1st_cnt; new_rl[i].length; i++) 2034 new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length; 2035 new_rl[i].vcn = new_rl[i - 1].vcn + new_rl[i - 1].length; 2036 2037 /* Merge left and hole, or hole and right in @new_rl, if left or right 2038 * consists of holes. 2039 */ 2040 merge_cnt = 0; 2041 i = new_1st_cnt == 0 ? 1 : new_1st_cnt; 2042 if (ntfs_rle_lcn_contiguous(&new_rl[i - 1], &new_rl[i])) { 2043 /* Merge right and left */ 2044 s_rl = &new_rl[new_1st_cnt - 1]; 2045 s_rl->length += s_rl[1].length; 2046 merge_cnt = 1; 2047 } 2048 if (merge_cnt) { 2049 struct runlist_element *d_rl, *src_rl; 2050 2051 d_rl = s_rl + 1; 2052 src_rl = s_rl + 1 + merge_cnt; 2053 ntfs_rl_mm(new_rl, (int)(d_rl - new_rl), (int)(src_rl - new_rl), 2054 (int)(&new_rl[new_cnt - 1] - src_rl) + 1); 2055 } 2056 2057 (*punch_rl)[punch_cnt].vcn = (*punch_rl)[punch_cnt - 1].vcn + 2058 (*punch_rl)[punch_cnt - 1].length; 2059 2060 /* punch_cnt elements of dst are extracted */ 2061 *new_rl_cnt = dst_cnt - (punch_cnt - (int)begin_split - (int)end_split) - 2062 merge_cnt; 2063 2064 kvfree(dst_rl); 2065 return new_rl; 2066 } 2067