// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/overflow.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* The runs_tree is a contiguous memory area. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};

/*
 * run_lookup - Look up the index of the first MCB entry that is <= vcn.
 *
 * On success it returns true and sets the @index parameter to the
 * index of the entry found. If the entry is missing from the list,
 * @index is set to the insertion position for the entry in question.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check the boundary cases first, since they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
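/*
 * Illustrative sketch (not part of the driver): how run_lookup() reports
 * hits and insertion points. Assuming a tree holding the two runs
 * { {vcn=0, len=4}, {vcn=8, len=2} }:
 *
 *	run_lookup(run, 2, &i);		// true,  i == 0 (inside the first run)
 *	run_lookup(run, 5, &i);		// false, i == 1 (insertion point in the gap)
 *	run_lookup(run, 9, &i);		// true,  i == 1 (inside the last run)
 *	run_lookup(run, 12, &i);	// false, i == 2 (past the end)
 */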
/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if the runs are not adjacent to each other. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at @index overlaps with the next one,
		 * then either adjust its start position
		 * or (if it matches completely) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Move on if the sparse mode of the current
		 * and the next run does not match.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Stop if the first volume block of the next run
		 * does not follow the last volume block
		 * of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * The next and the current runs are siblings.
		 * Eat/join them.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is fully mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If we hit a range, it has to be truncated.
	 * If the truncated range becomes empty,
	 * it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the position that
	 * should be thrown away (including @index itself).
	 * Simply set the limit.
	 */
	run->count = index;

	/* Do not reallocate the 'runs' array. Only free it if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}
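/*
 * Illustrative sketch (not part of the driver): truncation semantics for a
 * tree holding { {vcn=0, len=4}, {vcn=8, len=4} }:
 *
 *	run_truncate(run, 10);		// tail run shrinks to {vcn=8, len=2}
 *	run_truncate_head(run, 2);	// head run shrinks to {vcn=2, len=2}
 *	run_truncate(run, 0);		// drops all runs and frees the array
 */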
/*
 * run_add_entry
 *
 * Set the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Look up the insertion point.
	 *
	 * Execute a bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * A shortcut here is the case where the range
	 * was not found, but the one being added
	 * continues the previous run.
	 * In this case we can directly use the
	 * existing range as our start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first check whether the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * The range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check the allocated space.
		 * If it is not enough for one more entry,
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use a power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated, then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN ?
						   SPARSE_LCN :
						   (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* The lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits, then we are done.
		 * Otherwise extend the found one and fall through
		 * to the range join code.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * Now normalize, starting from the insertion point.
	 * It is possible that no insertion was needed, in case
	 * the start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * we have to add an extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
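/*
 * Illustrative sketch (not part of the driver): run_add_entry() merges
 * adjacent pieces because run_consolidate() is called after each insertion:
 *
 *	run_add_entry(run, 0, 100, 4, false);		// {vcn=0, lcn=100, len=4}
 *	run_add_entry(run, 4, 104, 4, false);		// merged into {vcn=0, lcn=100, len=8}
 *	run_add_entry(run, 16, SPARSE_LCN, 4, false);	// stays a separate sparse run
 */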
/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse the tail of the run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}

/* run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(insert_range).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split the fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}

/*
 * run_get_entry - Return the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}
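/*
 * Illustrative sketch (not part of the driver): run_get_entry() is the
 * natural way to walk every mapped fragment in order:
 *
 *	CLST vcn, lcn, len;
 *	size_t i;
 *
 *	for (i = 0; run_get_entry(run, i, &vcn, &lcn, &len); i++) {
 *		// fragment [vcn, vcn + len) is stored at lcn
 *	}
 */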
/*
 * run_packed_size - Calculate the size of a packed int64.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif
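/*
 * Illustrative worked examples (not part of the driver) for
 * run_packed_size(): it returns the minimal byte count that still
 * round-trips the sign of the value:
 *
 *	run_packed_size(0x20) == 1	// 20
 *	run_packed_size(0x80) == 2	// 80 00, extra byte keeps it non-negative
 *	run_packed_size(-2) == 1	// FE
 *	run_packed_size(0x1234) == 2	// 34 12 (little-endian)
 */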
/*
 * run_pack - Pack runs into a buffer.
 *
 * packed_vcns - How many VCNs we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat the cycle above and pack the runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes the packed dlcn takes. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack the run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of the run. */
			run_pack_s64(run_buf, size_size, len);

			run_buf += size_size;
			/* Pack the offset from the previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:
	/* Store the last zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;
}
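/*
 * Illustrative sketch (not part of the driver): packing a single run
 * {len = 0x20, lcn = 0x30} with prev_lcn == 0 yields the byte stream
 *
 *	11 20 30 00
 *
 * header 0x11 (size_size == 1, offset_size == 1), the length byte 0x20,
 * the signed LCN delta 0x30 and the terminating zero header that
 * run_pack() always stores last.
 */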
/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: Error if negative, or the number of bytes actually used.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       int run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	if (run_buf_size < 0)
		return -EINVAL;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little-endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > sizeof(len))
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= sizeof(s64)) {
			s64 dlcn;

			/* The initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;

			if (check_add_overflow(prev_lcn, dlcn, &lcn))
				return -EINVAL;
			prev_lcn = lcn;
		} else {
			/* The size of 'dlcn' can't be > 8. */
			return -EINVAL;
		}

		if (check_add_overflow(vcn64, len, &next_vcn))
			return -EINVAL;

		/* Check the boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* The LCN range is out of the volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check the run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing them in the run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Unexpected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
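/*
 * Illustrative sketch (not part of the driver): an offset_size of zero
 * marks a sparse fragment. The byte stream
 *
 *	01 10 11 20 30 00
 *
 * decodes to a sparse run {vcn = svcn, len = 0x10} (header 0x01 carries no
 * LCN bytes) followed by {vcn = svcn + 0x10, lcn = 0x30, len = 0x20};
 * prev_lcn is left untouched by the sparse fragment.
 */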
#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks the unpacked runs before they are used in the bitmap.
 *
 * Return: Error if negative, or the number of bytes actually used.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  int run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index, done;
	bool ok, zone;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
		/* Check for free blocks. */
		ok = !zone && wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like the volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (!down_write_trylock(&wnd->rw_lock))
			continue;

		if (zone) {
			/*
			 * Range [lcn, lcn + len) intersects with the zone.
			 * To avoid complications with the zone, just turn it off.
			 */
			wnd_zone_set(wnd, 0, 0);
		}

		/* Mark all zero bits as used in the range [lcn, lcn + len). */
		err = wnd_set_used_safe(wnd, lcn, len, &done);
		if (zone) {
			/* Restore the zone. Lock the mft run. */
			struct rw_semaphore *lock =
				is_mounted(sbi) ? &sbi->mft.ni->file.run_lock :
						  NULL;
			if (lock)
				down_read(lock);
			ntfs_refresh_zone(sbi);
			if (lock)
				up_read(lock);
		}
		up_write(&wnd->rw_lock);
		if (err)
			return err;
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest VCN from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		if (check_add_overflow(vcn64, len, &vcn64))
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of a run.
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}
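/*
 * Illustrative sketch (not part of the driver): run_clone() copies into a
 * caller-owned tree, reusing the destination buffer when it is big enough.
 * run_close() here is assumed to be the helper from ntfs_fs.h that frees
 * the array:
 *
 *	struct runs_tree copy = {};
 *
 *	if (run_clone(run, &copy))
 *		return -ENOMEM;
 *	// ... use 'copy' ...
 *	run_close(&copy);
 */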