1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * 4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. 5 * 6 */ 7 8 #include <linux/blkdev.h> 9 #include <linux/buffer_head.h> 10 #include <linux/fs.h> 11 #include <linux/kernel.h> 12 13 #include "debug.h" 14 #include "ntfs.h" 15 #include "ntfs_fs.h" 16 17 static const struct INDEX_NAMES { 18 const __le16 *name; 19 u8 name_len; 20 } s_index_names[INDEX_MUTEX_TOTAL] = { 21 { I30_NAME, ARRAY_SIZE(I30_NAME) }, { SII_NAME, ARRAY_SIZE(SII_NAME) }, 22 { SDH_NAME, ARRAY_SIZE(SDH_NAME) }, { SO_NAME, ARRAY_SIZE(SO_NAME) }, 23 { SQ_NAME, ARRAY_SIZE(SQ_NAME) }, { SR_NAME, ARRAY_SIZE(SR_NAME) }, 24 }; 25 26 /* 27 * cmp_fnames - Compare two names in index. 28 * 29 * if l1 != 0 30 * Both names are little endian on-disk ATTR_FILE_NAME structs. 31 * else 32 * key1 - cpu_str, key2 - ATTR_FILE_NAME 33 */ 34 static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2, 35 const void *data) 36 { 37 const struct ATTR_FILE_NAME *f2 = key2; 38 const struct ntfs_sb_info *sbi = data; 39 const struct ATTR_FILE_NAME *f1; 40 u16 fsize2; 41 bool both_case; 42 43 if (l2 <= offsetof(struct ATTR_FILE_NAME, name)) 44 return -1; 45 46 fsize2 = fname_full_size(f2); 47 if (l2 < fsize2) 48 return -1; 49 50 both_case = f2->type != FILE_NAME_DOS && !sbi->options->nocase; 51 if (!l1) { 52 const struct le_str *s2 = (struct le_str *)&f2->name_len; 53 54 /* 55 * If names are equal (case insensitive) 56 * try to compare it case sensitive. 57 */ 58 return ntfs_cmp_names_cpu(key1, s2, sbi->upcase, both_case); 59 } 60 61 f1 = key1; 62 return ntfs_cmp_names(f1->name, f1->name_len, f2->name, f2->name_len, 63 sbi->upcase, both_case); 64 } 65 66 /* 67 * cmp_uint - $SII of $Secure and $Q of Quota 68 */ 69 static int cmp_uint(const void *key1, size_t l1, const void *key2, size_t l2, 70 const void *data) 71 { 72 const u32 *k1 = key1; 73 const u32 *k2 = key2; 74 75 if (l2 < sizeof(u32)) 76 return -1; 77 78 if (*k1 < *k2) 79 return -1; 80 if (*k1 > *k2) 81 return 1; 82 return 0; 83 } 84 85 /* 86 * cmp_sdh - $SDH of $Secure 87 */ 88 static int cmp_sdh(const void *key1, size_t l1, const void *key2, size_t l2, 89 const void *data) 90 { 91 const struct SECURITY_KEY *k1 = key1; 92 const struct SECURITY_KEY *k2 = key2; 93 u32 t1, t2; 94 95 if (l2 < sizeof(struct SECURITY_KEY)) 96 return -1; 97 98 t1 = le32_to_cpu(k1->hash); 99 t2 = le32_to_cpu(k2->hash); 100 101 /* First value is a hash value itself. */ 102 if (t1 < t2) 103 return -1; 104 if (t1 > t2) 105 return 1; 106 107 /* Second value is security Id. */ 108 if (data) { 109 t1 = le32_to_cpu(k1->sec_id); 110 t2 = le32_to_cpu(k2->sec_id); 111 if (t1 < t2) 112 return -1; 113 if (t1 > t2) 114 return 1; 115 } 116 117 return 0; 118 } 119 120 /* 121 * cmp_uints - $O of ObjId and "$R" for Reparse. 122 */ 123 static int cmp_uints(const void *key1, size_t l1, const void *key2, size_t l2, 124 const void *data) 125 { 126 const __le32 *k1 = key1; 127 const __le32 *k2 = key2; 128 size_t count; 129 130 if ((size_t)data == 1) { 131 /* 132 * ni_delete_all -> ntfs_remove_reparse -> 133 * delete all with this reference. 134 * k1, k2 - pointers to REPARSE_KEY 135 */ 136 137 k1 += 1; // Skip REPARSE_KEY.ReparseTag 138 k2 += 1; // Skip REPARSE_KEY.ReparseTag 139 if (l2 <= sizeof(int)) 140 return -1; 141 l2 -= sizeof(int); 142 if (l1 <= sizeof(int)) 143 return 1; 144 l1 -= sizeof(int); 145 } 146 147 if (l2 < sizeof(int)) 148 return -1; 149 150 for (count = min(l1, l2) >> 2; count > 0; --count, ++k1, ++k2) { 151 u32 t1 = le32_to_cpu(*k1); 152 u32 t2 = le32_to_cpu(*k2); 153 154 if (t1 > t2) 155 return 1; 156 if (t1 < t2) 157 return -1; 158 } 159 160 if (l1 > l2) 161 return 1; 162 if (l1 < l2) 163 return -1; 164 165 return 0; 166 } 167 168 static inline NTFS_CMP_FUNC get_cmp_func(const struct INDEX_ROOT *root) 169 { 170 switch (root->type) { 171 case ATTR_NAME: 172 if (root->rule == NTFS_COLLATION_TYPE_FILENAME) 173 return &cmp_fnames; 174 break; 175 case ATTR_ZERO: 176 switch (root->rule) { 177 case NTFS_COLLATION_TYPE_UINT: 178 return &cmp_uint; 179 case NTFS_COLLATION_TYPE_SECURITY_HASH: 180 return &cmp_sdh; 181 case NTFS_COLLATION_TYPE_UINTS: 182 return &cmp_uints; 183 default: 184 break; 185 } 186 break; 187 default: 188 break; 189 } 190 191 return NULL; 192 } 193 194 struct bmp_buf { 195 struct ATTRIB *b; 196 struct mft_inode *mi; 197 struct buffer_head *bh; 198 ulong *buf; 199 size_t bit; 200 u32 nbits; 201 u64 new_valid; 202 }; 203 204 static int bmp_buf_get(struct ntfs_index *indx, struct ntfs_inode *ni, 205 size_t bit, struct bmp_buf *bbuf) 206 { 207 struct ATTRIB *b; 208 size_t data_size, valid_size, vbo, off = bit >> 3; 209 struct ntfs_sb_info *sbi = ni->mi.sbi; 210 CLST vcn = off >> sbi->cluster_bits; 211 struct ATTR_LIST_ENTRY *le = NULL; 212 struct buffer_head *bh; 213 struct super_block *sb; 214 u32 blocksize; 215 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 216 217 bbuf->bh = NULL; 218 219 b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len, 220 &vcn, &bbuf->mi); 221 bbuf->b = b; 222 if (!b) 223 return -EINVAL; 224 225 if (!b->non_res) { 226 data_size = le32_to_cpu(b->res.data_size); 227 228 if (off >= data_size) 229 return -EINVAL; 230 231 bbuf->buf = (ulong *)resident_data(b); 232 bbuf->bit = 0; 233 bbuf->nbits = data_size * 8; 234 235 return 0; 236 } 237 238 data_size = le64_to_cpu(b->nres.data_size); 239 if (WARN_ON(off >= data_size)) { 240 /* Looks like filesystem error. */ 241 return -EINVAL; 242 } 243 244 valid_size = le64_to_cpu(b->nres.valid_size); 245 246 bh = ntfs_bread_run(sbi, &indx->bitmap_run, off); 247 if (!bh) 248 return -EIO; 249 250 if (IS_ERR(bh)) 251 return PTR_ERR(bh); 252 253 bbuf->bh = bh; 254 255 if (buffer_locked(bh)) 256 __wait_on_buffer(bh); 257 258 lock_buffer(bh); 259 260 sb = sbi->sb; 261 blocksize = sb->s_blocksize; 262 263 vbo = off & ~(size_t)sbi->block_mask; 264 265 bbuf->new_valid = vbo + blocksize; 266 if (bbuf->new_valid <= valid_size) 267 bbuf->new_valid = 0; 268 else if (bbuf->new_valid > data_size) 269 bbuf->new_valid = data_size; 270 271 if (vbo >= valid_size) { 272 memset(bh->b_data, 0, blocksize); 273 } else if (vbo + blocksize > valid_size) { 274 u32 voff = valid_size & sbi->block_mask; 275 276 memset(bh->b_data + voff, 0, blocksize - voff); 277 } 278 279 bbuf->buf = (ulong *)bh->b_data; 280 bbuf->bit = 8 * (off & ~(size_t)sbi->block_mask); 281 bbuf->nbits = 8 * blocksize; 282 283 return 0; 284 } 285 286 static void bmp_buf_put(struct bmp_buf *bbuf, bool dirty) 287 { 288 struct buffer_head *bh = bbuf->bh; 289 struct ATTRIB *b = bbuf->b; 290 291 if (!bh) { 292 if (b && !b->non_res && dirty) 293 bbuf->mi->dirty = true; 294 return; 295 } 296 297 if (!dirty) 298 goto out; 299 300 if (bbuf->new_valid) { 301 b->nres.valid_size = cpu_to_le64(bbuf->new_valid); 302 bbuf->mi->dirty = true; 303 } 304 305 set_buffer_uptodate(bh); 306 mark_buffer_dirty(bh); 307 308 out: 309 unlock_buffer(bh); 310 put_bh(bh); 311 } 312 313 /* 314 * indx_mark_used - Mark the bit @bit as used. 315 */ 316 static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni, 317 size_t bit) 318 { 319 int err; 320 struct bmp_buf bbuf; 321 322 err = bmp_buf_get(indx, ni, bit, &bbuf); 323 if (err) 324 return err; 325 326 __set_bit_le(bit - bbuf.bit, bbuf.buf); 327 328 bmp_buf_put(&bbuf, true); 329 330 return 0; 331 } 332 333 /* 334 * indx_mark_free - Mark the bit @bit as free. 335 */ 336 static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni, 337 size_t bit) 338 { 339 int err; 340 struct bmp_buf bbuf; 341 342 err = bmp_buf_get(indx, ni, bit, &bbuf); 343 if (err) 344 return err; 345 346 __clear_bit_le(bit - bbuf.bit, bbuf.buf); 347 348 bmp_buf_put(&bbuf, true); 349 350 return 0; 351 } 352 353 /* 354 * scan_nres_bitmap 355 * 356 * If ntfs_readdir calls this function (indx_used_bit -> scan_nres_bitmap), 357 * inode is shared locked and no ni_lock. 358 * Use rw_semaphore for read/write access to bitmap_run. 359 */ 360 static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap, 361 struct ntfs_index *indx, size_t from, 362 bool (*fn)(const ulong *buf, u32 bit, u32 bits, 363 size_t *ret), 364 size_t *ret) 365 { 366 struct ntfs_sb_info *sbi = ni->mi.sbi; 367 struct super_block *sb = sbi->sb; 368 struct runs_tree *run = &indx->bitmap_run; 369 struct rw_semaphore *lock = &indx->run_lock; 370 u32 nbits = sb->s_blocksize * 8; 371 u32 blocksize = sb->s_blocksize; 372 u64 valid_size = le64_to_cpu(bitmap->nres.valid_size); 373 u64 data_size = le64_to_cpu(bitmap->nres.data_size); 374 sector_t eblock = bytes_to_block(sb, data_size); 375 size_t vbo = from >> 3; 376 sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits; 377 sector_t vblock = vbo >> sb->s_blocksize_bits; 378 sector_t blen, block; 379 CLST lcn, clen, vcn, vcn_next; 380 size_t idx; 381 struct buffer_head *bh; 382 bool ok; 383 384 *ret = MINUS_ONE_T; 385 386 if (vblock >= eblock) 387 return 0; 388 389 from &= nbits - 1; 390 vcn = vbo >> sbi->cluster_bits; 391 392 down_read(lock); 393 ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx); 394 up_read(lock); 395 396 next_run: 397 if (!ok) { 398 int err; 399 const struct INDEX_NAMES *name = &s_index_names[indx->type]; 400 401 down_write(lock); 402 err = attr_load_runs_vcn(ni, ATTR_BITMAP, name->name, 403 name->name_len, run, vcn); 404 up_write(lock); 405 if (err) 406 return err; 407 down_read(lock); 408 ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx); 409 up_read(lock); 410 if (!ok) 411 return -EINVAL; 412 } 413 414 blen = (sector_t)clen * sbi->blocks_per_cluster; 415 block = (sector_t)lcn * sbi->blocks_per_cluster; 416 417 for (; blk < blen; blk++, from = 0) { 418 bh = ntfs_bread(sb, block + blk); 419 if (!bh) 420 return -EIO; 421 422 vbo = (u64)vblock << sb->s_blocksize_bits; 423 if (vbo >= valid_size) { 424 memset(bh->b_data, 0, blocksize); 425 } else if (vbo + blocksize > valid_size) { 426 u32 voff = valid_size & sbi->block_mask; 427 428 memset(bh->b_data + voff, 0, blocksize - voff); 429 } 430 431 if (vbo + blocksize > data_size) 432 nbits = 8 * (data_size - vbo); 433 434 ok = nbits > from ? 435 (*fn)((ulong *)bh->b_data, from, nbits, ret) : 436 false; 437 put_bh(bh); 438 439 if (ok) { 440 *ret += 8 * vbo; 441 return 0; 442 } 443 444 if (++vblock >= eblock) { 445 *ret = MINUS_ONE_T; 446 return 0; 447 } 448 } 449 blk = 0; 450 vcn_next = vcn + clen; 451 down_read(lock); 452 ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next; 453 if (!ok) 454 vcn = vcn_next; 455 up_read(lock); 456 goto next_run; 457 } 458 459 static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret) 460 { 461 size_t pos = find_next_zero_bit_le(buf, bits, bit); 462 463 if (pos >= bits) 464 return false; 465 *ret = pos; 466 return true; 467 } 468 469 /* 470 * indx_find_free - Look for free bit. 471 * 472 * Return: -1 if no free bits. 473 */ 474 static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni, 475 size_t *bit, struct ATTRIB **bitmap) 476 { 477 struct ATTRIB *b; 478 struct ATTR_LIST_ENTRY *le = NULL; 479 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 480 int err; 481 482 b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len, 483 NULL, NULL); 484 485 if (!b) 486 return -ENOENT; 487 488 *bitmap = b; 489 *bit = MINUS_ONE_T; 490 491 if (!b->non_res) { 492 u32 nbits = 8 * le32_to_cpu(b->res.data_size); 493 size_t pos = find_next_zero_bit_le(resident_data(b), nbits, 0); 494 495 if (pos < nbits) 496 *bit = pos; 497 } else { 498 err = scan_nres_bitmap(ni, b, indx, 0, &scan_for_free, bit); 499 500 if (err) 501 return err; 502 } 503 504 return 0; 505 } 506 507 static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret) 508 { 509 size_t pos = find_next_bit_le(buf, bits, bit); 510 511 if (pos >= bits) 512 return false; 513 *ret = pos; 514 return true; 515 } 516 517 /* 518 * indx_used_bit - Look for used bit. 519 * 520 * Return: MINUS_ONE_T if no used bits. 521 */ 522 int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit) 523 { 524 struct ATTRIB *b; 525 struct ATTR_LIST_ENTRY *le = NULL; 526 size_t from = *bit; 527 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 528 int err; 529 530 b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len, 531 NULL, NULL); 532 533 if (!b) 534 return -ENOENT; 535 536 *bit = MINUS_ONE_T; 537 538 if (!b->non_res) { 539 u32 nbits = le32_to_cpu(b->res.data_size) * 8; 540 size_t pos = find_next_bit_le(resident_data(b), nbits, from); 541 542 if (pos < nbits) 543 *bit = pos; 544 } else { 545 err = scan_nres_bitmap(ni, b, indx, from, &scan_for_used, bit); 546 if (err) 547 return err; 548 } 549 550 return 0; 551 } 552 553 /* 554 * hdr_find_split 555 * 556 * Find a point at which the index allocation buffer would like to be split. 557 * NOTE: This function should never return 'END' entry NULL returns on error. 558 */ 559 static const struct NTFS_DE *hdr_find_split(const struct INDEX_HDR *hdr) 560 { 561 size_t o; 562 const struct NTFS_DE *e = hdr_first_de(hdr); 563 u32 used_2 = le32_to_cpu(hdr->used) >> 1; 564 u16 esize; 565 566 if (!e || de_is_last(e)) 567 return NULL; 568 569 esize = le16_to_cpu(e->size); 570 for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) { 571 const struct NTFS_DE *p = e; 572 573 e = Add2Ptr(hdr, o); 574 575 /* We must not return END entry. */ 576 if (de_is_last(e)) 577 return p; 578 579 esize = le16_to_cpu(e->size); 580 } 581 582 return e; 583 } 584 585 /* 586 * hdr_insert_head - Insert some entries at the beginning of the buffer. 587 * 588 * It is used to insert entries into a newly-created buffer. 589 */ 590 static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr, 591 const void *ins, u32 ins_bytes) 592 { 593 u32 to_move; 594 struct NTFS_DE *e = hdr_first_de(hdr); 595 u32 used = le32_to_cpu(hdr->used); 596 597 if (!e) 598 return NULL; 599 600 /* Now we just make room for the inserted entries and jam it in. */ 601 to_move = used - le32_to_cpu(hdr->de_off); 602 memmove(Add2Ptr(e, ins_bytes), e, to_move); 603 memcpy(e, ins, ins_bytes); 604 hdr->used = cpu_to_le32(used + ins_bytes); 605 606 return e; 607 } 608 609 /* 610 * index_hdr_check 611 * 612 * return true if INDEX_HDR is valid 613 */ 614 static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) 615 { 616 u32 end = le32_to_cpu(hdr->used); 617 u32 tot = le32_to_cpu(hdr->total); 618 u32 off = le32_to_cpu(hdr->de_off); 619 620 if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || 621 off + sizeof(struct NTFS_DE) > end) { 622 /* incorrect index buffer. */ 623 return false; 624 } 625 626 return true; 627 } 628 629 /* 630 * index_buf_check 631 * 632 * return true if INDEX_BUFFER seems is valid 633 */ 634 static bool index_buf_check(const struct INDEX_BUFFER *ib, u32 bytes, 635 const CLST *vbn) 636 { 637 const struct NTFS_RECORD_HEADER *rhdr = &ib->rhdr; 638 u16 fo = le16_to_cpu(rhdr->fix_off); 639 u16 fn = le16_to_cpu(rhdr->fix_num); 640 641 if (bytes <= offsetof(struct INDEX_BUFFER, ihdr) || 642 rhdr->sign != NTFS_INDX_SIGNATURE || 643 fo < sizeof(struct INDEX_BUFFER) 644 /* Check index buffer vbn. */ 645 || (vbn && *vbn != le64_to_cpu(ib->vbn)) || (fo % sizeof(short)) || 646 fo + fn * sizeof(short) >= bytes || 647 fn != ((bytes >> SECTOR_SHIFT) + 1)) { 648 /* incorrect index buffer. */ 649 return false; 650 } 651 652 return index_hdr_check(&ib->ihdr, 653 bytes - offsetof(struct INDEX_BUFFER, ihdr)); 654 } 655 656 void fnd_clear(struct ntfs_fnd *fnd) 657 { 658 int i; 659 660 for (i = fnd->level - 1; i >= 0; i--) { 661 struct indx_node *n = fnd->nodes[i]; 662 663 if (!n) 664 continue; 665 666 put_indx_node(n); 667 fnd->nodes[i] = NULL; 668 } 669 fnd->level = 0; 670 fnd->root_de = NULL; 671 } 672 673 static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n, 674 struct NTFS_DE *e) 675 { 676 int i = fnd->level; 677 678 if (i < 0 || i >= ARRAY_SIZE(fnd->nodes)) 679 return -EINVAL; 680 fnd->nodes[i] = n; 681 fnd->de[i] = e; 682 fnd->level += 1; 683 return 0; 684 } 685 686 static struct indx_node *fnd_pop(struct ntfs_fnd *fnd) 687 { 688 struct indx_node *n; 689 int i = fnd->level; 690 691 i -= 1; 692 n = fnd->nodes[i]; 693 fnd->nodes[i] = NULL; 694 fnd->level = i; 695 696 return n; 697 } 698 699 static bool fnd_is_empty(struct ntfs_fnd *fnd) 700 { 701 if (!fnd->level) 702 return !fnd->root_de; 703 704 return !fnd->de[fnd->level - 1]; 705 } 706 707 /* 708 * hdr_find_e - Locate an entry the index buffer. 709 * 710 * If no matching entry is found, it returns the first entry which is greater 711 * than the desired entry If the search key is greater than all the entries the 712 * buffer, it returns the 'end' entry. This function does a binary search of the 713 * current index buffer, for the first entry that is <= to the search value. 714 * 715 * Return: NULL if error. 716 */ 717 static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, 718 const struct INDEX_HDR *hdr, const void *key, 719 size_t key_len, const void *ctx, int *diff) 720 { 721 struct NTFS_DE *e, *found = NULL; 722 NTFS_CMP_FUNC cmp = indx->cmp; 723 int min_idx = 0, mid_idx, max_idx = 0; 724 int diff2; 725 int table_size = 8; 726 u32 e_size, e_key_len; 727 u32 end = le32_to_cpu(hdr->used); 728 u32 off = le32_to_cpu(hdr->de_off); 729 u32 total = le32_to_cpu(hdr->total); 730 u16 offs[128]; 731 732 if (unlikely(!cmp)) 733 return NULL; 734 735 fill_table: 736 if (end > total) 737 return NULL; 738 739 if (off + sizeof(struct NTFS_DE) > end) 740 return NULL; 741 742 e = Add2Ptr(hdr, off); 743 e_size = le16_to_cpu(e->size); 744 745 if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) 746 return NULL; 747 748 if (!de_is_last(e)) { 749 offs[max_idx] = off; 750 off += e_size; 751 752 max_idx++; 753 if (max_idx < table_size) 754 goto fill_table; 755 756 max_idx--; 757 } 758 759 binary_search: 760 e_key_len = le16_to_cpu(e->key_size); 761 762 diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx); 763 if (diff2 > 0) { 764 if (found) { 765 min_idx = mid_idx + 1; 766 } else { 767 if (de_is_last(e)) 768 return NULL; 769 770 max_idx = 0; 771 table_size = min(table_size * 2, (int)ARRAY_SIZE(offs)); 772 goto fill_table; 773 } 774 } else if (diff2 < 0) { 775 if (found) 776 max_idx = mid_idx - 1; 777 else 778 max_idx--; 779 780 found = e; 781 } else { 782 *diff = 0; 783 return e; 784 } 785 786 if (min_idx > max_idx) { 787 *diff = -1; 788 return found; 789 } 790 791 mid_idx = (min_idx + max_idx) >> 1; 792 e = Add2Ptr(hdr, offs[mid_idx]); 793 794 goto binary_search; 795 } 796 797 /* 798 * hdr_insert_de - Insert an index entry into the buffer. 799 * 800 * 'before' should be a pointer previously returned from hdr_find_e. 801 */ 802 static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx, 803 struct INDEX_HDR *hdr, 804 const struct NTFS_DE *de, 805 struct NTFS_DE *before, const void *ctx) 806 { 807 int diff; 808 size_t off = PtrOffset(hdr, before); 809 u32 used = le32_to_cpu(hdr->used); 810 u32 total = le32_to_cpu(hdr->total); 811 u16 de_size = le16_to_cpu(de->size); 812 813 /* First, check to see if there's enough room. */ 814 if (used + de_size > total) 815 return NULL; 816 817 /* We know there's enough space, so we know we'll succeed. */ 818 if (before) { 819 /* Check that before is inside Index. */ 820 if (off >= used || off < le32_to_cpu(hdr->de_off) || 821 off + le16_to_cpu(before->size) > total) { 822 return NULL; 823 } 824 goto ok; 825 } 826 /* No insert point is applied. Get it manually. */ 827 before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx, 828 &diff); 829 if (!before) 830 return NULL; 831 off = PtrOffset(hdr, before); 832 833 ok: 834 /* Now we just make room for the entry and jam it in. */ 835 memmove(Add2Ptr(before, de_size), before, used - off); 836 837 hdr->used = cpu_to_le32(used + de_size); 838 memcpy(before, de, de_size); 839 840 return before; 841 } 842 843 /* 844 * hdr_delete_de - Remove an entry from the index buffer. 845 */ 846 static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr, 847 struct NTFS_DE *re) 848 { 849 u32 used = le32_to_cpu(hdr->used); 850 u16 esize = le16_to_cpu(re->size); 851 u32 off = PtrOffset(hdr, re); 852 int bytes = used - (off + esize); 853 854 /* check INDEX_HDR valid before using INDEX_HDR */ 855 if (!check_index_header(hdr, le32_to_cpu(hdr->total))) 856 return NULL; 857 858 if (off >= used || esize < sizeof(struct NTFS_DE) || 859 bytes < sizeof(struct NTFS_DE)) 860 return NULL; 861 862 hdr->used = cpu_to_le32(used - esize); 863 memmove(re, Add2Ptr(re, esize), bytes); 864 865 return re; 866 } 867 868 void indx_clear(struct ntfs_index *indx) 869 { 870 run_close(&indx->alloc_run); 871 run_close(&indx->bitmap_run); 872 } 873 874 int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi, 875 const struct ATTRIB *attr, enum index_mutex_classed type) 876 { 877 u32 t32; 878 const struct INDEX_ROOT *root = resident_data(attr); 879 880 t32 = le32_to_cpu(attr->res.data_size); 881 if (t32 <= offsetof(struct INDEX_ROOT, ihdr) || 882 !index_hdr_check(&root->ihdr, 883 t32 - offsetof(struct INDEX_ROOT, ihdr))) { 884 goto out; 885 } 886 887 /* Check root fields. */ 888 if (!root->index_block_clst) 889 goto out; 890 891 indx->type = type; 892 indx->idx2vbn_bits = __ffs(root->index_block_clst); 893 894 t32 = le32_to_cpu(root->index_block_size); 895 indx->index_bits = blksize_bits(t32); 896 897 /* Check index record size. */ 898 if (t32 < sbi->cluster_size) { 899 /* Index record is smaller than a cluster, use 512 blocks. */ 900 if (t32 != root->index_block_clst * SECTOR_SIZE) 901 goto out; 902 903 /* Check alignment to a cluster. */ 904 if ((sbi->cluster_size >> SECTOR_SHIFT) & 905 (root->index_block_clst - 1)) { 906 goto out; 907 } 908 909 indx->vbn2vbo_bits = SECTOR_SHIFT; 910 } else { 911 /* Index record must be a multiple of cluster size. */ 912 if (t32 != root->index_block_clst << sbi->cluster_bits) 913 goto out; 914 915 indx->vbn2vbo_bits = sbi->cluster_bits; 916 } 917 918 init_rwsem(&indx->run_lock); 919 920 indx->cmp = get_cmp_func(root); 921 if (!indx->cmp) 922 goto out; 923 924 return 0; 925 926 out: 927 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); 928 return -EINVAL; 929 } 930 931 static struct indx_node *indx_new(struct ntfs_index *indx, 932 struct ntfs_inode *ni, CLST vbn, 933 const __le64 *sub_vbn) 934 { 935 int err; 936 struct NTFS_DE *e; 937 struct indx_node *r; 938 struct INDEX_HDR *hdr; 939 struct INDEX_BUFFER *index; 940 u64 vbo = (u64)vbn << indx->vbn2vbo_bits; 941 u32 bytes = 1u << indx->index_bits; 942 u16 fn; 943 u32 eo; 944 945 r = kzalloc(sizeof(struct indx_node), GFP_NOFS); 946 if (!r) 947 return ERR_PTR(-ENOMEM); 948 949 index = kzalloc(bytes, GFP_NOFS); 950 if (!index) { 951 kfree(r); 952 return ERR_PTR(-ENOMEM); 953 } 954 955 err = ntfs_get_bh(ni->mi.sbi, &indx->alloc_run, vbo, bytes, &r->nb); 956 957 if (err) { 958 kfree(index); 959 kfree(r); 960 return ERR_PTR(err); 961 } 962 963 /* Create header. */ 964 index->rhdr.sign = NTFS_INDX_SIGNATURE; 965 index->rhdr.fix_off = cpu_to_le16(sizeof(struct INDEX_BUFFER)); // 0x28 966 fn = (bytes >> SECTOR_SHIFT) + 1; // 9 967 index->rhdr.fix_num = cpu_to_le16(fn); 968 index->vbn = cpu_to_le64(vbn); 969 hdr = &index->ihdr; 970 eo = ALIGN(sizeof(struct INDEX_BUFFER) + fn * sizeof(short), 8); 971 hdr->de_off = cpu_to_le32(eo); 972 973 e = Add2Ptr(hdr, eo); 974 975 if (sub_vbn) { 976 e->flags = NTFS_IE_LAST | NTFS_IE_HAS_SUBNODES; 977 e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64)); 978 hdr->used = 979 cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64)); 980 de_set_vbn_le(e, *sub_vbn); 981 hdr->flags = 1; 982 } else { 983 e->size = cpu_to_le16(sizeof(struct NTFS_DE)); 984 hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE)); 985 e->flags = NTFS_IE_LAST; 986 } 987 988 hdr->total = cpu_to_le32(bytes - offsetof(struct INDEX_BUFFER, ihdr)); 989 990 r->index = index; 991 return r; 992 } 993 994 struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni, 995 struct ATTRIB **attr, struct mft_inode **mi) 996 { 997 struct ATTR_LIST_ENTRY *le = NULL; 998 struct ATTRIB *a; 999 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 1000 struct INDEX_ROOT *root; 1001 1002 a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL, 1003 mi); 1004 if (!a) 1005 return NULL; 1006 1007 if (attr) 1008 *attr = a; 1009 1010 root = resident_data_ex(a, sizeof(struct INDEX_ROOT)); 1011 1012 /* length check */ 1013 if (root && 1014 offsetof(struct INDEX_ROOT, ihdr) + le32_to_cpu(root->ihdr.used) > 1015 le32_to_cpu(a->res.data_size)) { 1016 return NULL; 1017 } 1018 1019 return root; 1020 } 1021 1022 static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni, 1023 struct indx_node *node, int sync) 1024 { 1025 struct INDEX_BUFFER *ib = node->index; 1026 1027 return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync); 1028 } 1029 1030 /* 1031 * indx_read 1032 * 1033 * If ntfs_readdir calls this function 1034 * inode is shared locked and no ni_lock. 1035 * Use rw_semaphore for read/write access to alloc_run. 1036 */ 1037 int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, 1038 struct indx_node **node) 1039 { 1040 int err; 1041 struct INDEX_BUFFER *ib; 1042 struct runs_tree *run = &indx->alloc_run; 1043 struct rw_semaphore *lock = &indx->run_lock; 1044 u64 vbo = (u64)vbn << indx->vbn2vbo_bits; 1045 u32 bytes = 1u << indx->index_bits; 1046 struct indx_node *in = *node; 1047 const struct INDEX_NAMES *name; 1048 1049 if (!in) { 1050 in = kzalloc(sizeof(struct indx_node), GFP_NOFS); 1051 if (!in) 1052 return -ENOMEM; 1053 } else { 1054 nb_put(&in->nb); 1055 } 1056 1057 ib = in->index; 1058 if (!ib) { 1059 ib = kmalloc(bytes, GFP_NOFS); 1060 if (!ib) { 1061 err = -ENOMEM; 1062 goto out; 1063 } 1064 } 1065 1066 down_read(lock); 1067 err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb); 1068 up_read(lock); 1069 if (!err) 1070 goto ok; 1071 1072 if (err == -E_NTFS_FIXUP) 1073 goto ok; 1074 1075 if (err != -ENOENT) 1076 goto out; 1077 1078 name = &s_index_names[indx->type]; 1079 down_write(lock); 1080 err = attr_load_runs_range(ni, ATTR_ALLOC, name->name, name->name_len, 1081 run, vbo, vbo + bytes); 1082 up_write(lock); 1083 if (err) 1084 goto out; 1085 1086 down_read(lock); 1087 err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb); 1088 up_read(lock); 1089 if (err == -E_NTFS_FIXUP) 1090 goto ok; 1091 1092 if (err) 1093 goto out; 1094 1095 ok: 1096 if (!index_buf_check(ib, bytes, &vbn)) { 1097 ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); 1098 ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); 1099 err = -EINVAL; 1100 goto out; 1101 } 1102 1103 if (err == -E_NTFS_FIXUP) { 1104 ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0); 1105 err = 0; 1106 } 1107 1108 /* check for index header length */ 1109 if (offsetof(struct INDEX_BUFFER, ihdr) + le32_to_cpu(ib->ihdr.used) > 1110 bytes) { 1111 err = -EINVAL; 1112 goto out; 1113 } 1114 1115 in->index = ib; 1116 *node = in; 1117 1118 out: 1119 if (err == -E_NTFS_CORRUPT) { 1120 ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); 1121 ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); 1122 err = -EINVAL; 1123 } 1124 1125 if (ib != in->index) 1126 kfree(ib); 1127 1128 if (*node != in) { 1129 nb_put(&in->nb); 1130 kfree(in); 1131 } 1132 1133 return err; 1134 } 1135 1136 /* 1137 * indx_find - Scan NTFS directory for given entry. 1138 */ 1139 int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni, 1140 const struct INDEX_ROOT *root, const void *key, size_t key_len, 1141 const void *ctx, int *diff, struct NTFS_DE **entry, 1142 struct ntfs_fnd *fnd) 1143 { 1144 int err; 1145 struct NTFS_DE *e; 1146 struct indx_node *node; 1147 1148 if (!root) 1149 root = indx_get_root(&ni->dir, ni, NULL, NULL); 1150 1151 if (!root) { 1152 /* Should not happen. */ 1153 return -EINVAL; 1154 } 1155 1156 /* Check cache. */ 1157 e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de; 1158 if (e && !de_is_last(e) && 1159 !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) { 1160 *entry = e; 1161 *diff = 0; 1162 return 0; 1163 } 1164 1165 /* Soft finder reset. */ 1166 fnd_clear(fnd); 1167 1168 /* Lookup entry that is <= to the search value. */ 1169 e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff); 1170 if (!e) 1171 return -EINVAL; 1172 1173 fnd->root_de = e; 1174 1175 for (;;) { 1176 node = NULL; 1177 if (*diff >= 0 || !de_has_vcn_ex(e)) 1178 break; 1179 1180 /* Read next level. */ 1181 err = indx_read(indx, ni, de_get_vbn(e), &node); 1182 if (err) { 1183 /* io error? */ 1184 return err; 1185 } 1186 1187 /* Lookup entry that is <= to the search value. */ 1188 e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx, 1189 diff); 1190 if (!e) { 1191 put_indx_node(node); 1192 return -EINVAL; 1193 } 1194 1195 fnd_push(fnd, node, e); 1196 } 1197 1198 *entry = e; 1199 return 0; 1200 } 1201 1202 int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni, 1203 const struct INDEX_ROOT *root, struct NTFS_DE **entry, 1204 struct ntfs_fnd *fnd) 1205 { 1206 int err; 1207 struct indx_node *n = NULL; 1208 struct NTFS_DE *e; 1209 size_t iter = 0; 1210 int level = fnd->level; 1211 1212 if (!*entry) { 1213 /* Start find. */ 1214 e = hdr_first_de(&root->ihdr); 1215 if (!e) 1216 return 0; 1217 fnd_clear(fnd); 1218 fnd->root_de = e; 1219 } else if (!level) { 1220 if (de_is_last(fnd->root_de)) { 1221 *entry = NULL; 1222 return 0; 1223 } 1224 1225 e = hdr_next_de(&root->ihdr, fnd->root_de); 1226 if (!e) 1227 return -EINVAL; 1228 fnd->root_de = e; 1229 } else { 1230 n = fnd->nodes[level - 1]; 1231 e = fnd->de[level - 1]; 1232 1233 if (de_is_last(e)) 1234 goto pop_level; 1235 1236 e = hdr_next_de(&n->index->ihdr, e); 1237 if (!e) 1238 return -EINVAL; 1239 1240 fnd->de[level - 1] = e; 1241 } 1242 1243 /* Just to avoid tree cycle. */ 1244 next_iter: 1245 if (iter++ >= 1000) 1246 return -EINVAL; 1247 1248 while (de_has_vcn_ex(e)) { 1249 if (le16_to_cpu(e->size) < 1250 sizeof(struct NTFS_DE) + sizeof(u64)) { 1251 if (n) { 1252 fnd_pop(fnd); 1253 kfree(n); 1254 } 1255 return -EINVAL; 1256 } 1257 1258 /* Read next level. */ 1259 err = indx_read(indx, ni, de_get_vbn(e), &n); 1260 if (err) 1261 return err; 1262 1263 /* Try next level. */ 1264 e = hdr_first_de(&n->index->ihdr); 1265 if (!e) { 1266 kfree(n); 1267 return -EINVAL; 1268 } 1269 1270 fnd_push(fnd, n, e); 1271 } 1272 1273 if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) { 1274 *entry = e; 1275 return 0; 1276 } 1277 1278 pop_level: 1279 for (;;) { 1280 if (!de_is_last(e)) 1281 goto next_iter; 1282 1283 /* Pop one level. */ 1284 if (n) { 1285 fnd_pop(fnd); 1286 kfree(n); 1287 } 1288 1289 level = fnd->level; 1290 1291 if (level) { 1292 n = fnd->nodes[level - 1]; 1293 e = fnd->de[level - 1]; 1294 } else if (fnd->root_de) { 1295 n = NULL; 1296 e = fnd->root_de; 1297 fnd->root_de = NULL; 1298 } else { 1299 *entry = NULL; 1300 return 0; 1301 } 1302 1303 if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) { 1304 *entry = e; 1305 if (!fnd->root_de) 1306 fnd->root_de = e; 1307 return 0; 1308 } 1309 } 1310 } 1311 1312 int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni, 1313 const struct INDEX_ROOT *root, struct NTFS_DE **entry, 1314 size_t *off, struct ntfs_fnd *fnd) 1315 { 1316 int err; 1317 struct indx_node *n = NULL; 1318 struct NTFS_DE *e = NULL; 1319 struct NTFS_DE *e2; 1320 size_t bit; 1321 CLST next_used_vbn; 1322 CLST next_vbn; 1323 u32 record_size = ni->mi.sbi->record_size; 1324 1325 /* Use non sorted algorithm. */ 1326 if (!*entry) { 1327 /* This is the first call. */ 1328 e = hdr_first_de(&root->ihdr); 1329 if (!e) 1330 return 0; 1331 fnd_clear(fnd); 1332 fnd->root_de = e; 1333 1334 /* The first call with setup of initial element. */ 1335 if (*off >= record_size) { 1336 next_vbn = (((*off - record_size) >> indx->index_bits)) 1337 << indx->idx2vbn_bits; 1338 /* Jump inside cycle 'for'. */ 1339 goto next; 1340 } 1341 1342 /* Start enumeration from root. */ 1343 *off = 0; 1344 } else if (!fnd->root_de) 1345 return -EINVAL; 1346 1347 for (;;) { 1348 /* Check if current entry can be used. */ 1349 if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) 1350 goto ok; 1351 1352 if (!fnd->level) { 1353 /* Continue to enumerate root. */ 1354 if (!de_is_last(fnd->root_de)) { 1355 e = hdr_next_de(&root->ihdr, fnd->root_de); 1356 if (!e) 1357 return -EINVAL; 1358 fnd->root_de = e; 1359 continue; 1360 } 1361 1362 /* Start to enumerate indexes from 0. */ 1363 next_vbn = 0; 1364 } else { 1365 /* Continue to enumerate indexes. */ 1366 e2 = fnd->de[fnd->level - 1]; 1367 1368 n = fnd->nodes[fnd->level - 1]; 1369 1370 if (!de_is_last(e2)) { 1371 e = hdr_next_de(&n->index->ihdr, e2); 1372 if (!e) 1373 return -EINVAL; 1374 fnd->de[fnd->level - 1] = e; 1375 continue; 1376 } 1377 1378 /* Continue with next index. */ 1379 next_vbn = le64_to_cpu(n->index->vbn) + 1380 root->index_block_clst; 1381 } 1382 1383 next: 1384 /* Release current index. */ 1385 if (n) { 1386 fnd_pop(fnd); 1387 put_indx_node(n); 1388 n = NULL; 1389 } 1390 1391 /* Skip all free indexes. */ 1392 bit = next_vbn >> indx->idx2vbn_bits; 1393 err = indx_used_bit(indx, ni, &bit); 1394 if (err == -ENOENT || bit == MINUS_ONE_T) { 1395 /* No used indexes. */ 1396 *entry = NULL; 1397 return 0; 1398 } 1399 1400 next_used_vbn = bit << indx->idx2vbn_bits; 1401 1402 /* Read buffer into memory. */ 1403 err = indx_read(indx, ni, next_used_vbn, &n); 1404 if (err) 1405 return err; 1406 1407 e = hdr_first_de(&n->index->ihdr); 1408 fnd_push(fnd, n, e); 1409 if (!e) 1410 return -EINVAL; 1411 } 1412 1413 ok: 1414 /* Return offset to restore enumerator if necessary. */ 1415 if (!n) { 1416 /* 'e' points in root, */ 1417 *off = PtrOffset(&root->ihdr, e); 1418 } else { 1419 /* 'e' points in index, */ 1420 *off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) + 1421 record_size + PtrOffset(&n->index->ihdr, e); 1422 } 1423 1424 *entry = e; 1425 return 0; 1426 } 1427 1428 /* 1429 * indx_create_allocate - Create "Allocation + Bitmap" attributes. 1430 */ 1431 static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, 1432 CLST *vbn) 1433 { 1434 int err; 1435 struct ntfs_sb_info *sbi = ni->mi.sbi; 1436 struct ATTRIB *bitmap; 1437 struct ATTRIB *alloc; 1438 u32 data_size = 1u << indx->index_bits; 1439 u32 alloc_size = ntfs_up_cluster(sbi, data_size); 1440 CLST len = alloc_size >> sbi->cluster_bits; 1441 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 1442 CLST alen; 1443 struct runs_tree run; 1444 1445 run_init(&run); 1446 1447 err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, ALLOCATE_DEF, 1448 &alen, 0, NULL, NULL); 1449 if (err) 1450 goto out; 1451 1452 err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len, 1453 &run, 0, len, 0, &alloc, NULL, NULL); 1454 if (err) 1455 goto out1; 1456 1457 alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size); 1458 1459 err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP, 1460 in->name, in->name_len, &bitmap, NULL, NULL); 1461 if (err) 1462 goto out2; 1463 1464 if (in->name == I30_NAME) { 1465 i_size_write(&ni->vfs_inode, data_size); 1466 inode_set_bytes(&ni->vfs_inode, alloc_size); 1467 } 1468 1469 memcpy(&indx->alloc_run, &run, sizeof(run)); 1470 1471 *vbn = 0; 1472 1473 return 0; 1474 1475 out2: 1476 mi_remove_attr(NULL, &ni->mi, alloc); 1477 1478 out1: 1479 run_deallocate(sbi, &run, false); 1480 1481 out: 1482 return err; 1483 } 1484 1485 /* 1486 * indx_add_allocate - Add clusters to index. 1487 */ 1488 static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, 1489 CLST *vbn) 1490 { 1491 int err; 1492 size_t bit; 1493 u64 data_size; 1494 u64 bmp_size, bmp_size_v; 1495 struct ATTRIB *bmp, *alloc; 1496 struct mft_inode *mi; 1497 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 1498 1499 err = indx_find_free(indx, ni, &bit, &bmp); 1500 if (err) 1501 goto out1; 1502 1503 if (bit != MINUS_ONE_T) { 1504 bmp = NULL; 1505 } else { 1506 if (bmp->non_res) { 1507 bmp_size = le64_to_cpu(bmp->nres.data_size); 1508 bmp_size_v = le64_to_cpu(bmp->nres.valid_size); 1509 } else { 1510 bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size); 1511 } 1512 1513 bit = bmp_size << 3; 1514 } 1515 1516 data_size = (u64)(bit + 1) << indx->index_bits; 1517 1518 if (bmp) { 1519 /* Increase bitmap. */ 1520 err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, 1521 &indx->bitmap_run, 1522 ntfs3_bitmap_size(bit + 1), NULL, true, 1523 NULL); 1524 if (err) 1525 goto out1; 1526 } 1527 1528 alloc = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, in->name, in->name_len, 1529 NULL, &mi); 1530 if (!alloc) { 1531 err = -EINVAL; 1532 if (bmp) 1533 goto out2; 1534 goto out1; 1535 } 1536 1537 if (data_size <= le64_to_cpu(alloc->nres.data_size)) { 1538 /* Reuse index. */ 1539 goto out; 1540 } 1541 1542 /* Increase allocation. */ 1543 err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len, 1544 &indx->alloc_run, data_size, &data_size, true, 1545 NULL); 1546 if (err) { 1547 if (bmp) 1548 goto out2; 1549 goto out1; 1550 } 1551 1552 if (in->name == I30_NAME) 1553 i_size_write(&ni->vfs_inode, data_size); 1554 1555 out: 1556 *vbn = bit << indx->idx2vbn_bits; 1557 1558 return 0; 1559 1560 out2: 1561 /* Ops. No space? */ 1562 attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, 1563 &indx->bitmap_run, bmp_size, &bmp_size_v, false, NULL); 1564 1565 out1: 1566 return err; 1567 } 1568 1569 /* 1570 * indx_insert_into_root - Attempt to insert an entry into the index root. 1571 * 1572 * @undo - True if we undoing previous remove. 1573 * If necessary, it will twiddle the index b-tree. 1574 */ 1575 static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni, 1576 const struct NTFS_DE *new_de, 1577 struct NTFS_DE *root_de, const void *ctx, 1578 struct ntfs_fnd *fnd, bool undo) 1579 { 1580 int err = 0; 1581 struct NTFS_DE *e, *e0, *re; 1582 struct mft_inode *mi; 1583 struct ATTRIB *attr; 1584 struct INDEX_HDR *hdr; 1585 struct indx_node *n; 1586 CLST new_vbn; 1587 __le64 *sub_vbn, t_vbn; 1588 u16 new_de_size; 1589 u32 hdr_used, hdr_total, asize, to_move; 1590 u32 root_size, new_root_size; 1591 struct ntfs_sb_info *sbi; 1592 int ds_root; 1593 struct INDEX_ROOT *root, *a_root; 1594 1595 /* Get the record this root placed in. */ 1596 root = indx_get_root(indx, ni, &attr, &mi); 1597 if (!root) 1598 return -EINVAL; 1599 1600 /* 1601 * Try easy case: 1602 * hdr_insert_de will succeed if there's 1603 * room the root for the new entry. 1604 */ 1605 hdr = &root->ihdr; 1606 sbi = ni->mi.sbi; 1607 new_de_size = le16_to_cpu(new_de->size); 1608 hdr_used = le32_to_cpu(hdr->used); 1609 hdr_total = le32_to_cpu(hdr->total); 1610 asize = le32_to_cpu(attr->size); 1611 root_size = le32_to_cpu(attr->res.data_size); 1612 1613 ds_root = new_de_size + hdr_used - hdr_total; 1614 1615 /* If 'undo' is set then reduce requirements. */ 1616 if ((undo || asize + ds_root < sbi->max_bytes_per_attr) && 1617 mi_resize_attr(mi, attr, ds_root)) { 1618 hdr->total = cpu_to_le32(hdr_total + ds_root); 1619 e = hdr_insert_de(indx, hdr, new_de, root_de, ctx); 1620 WARN_ON(!e); 1621 fnd_clear(fnd); 1622 fnd->root_de = e; 1623 1624 return 0; 1625 } 1626 1627 /* Make a copy of root attribute to restore if error. */ 1628 a_root = kmemdup(attr, asize, GFP_NOFS); 1629 if (!a_root) 1630 return -ENOMEM; 1631 1632 /* 1633 * Copy all the non-end entries from 1634 * the index root to the new buffer. 1635 */ 1636 to_move = 0; 1637 e0 = hdr_first_de(hdr); 1638 1639 /* Calculate the size to copy. */ 1640 for (e = e0;; e = hdr_next_de(hdr, e)) { 1641 if (!e) { 1642 err = -EINVAL; 1643 goto out_free_root; 1644 } 1645 1646 if (de_is_last(e)) 1647 break; 1648 to_move += le16_to_cpu(e->size); 1649 } 1650 1651 if (!to_move) { 1652 re = NULL; 1653 } else { 1654 re = kmemdup(e0, to_move, GFP_NOFS); 1655 if (!re) { 1656 err = -ENOMEM; 1657 goto out_free_root; 1658 } 1659 } 1660 1661 sub_vbn = NULL; 1662 if (de_has_vcn(e)) { 1663 t_vbn = de_get_vbn_le(e); 1664 sub_vbn = &t_vbn; 1665 } 1666 1667 new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE) + 1668 sizeof(u64); 1669 ds_root = new_root_size - root_size; 1670 1671 if (ds_root > 0 && asize + ds_root > sbi->max_bytes_per_attr) { 1672 /* Make root external. */ 1673 err = -EOPNOTSUPP; 1674 goto out_free_re; 1675 } 1676 1677 if (ds_root) 1678 mi_resize_attr(mi, attr, ds_root); 1679 1680 /* Fill first entry (vcn will be set later). */ 1681 e = (struct NTFS_DE *)(root + 1); 1682 memset(e, 0, sizeof(struct NTFS_DE)); 1683 e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64)); 1684 e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST; 1685 1686 hdr->flags = 1; 1687 hdr->used = hdr->total = 1688 cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr)); 1689 1690 fnd->root_de = hdr_first_de(hdr); 1691 mi->dirty = true; 1692 1693 /* Create alloc and bitmap attributes (if not). */ 1694 err = run_is_empty(&indx->alloc_run) ? 1695 indx_create_allocate(indx, ni, &new_vbn) : 1696 indx_add_allocate(indx, ni, &new_vbn); 1697 1698 /* Layout of record may be changed, so rescan root. */ 1699 root = indx_get_root(indx, ni, &attr, &mi); 1700 if (!root) { 1701 /* Bug? */ 1702 ntfs_set_state(sbi, NTFS_DIRTY_ERROR); 1703 err = -EINVAL; 1704 goto out_free_re; 1705 } 1706 1707 if (err) { 1708 /* Restore root. */ 1709 if (mi_resize_attr(mi, attr, -ds_root)) { 1710 memcpy(attr, a_root, asize); 1711 } else { 1712 /* Bug? */ 1713 ntfs_set_state(sbi, NTFS_DIRTY_ERROR); 1714 } 1715 goto out_free_re; 1716 } 1717 1718 e = (struct NTFS_DE *)(root + 1); 1719 *(__le64 *)(e + 1) = cpu_to_le64(new_vbn); 1720 mi->dirty = true; 1721 1722 /* Now we can create/format the new buffer and copy the entries into. */ 1723 n = indx_new(indx, ni, new_vbn, sub_vbn); 1724 if (IS_ERR(n)) { 1725 err = PTR_ERR(n); 1726 goto out_free_re; 1727 } 1728 1729 hdr = &n->index->ihdr; 1730 hdr_used = le32_to_cpu(hdr->used); 1731 hdr_total = le32_to_cpu(hdr->total); 1732 1733 /* Copy root entries into new buffer. */ 1734 hdr_insert_head(hdr, re, to_move); 1735 1736 /* Update bitmap attribute. */ 1737 indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits); 1738 1739 /* Check if we can insert new entry new index buffer. */ 1740 if (hdr_used + new_de_size > hdr_total) { 1741 /* 1742 * This occurs if MFT record is the same or bigger than index 1743 * buffer. Move all root new index and have no space to add 1744 * new entry classic case when MFT record is 1K and index 1745 * buffer 4K the problem should not occurs. 1746 */ 1747 kfree(re); 1748 indx_write(indx, ni, n, 0); 1749 1750 put_indx_node(n); 1751 fnd_clear(fnd); 1752 err = indx_insert_entry(indx, ni, new_de, ctx, fnd, undo); 1753 goto out_free_root; 1754 } 1755 1756 /* 1757 * Now root is a parent for new index buffer. 1758 * Insert NewEntry a new buffer. 1759 */ 1760 e = hdr_insert_de(indx, hdr, new_de, NULL, ctx); 1761 if (!e) { 1762 err = -EINVAL; 1763 goto out_put_n; 1764 } 1765 fnd_push(fnd, n, e); 1766 1767 /* Just write updates index into disk. */ 1768 indx_write(indx, ni, n, 0); 1769 1770 n = NULL; 1771 1772 out_put_n: 1773 put_indx_node(n); 1774 out_free_re: 1775 kfree(re); 1776 out_free_root: 1777 kfree(a_root); 1778 return err; 1779 } 1780 1781 /* 1782 * indx_insert_into_buffer 1783 * 1784 * Attempt to insert an entry into an Index Allocation Buffer. 1785 * If necessary, it will split the buffer. 1786 */ 1787 static int 1788 indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni, 1789 struct INDEX_ROOT *root, const struct NTFS_DE *new_de, 1790 const void *ctx, int level, struct ntfs_fnd *fnd) 1791 { 1792 int err; 1793 const struct NTFS_DE *sp; 1794 struct NTFS_DE *e, *de_t, *up_e; 1795 struct indx_node *n2; 1796 struct indx_node *n1 = fnd->nodes[level]; 1797 struct INDEX_HDR *hdr1 = &n1->index->ihdr; 1798 struct INDEX_HDR *hdr2; 1799 u32 to_copy, used, used1; 1800 CLST new_vbn; 1801 __le64 t_vbn, *sub_vbn; 1802 u16 sp_size; 1803 void *hdr1_saved = NULL; 1804 1805 /* Try the most easy case. */ 1806 e = fnd->level - 1 == level ? fnd->de[level] : NULL; 1807 e = hdr_insert_de(indx, hdr1, new_de, e, ctx); 1808 fnd->de[level] = e; 1809 if (e) { 1810 /* Just write updated index into disk. */ 1811 indx_write(indx, ni, n1, 0); 1812 return 0; 1813 } 1814 1815 /* 1816 * No space to insert into buffer. Split it. 1817 * To split we: 1818 * - Save split point ('cause index buffers will be changed) 1819 * - Allocate NewBuffer and copy all entries <= sp into new buffer 1820 * - Remove all entries (sp including) from TargetBuffer 1821 * - Insert NewEntry into left or right buffer (depending on sp <=> 1822 * NewEntry) 1823 * - Insert sp into parent buffer (or root) 1824 * - Make sp a parent for new buffer 1825 */ 1826 sp = hdr_find_split(hdr1); 1827 if (!sp) 1828 return -EINVAL; 1829 1830 sp_size = le16_to_cpu(sp->size); 1831 up_e = kmalloc(sp_size + sizeof(u64), GFP_NOFS); 1832 if (!up_e) 1833 return -ENOMEM; 1834 memcpy(up_e, sp, sp_size); 1835 1836 used1 = le32_to_cpu(hdr1->used); 1837 hdr1_saved = kmemdup(hdr1, used1, GFP_NOFS); 1838 if (!hdr1_saved) { 1839 err = -ENOMEM; 1840 goto out; 1841 } 1842 1843 if (!hdr1->flags) { 1844 up_e->flags |= NTFS_IE_HAS_SUBNODES; 1845 up_e->size = cpu_to_le16(sp_size + sizeof(u64)); 1846 sub_vbn = NULL; 1847 } else { 1848 t_vbn = de_get_vbn_le(up_e); 1849 sub_vbn = &t_vbn; 1850 } 1851 1852 /* Allocate on disk a new index allocation buffer. */ 1853 err = indx_add_allocate(indx, ni, &new_vbn); 1854 if (err) 1855 goto out; 1856 1857 /* Allocate and format memory a new index buffer. */ 1858 n2 = indx_new(indx, ni, new_vbn, sub_vbn); 1859 if (IS_ERR(n2)) { 1860 err = PTR_ERR(n2); 1861 goto out; 1862 } 1863 1864 hdr2 = &n2->index->ihdr; 1865 1866 /* Make sp a parent for new buffer. */ 1867 de_set_vbn(up_e, new_vbn); 1868 1869 /* Copy all the entries <= sp into the new buffer. */ 1870 de_t = hdr_first_de(hdr1); 1871 to_copy = PtrOffset(de_t, sp); 1872 hdr_insert_head(hdr2, de_t, to_copy); 1873 1874 /* Remove all entries (sp including) from hdr1. */ 1875 used = used1 - to_copy - sp_size; 1876 memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off)); 1877 hdr1->used = cpu_to_le32(used); 1878 1879 /* 1880 * Insert new entry into left or right buffer 1881 * (depending on sp <=> new_de). 1882 */ 1883 hdr_insert_de(indx, 1884 (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size), 1885 up_e + 1, le16_to_cpu(up_e->key_size), 1886 ctx) < 0 ? 1887 hdr2 : 1888 hdr1, 1889 new_de, NULL, ctx); 1890 1891 indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits); 1892 1893 indx_write(indx, ni, n1, 0); 1894 indx_write(indx, ni, n2, 0); 1895 1896 put_indx_node(n2); 1897 1898 /* 1899 * We've finished splitting everybody, so we are ready to 1900 * insert the promoted entry into the parent. 1901 */ 1902 if (!level) { 1903 /* Insert in root. */ 1904 err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd, 0); 1905 } else { 1906 /* 1907 * The target buffer's parent is another index buffer. 1908 * TODO: Remove recursion. 1909 */ 1910 err = indx_insert_into_buffer(indx, ni, root, up_e, ctx, 1911 level - 1, fnd); 1912 } 1913 1914 if (err) { 1915 /* 1916 * Undo critical operations. 1917 */ 1918 indx_mark_free(indx, ni, new_vbn >> indx->idx2vbn_bits); 1919 memcpy(hdr1, hdr1_saved, used1); 1920 indx_write(indx, ni, n1, 0); 1921 } 1922 1923 out: 1924 kfree(up_e); 1925 kfree(hdr1_saved); 1926 1927 return err; 1928 } 1929 1930 /* 1931 * indx_insert_entry - Insert new entry into index. 1932 * 1933 * @undo - True if we undoing previous remove. 1934 */ 1935 int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni, 1936 const struct NTFS_DE *new_de, const void *ctx, 1937 struct ntfs_fnd *fnd, bool undo) 1938 { 1939 int err; 1940 int diff; 1941 struct NTFS_DE *e; 1942 struct ntfs_fnd *fnd_a = NULL; 1943 struct INDEX_ROOT *root; 1944 1945 if (!fnd) { 1946 fnd_a = fnd_get(); 1947 if (!fnd_a) { 1948 err = -ENOMEM; 1949 goto out1; 1950 } 1951 fnd = fnd_a; 1952 } 1953 1954 root = indx_get_root(indx, ni, NULL, NULL); 1955 if (!root) { 1956 err = -EINVAL; 1957 goto out; 1958 } 1959 1960 if (fnd_is_empty(fnd)) { 1961 /* 1962 * Find the spot the tree where we want to 1963 * insert the new entry. 1964 */ 1965 err = indx_find(indx, ni, root, new_de + 1, 1966 le16_to_cpu(new_de->key_size), ctx, &diff, &e, 1967 fnd); 1968 if (err) 1969 goto out; 1970 1971 if (!diff) { 1972 err = -EEXIST; 1973 goto out; 1974 } 1975 } 1976 1977 if (!fnd->level) { 1978 /* 1979 * The root is also a leaf, so we'll insert the 1980 * new entry into it. 1981 */ 1982 err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx, 1983 fnd, undo); 1984 } else { 1985 /* 1986 * Found a leaf buffer, so we'll insert the new entry into it. 1987 */ 1988 err = indx_insert_into_buffer(indx, ni, root, new_de, ctx, 1989 fnd->level - 1, fnd); 1990 } 1991 1992 out: 1993 fnd_put(fnd_a); 1994 out1: 1995 return err; 1996 } 1997 1998 /* 1999 * indx_find_buffer - Locate a buffer from the tree. 2000 */ 2001 static struct indx_node *indx_find_buffer(struct ntfs_index *indx, 2002 struct ntfs_inode *ni, 2003 const struct INDEX_ROOT *root, 2004 __le64 vbn, struct indx_node *n) 2005 { 2006 int err; 2007 const struct NTFS_DE *e; 2008 struct indx_node *r; 2009 const struct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr; 2010 2011 /* Step 1: Scan one level. */ 2012 for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) { 2013 if (!e) 2014 return ERR_PTR(-EINVAL); 2015 2016 if (de_has_vcn(e) && vbn == de_get_vbn_le(e)) 2017 return n; 2018 2019 if (de_is_last(e)) 2020 break; 2021 } 2022 2023 /* Step2: Do recursion. */ 2024 e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off)); 2025 for (;;) { 2026 if (de_has_vcn_ex(e)) { 2027 err = indx_read(indx, ni, de_get_vbn(e), &n); 2028 if (err) 2029 return ERR_PTR(err); 2030 2031 r = indx_find_buffer(indx, ni, root, vbn, n); 2032 if (r) 2033 return r; 2034 } 2035 2036 if (de_is_last(e)) 2037 break; 2038 2039 e = Add2Ptr(e, le16_to_cpu(e->size)); 2040 } 2041 2042 return NULL; 2043 } 2044 2045 /* 2046 * indx_shrink - Deallocate unused tail indexes. 2047 */ 2048 static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni, 2049 size_t bit) 2050 { 2051 int err = 0; 2052 u64 bpb, new_data; 2053 size_t nbits; 2054 struct ATTRIB *b; 2055 struct ATTR_LIST_ENTRY *le = NULL; 2056 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 2057 2058 b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len, 2059 NULL, NULL); 2060 2061 if (!b) 2062 return -ENOENT; 2063 2064 if (!b->non_res) { 2065 unsigned long pos; 2066 const unsigned long *bm = resident_data(b); 2067 2068 nbits = (size_t)le32_to_cpu(b->res.data_size) * 8; 2069 2070 if (bit >= nbits) 2071 return 0; 2072 2073 pos = find_next_bit_le(bm, nbits, bit); 2074 if (pos < nbits) 2075 return 0; 2076 } else { 2077 size_t used = MINUS_ONE_T; 2078 2079 nbits = le64_to_cpu(b->nres.data_size) * 8; 2080 2081 if (bit >= nbits) 2082 return 0; 2083 2084 err = scan_nres_bitmap(ni, b, indx, bit, &scan_for_used, &used); 2085 if (err) 2086 return err; 2087 2088 if (used != MINUS_ONE_T) 2089 return 0; 2090 } 2091 2092 new_data = (u64)bit << indx->index_bits; 2093 2094 err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len, 2095 &indx->alloc_run, new_data, &new_data, false, NULL); 2096 if (err) 2097 return err; 2098 2099 if (in->name == I30_NAME) 2100 i_size_write(&ni->vfs_inode, new_data); 2101 2102 bpb = ntfs3_bitmap_size(bit); 2103 if (bpb * 8 == nbits) 2104 return 0; 2105 2106 err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, 2107 &indx->bitmap_run, bpb, &bpb, false, NULL); 2108 2109 return err; 2110 } 2111 2112 static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni, 2113 const struct NTFS_DE *e, bool trim) 2114 { 2115 int err; 2116 struct indx_node *n = NULL; 2117 struct INDEX_HDR *hdr; 2118 CLST vbn = de_get_vbn(e); 2119 size_t i; 2120 2121 err = indx_read(indx, ni, vbn, &n); 2122 if (err) 2123 return err; 2124 2125 hdr = &n->index->ihdr; 2126 /* First, recurse into the children, if any. */ 2127 if (hdr_has_subnode(hdr)) { 2128 for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) { 2129 indx_free_children(indx, ni, e, false); 2130 if (de_is_last(e)) 2131 break; 2132 } 2133 } 2134 2135 put_indx_node(n); 2136 2137 i = vbn >> indx->idx2vbn_bits; 2138 /* 2139 * We've gotten rid of the children; add this buffer to the free list. 2140 */ 2141 indx_mark_free(indx, ni, i); 2142 2143 if (!trim) 2144 return 0; 2145 2146 /* 2147 * If there are no used indexes after current free index 2148 * then we can truncate allocation and bitmap. 2149 * Use bitmap to estimate the case. 2150 */ 2151 indx_shrink(indx, ni, i + 1); 2152 return 0; 2153 } 2154 2155 /* 2156 * indx_get_entry_to_replace 2157 * 2158 * Find a replacement entry for a deleted entry. 2159 * Always returns a node entry: 2160 * NTFS_IE_HAS_SUBNODES is set the flags and the size includes the sub_vcn. 2161 */ 2162 static int indx_get_entry_to_replace(struct ntfs_index *indx, 2163 struct ntfs_inode *ni, 2164 const struct NTFS_DE *de_next, 2165 struct NTFS_DE **de_to_replace, 2166 struct ntfs_fnd *fnd) 2167 { 2168 int err; 2169 int level = -1; 2170 CLST vbn; 2171 struct NTFS_DE *e, *te, *re; 2172 struct indx_node *n; 2173 struct INDEX_BUFFER *ib; 2174 2175 *de_to_replace = NULL; 2176 2177 /* Find first leaf entry down from de_next. */ 2178 vbn = de_get_vbn(de_next); 2179 for (;;) { 2180 n = NULL; 2181 err = indx_read(indx, ni, vbn, &n); 2182 if (err) 2183 goto out; 2184 2185 e = hdr_first_de(&n->index->ihdr); 2186 fnd_push(fnd, n, e); 2187 2188 if (!de_is_last(e)) { 2189 /* 2190 * This buffer is non-empty, so its first entry 2191 * could be used as the replacement entry. 2192 */ 2193 level = fnd->level - 1; 2194 } 2195 2196 if (!de_has_vcn(e)) 2197 break; 2198 2199 /* This buffer is a node. Continue to go down. */ 2200 vbn = de_get_vbn(e); 2201 } 2202 2203 if (level == -1) 2204 goto out; 2205 2206 n = fnd->nodes[level]; 2207 te = hdr_first_de(&n->index->ihdr); 2208 /* Copy the candidate entry into the replacement entry buffer. */ 2209 re = kmalloc(le16_to_cpu(te->size) + sizeof(u64), GFP_NOFS); 2210 if (!re) { 2211 err = -ENOMEM; 2212 goto out; 2213 } 2214 2215 *de_to_replace = re; 2216 memcpy(re, te, le16_to_cpu(te->size)); 2217 2218 if (!de_has_vcn(re)) { 2219 /* 2220 * The replacement entry we found doesn't have a sub_vcn. 2221 * increase its size to hold one. 2222 */ 2223 le16_add_cpu(&re->size, sizeof(u64)); 2224 re->flags |= NTFS_IE_HAS_SUBNODES; 2225 } else { 2226 /* 2227 * The replacement entry we found was a node entry, which 2228 * means that all its child buffers are empty. Return them 2229 * to the free pool. 2230 */ 2231 indx_free_children(indx, ni, te, true); 2232 } 2233 2234 /* 2235 * Expunge the replacement entry from its former location, 2236 * and then write that buffer. 2237 */ 2238 ib = n->index; 2239 e = hdr_delete_de(&ib->ihdr, te); 2240 2241 fnd->de[level] = e; 2242 indx_write(indx, ni, n, 0); 2243 2244 if (ib_is_leaf(ib) && ib_is_empty(ib)) { 2245 /* An empty leaf. */ 2246 return 0; 2247 } 2248 2249 out: 2250 fnd_clear(fnd); 2251 return err; 2252 } 2253 2254 /* 2255 * indx_delete_entry - Delete an entry from the index. 2256 */ 2257 int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni, 2258 const void *key, u32 key_len, const void *ctx) 2259 { 2260 int err, diff; 2261 struct INDEX_ROOT *root; 2262 struct INDEX_HDR *hdr; 2263 struct ntfs_fnd *fnd, *fnd2; 2264 struct INDEX_BUFFER *ib; 2265 struct NTFS_DE *e, *re, *next, *prev, *me; 2266 struct indx_node *n, *n2d = NULL; 2267 __le64 sub_vbn; 2268 int level, level2; 2269 struct ATTRIB *attr; 2270 struct mft_inode *mi; 2271 u32 e_size, root_size, new_root_size; 2272 size_t trim_bit; 2273 const struct INDEX_NAMES *in; 2274 2275 fnd = fnd_get(); 2276 if (!fnd) { 2277 err = -ENOMEM; 2278 goto out2; 2279 } 2280 2281 fnd2 = fnd_get(); 2282 if (!fnd2) { 2283 err = -ENOMEM; 2284 goto out1; 2285 } 2286 2287 root = indx_get_root(indx, ni, &attr, &mi); 2288 if (!root) { 2289 err = -EINVAL; 2290 goto out; 2291 } 2292 2293 /* Locate the entry to remove. */ 2294 err = indx_find(indx, ni, root, key, key_len, ctx, &diff, &e, fnd); 2295 if (err) 2296 goto out; 2297 2298 if (!e || diff) { 2299 err = -ENOENT; 2300 goto out; 2301 } 2302 2303 level = fnd->level; 2304 2305 if (level) { 2306 n = fnd->nodes[level - 1]; 2307 e = fnd->de[level - 1]; 2308 ib = n->index; 2309 hdr = &ib->ihdr; 2310 } else { 2311 hdr = &root->ihdr; 2312 e = fnd->root_de; 2313 n = NULL; 2314 } 2315 2316 e_size = le16_to_cpu(e->size); 2317 2318 if (!de_has_vcn_ex(e)) { 2319 /* The entry to delete is a leaf, so we can just rip it out. */ 2320 hdr_delete_de(hdr, e); 2321 2322 if (!level) { 2323 hdr->total = hdr->used; 2324 2325 /* Shrink resident root attribute. */ 2326 mi_resize_attr(mi, attr, 0 - e_size); 2327 goto out; 2328 } 2329 2330 indx_write(indx, ni, n, 0); 2331 2332 /* 2333 * Check to see if removing that entry made 2334 * the leaf empty. 2335 */ 2336 if (ib_is_leaf(ib) && ib_is_empty(ib)) { 2337 fnd_pop(fnd); 2338 fnd_push(fnd2, n, e); 2339 } 2340 } else { 2341 /* 2342 * The entry we wish to delete is a node buffer, so we 2343 * have to find a replacement for it. 2344 */ 2345 next = de_get_next(e); 2346 2347 err = indx_get_entry_to_replace(indx, ni, next, &re, fnd2); 2348 if (err) 2349 goto out; 2350 2351 if (re) { 2352 de_set_vbn_le(re, de_get_vbn_le(e)); 2353 hdr_delete_de(hdr, e); 2354 2355 err = level ? indx_insert_into_buffer(indx, ni, root, 2356 re, ctx, 2357 fnd->level - 1, 2358 fnd) : 2359 indx_insert_into_root(indx, ni, re, e, 2360 ctx, fnd, 0); 2361 kfree(re); 2362 2363 if (err) 2364 goto out; 2365 } else { 2366 /* 2367 * There is no replacement for the current entry. 2368 * This means that the subtree rooted at its node 2369 * is empty, and can be deleted, which turn means 2370 * that the node can just inherit the deleted 2371 * entry sub_vcn. 2372 */ 2373 indx_free_children(indx, ni, next, true); 2374 2375 de_set_vbn_le(next, de_get_vbn_le(e)); 2376 hdr_delete_de(hdr, e); 2377 if (level) { 2378 indx_write(indx, ni, n, 0); 2379 } else { 2380 hdr->total = hdr->used; 2381 2382 /* Shrink resident root attribute. */ 2383 mi_resize_attr(mi, attr, 0 - e_size); 2384 } 2385 } 2386 } 2387 2388 /* Delete a branch of tree. */ 2389 if (!fnd2 || !fnd2->level) 2390 goto out; 2391 2392 /* Reinit root 'cause it can be changed. */ 2393 root = indx_get_root(indx, ni, &attr, &mi); 2394 if (!root) { 2395 err = -EINVAL; 2396 goto out; 2397 } 2398 2399 n2d = NULL; 2400 sub_vbn = fnd2->nodes[0]->index->vbn; 2401 level2 = 0; 2402 level = fnd->level; 2403 2404 hdr = level ? &fnd->nodes[level - 1]->index->ihdr : &root->ihdr; 2405 2406 /* Scan current level. */ 2407 for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) { 2408 if (!e) { 2409 err = -EINVAL; 2410 goto out; 2411 } 2412 2413 if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e)) 2414 break; 2415 2416 if (de_is_last(e)) { 2417 e = NULL; 2418 break; 2419 } 2420 } 2421 2422 if (!e) { 2423 /* Do slow search from root. */ 2424 struct indx_node *in; 2425 2426 fnd_clear(fnd); 2427 2428 in = indx_find_buffer(indx, ni, root, sub_vbn, NULL); 2429 if (IS_ERR(in)) { 2430 err = PTR_ERR(in); 2431 goto out; 2432 } 2433 2434 if (in) 2435 fnd_push(fnd, in, NULL); 2436 } 2437 2438 /* Merge fnd2 -> fnd. */ 2439 for (level = 0; level < fnd2->level; level++) { 2440 fnd_push(fnd, fnd2->nodes[level], fnd2->de[level]); 2441 fnd2->nodes[level] = NULL; 2442 } 2443 fnd2->level = 0; 2444 2445 hdr = NULL; 2446 for (level = fnd->level; level; level--) { 2447 struct indx_node *in = fnd->nodes[level - 1]; 2448 2449 ib = in->index; 2450 if (ib_is_empty(ib)) { 2451 sub_vbn = ib->vbn; 2452 } else { 2453 hdr = &ib->ihdr; 2454 n2d = in; 2455 level2 = level; 2456 break; 2457 } 2458 } 2459 2460 if (!hdr) 2461 hdr = &root->ihdr; 2462 2463 e = hdr_first_de(hdr); 2464 if (!e) { 2465 err = -EINVAL; 2466 goto out; 2467 } 2468 2469 if (hdr != &root->ihdr || !de_is_last(e)) { 2470 prev = NULL; 2471 while (!de_is_last(e)) { 2472 if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e)) 2473 break; 2474 prev = e; 2475 e = hdr_next_de(hdr, e); 2476 if (!e) { 2477 err = -EINVAL; 2478 goto out; 2479 } 2480 } 2481 2482 if (sub_vbn != de_get_vbn_le(e)) { 2483 /* 2484 * Didn't find the parent entry, although this buffer 2485 * is the parent trail. Something is corrupt. 2486 */ 2487 err = -EINVAL; 2488 goto out; 2489 } 2490 2491 if (de_is_last(e)) { 2492 /* 2493 * Since we can't remove the end entry, we'll remove 2494 * its predecessor instead. This means we have to 2495 * transfer the predecessor's sub_vcn to the end entry. 2496 * Note: This index block is not empty, so the 2497 * predecessor must exist. 2498 */ 2499 if (!prev) { 2500 err = -EINVAL; 2501 goto out; 2502 } 2503 2504 if (de_has_vcn(prev)) { 2505 de_set_vbn_le(e, de_get_vbn_le(prev)); 2506 } else if (de_has_vcn(e)) { 2507 le16_sub_cpu(&e->size, sizeof(u64)); 2508 e->flags &= ~NTFS_IE_HAS_SUBNODES; 2509 le32_sub_cpu(&hdr->used, sizeof(u64)); 2510 } 2511 e = prev; 2512 } 2513 2514 /* 2515 * Copy the current entry into a temporary buffer (stripping 2516 * off its down-pointer, if any) and delete it from the current 2517 * buffer or root, as appropriate. 2518 */ 2519 e_size = le16_to_cpu(e->size); 2520 me = kmemdup(e, e_size, GFP_NOFS); 2521 if (!me) { 2522 err = -ENOMEM; 2523 goto out; 2524 } 2525 2526 if (de_has_vcn(me)) { 2527 me->flags &= ~NTFS_IE_HAS_SUBNODES; 2528 le16_sub_cpu(&me->size, sizeof(u64)); 2529 } 2530 2531 hdr_delete_de(hdr, e); 2532 2533 if (hdr == &root->ihdr) { 2534 level = 0; 2535 hdr->total = hdr->used; 2536 2537 /* Shrink resident root attribute. */ 2538 mi_resize_attr(mi, attr, 0 - e_size); 2539 } else { 2540 indx_write(indx, ni, n2d, 0); 2541 level = level2; 2542 } 2543 2544 /* Mark unused buffers as free. */ 2545 trim_bit = -1; 2546 for (; level < fnd->level; level++) { 2547 ib = fnd->nodes[level]->index; 2548 if (ib_is_empty(ib)) { 2549 size_t k = le64_to_cpu(ib->vbn) >> 2550 indx->idx2vbn_bits; 2551 2552 indx_mark_free(indx, ni, k); 2553 if (k < trim_bit) 2554 trim_bit = k; 2555 } 2556 } 2557 2558 fnd_clear(fnd); 2559 /*fnd->root_de = NULL;*/ 2560 2561 /* 2562 * Re-insert the entry into the tree. 2563 * Find the spot the tree where we want to insert the new entry. 2564 */ 2565 err = indx_insert_entry(indx, ni, me, ctx, fnd, 0); 2566 kfree(me); 2567 if (err) 2568 goto out; 2569 2570 if (trim_bit != -1) 2571 indx_shrink(indx, ni, trim_bit); 2572 } else { 2573 /* 2574 * This tree needs to be collapsed down to an empty root. 2575 * Recreate the index root as an empty leaf and free all 2576 * the bits the index allocation bitmap. 2577 */ 2578 fnd_clear(fnd); 2579 fnd_clear(fnd2); 2580 2581 in = &s_index_names[indx->type]; 2582 2583 err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len, 2584 &indx->alloc_run, 0, NULL, false, NULL); 2585 if (in->name == I30_NAME) 2586 i_size_write(&ni->vfs_inode, 0); 2587 2588 err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len, 2589 false, NULL); 2590 run_close(&indx->alloc_run); 2591 2592 err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, 2593 &indx->bitmap_run, 0, NULL, false, NULL); 2594 err = ni_remove_attr(ni, ATTR_BITMAP, in->name, in->name_len, 2595 false, NULL); 2596 run_close(&indx->bitmap_run); 2597 2598 root = indx_get_root(indx, ni, &attr, &mi); 2599 if (!root) { 2600 err = -EINVAL; 2601 goto out; 2602 } 2603 2604 root_size = le32_to_cpu(attr->res.data_size); 2605 new_root_size = 2606 sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE); 2607 2608 if (new_root_size != root_size && 2609 !mi_resize_attr(mi, attr, new_root_size - root_size)) { 2610 err = -EINVAL; 2611 goto out; 2612 } 2613 2614 /* Fill first entry. */ 2615 e = (struct NTFS_DE *)(root + 1); 2616 e->ref.low = 0; 2617 e->ref.high = 0; 2618 e->ref.seq = 0; 2619 e->size = cpu_to_le16(sizeof(struct NTFS_DE)); 2620 e->flags = NTFS_IE_LAST; // 0x02 2621 e->key_size = 0; 2622 e->res = 0; 2623 2624 hdr = &root->ihdr; 2625 hdr->flags = 0; 2626 hdr->used = hdr->total = cpu_to_le32( 2627 new_root_size - offsetof(struct INDEX_ROOT, ihdr)); 2628 mi->dirty = true; 2629 } 2630 2631 out: 2632 fnd_put(fnd2); 2633 out1: 2634 fnd_put(fnd); 2635 out2: 2636 return err; 2637 } 2638 2639 /* 2640 * Update duplicated information in directory entry 2641 * 'dup' - info from MFT record 2642 */ 2643 int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi, 2644 const struct ATTR_FILE_NAME *fname, 2645 const struct NTFS_DUP_INFO *dup, int sync) 2646 { 2647 int err, diff; 2648 struct NTFS_DE *e = NULL; 2649 struct ATTR_FILE_NAME *e_fname; 2650 struct ntfs_fnd *fnd; 2651 struct INDEX_ROOT *root; 2652 struct mft_inode *mi; 2653 struct ntfs_index *indx = &ni->dir; 2654 2655 fnd = fnd_get(); 2656 if (!fnd) 2657 return -ENOMEM; 2658 2659 root = indx_get_root(indx, ni, NULL, &mi); 2660 if (!root) { 2661 err = -EINVAL; 2662 goto out; 2663 } 2664 2665 /* Find entry in directory. */ 2666 err = indx_find(indx, ni, root, fname, fname_full_size(fname), sbi, 2667 &diff, &e, fnd); 2668 if (err) 2669 goto out; 2670 2671 if (!e) { 2672 err = -EINVAL; 2673 goto out; 2674 } 2675 2676 if (diff) { 2677 err = -EINVAL; 2678 goto out; 2679 } 2680 2681 e_fname = (struct ATTR_FILE_NAME *)(e + 1); 2682 2683 if (!memcmp(&e_fname->dup, dup, sizeof(*dup))) { 2684 /* 2685 * Nothing to update in index! Try to avoid this call. 2686 */ 2687 goto out; 2688 } 2689 2690 memcpy(&e_fname->dup, dup, sizeof(*dup)); 2691 2692 if (fnd->level) { 2693 /* Directory entry in index. */ 2694 err = indx_write(indx, ni, fnd->nodes[fnd->level - 1], sync); 2695 } else { 2696 /* Directory entry in directory MFT record. */ 2697 mi->dirty = true; 2698 if (sync) 2699 err = mi_write(mi, 1); 2700 else 2701 mark_inode_dirty(&ni->vfs_inode); 2702 } 2703 2704 out: 2705 fnd_put(fnd); 2706 return err; 2707 } 2708