1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * 4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. 5 * 6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame? 7 */ 8 9 #include <linux/blkdev.h> 10 #include <linux/buffer_head.h> 11 #include <linux/fs.h> 12 #include <linux/hash.h> 13 #include <linux/nls.h> 14 #include <linux/ratelimit.h> 15 #include <linux/slab.h> 16 17 #include "debug.h" 18 #include "ntfs.h" 19 #include "ntfs_fs.h" 20 21 /* 22 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage 23 * preallocate algorithm. 24 */ 25 #ifndef NTFS_MIN_LOG2_OF_CLUMP 26 #define NTFS_MIN_LOG2_OF_CLUMP 16 27 #endif 28 29 #ifndef NTFS_MAX_LOG2_OF_CLUMP 30 #define NTFS_MAX_LOG2_OF_CLUMP 26 31 #endif 32 33 // 16M 34 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8)) 35 // 16G 36 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8)) 37 38 static inline u64 get_pre_allocated(u64 size) 39 { 40 u32 clump; 41 u8 align_shift; 42 u64 ret; 43 44 if (size <= NTFS_CLUMP_MIN) { 45 clump = 1 << NTFS_MIN_LOG2_OF_CLUMP; 46 align_shift = NTFS_MIN_LOG2_OF_CLUMP; 47 } else if (size >= NTFS_CLUMP_MAX) { 48 clump = 1 << NTFS_MAX_LOG2_OF_CLUMP; 49 align_shift = NTFS_MAX_LOG2_OF_CLUMP; 50 } else { 51 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 + 52 __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP)); 53 clump = 1u << align_shift; 54 } 55 56 ret = (((size + clump - 1) >> align_shift)) << align_shift; 57 58 return ret; 59 } 60 61 /* 62 * attr_must_be_resident 63 * 64 * Return: True if attribute must be resident. 65 */ 66 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi, 67 enum ATTR_TYPE type) 68 { 69 const struct ATTR_DEF_ENTRY *de; 70 71 switch (type) { 72 case ATTR_STD: 73 case ATTR_NAME: 74 case ATTR_ID: 75 case ATTR_LABEL: 76 case ATTR_VOL_INFO: 77 case ATTR_ROOT: 78 case ATTR_EA_INFO: 79 return true; 80 default: 81 de = ntfs_query_def(sbi, type); 82 if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT)) 83 return true; 84 return false; 85 } 86 } 87 88 /* 89 * attr_load_runs - Load all runs stored in @attr. 90 */ 91 int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni, 92 struct runs_tree *run, const CLST *vcn) 93 { 94 int err; 95 CLST svcn = le64_to_cpu(attr->nres.svcn); 96 CLST evcn = le64_to_cpu(attr->nres.evcn); 97 u32 asize; 98 u16 run_off; 99 100 if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn)) 101 return 0; 102 103 if (vcn && (evcn < *vcn || *vcn < svcn)) 104 return -EINVAL; 105 106 asize = le32_to_cpu(attr->size); 107 run_off = le16_to_cpu(attr->nres.run_off); 108 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, 109 vcn ? *vcn : svcn, Add2Ptr(attr, run_off), 110 asize - run_off); 111 if (err < 0) 112 return err; 113 114 return 0; 115 } 116 117 /* 118 * run_deallocate_ex - Deallocate clusters. 119 */ 120 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run, 121 CLST vcn, CLST len, CLST *done, bool trim) 122 { 123 int err = 0; 124 CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0; 125 size_t idx; 126 127 if (!len) 128 goto out; 129 130 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) { 131 failed: 132 run_truncate(run, vcn0); 133 err = -EINVAL; 134 goto out; 135 } 136 137 for (;;) { 138 if (clen > len) 139 clen = len; 140 141 if (!clen) { 142 err = -EINVAL; 143 goto out; 144 } 145 146 if (lcn != SPARSE_LCN) { 147 mark_as_free_ex(sbi, lcn, clen, trim); 148 dn += clen; 149 } 150 151 len -= clen; 152 if (!len) 153 break; 154 155 vcn_next = vcn + clen; 156 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) || 157 vcn != vcn_next) { 158 /* Save memory - don't load entire run. */ 159 goto failed; 160 } 161 } 162 163 out: 164 if (done) 165 *done += dn; 166 167 return err; 168 } 169 170 /* 171 * attr_allocate_clusters - Find free space, mark it as used and store in @run. 172 */ 173 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run, 174 CLST vcn, CLST lcn, CLST len, CLST *pre_alloc, 175 enum ALLOCATE_OPT opt, CLST *alen, const size_t fr, 176 CLST *new_lcn) 177 { 178 int err; 179 CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0; 180 struct wnd_bitmap *wnd = &sbi->used.bitmap; 181 size_t cnt = run->count; 182 183 for (;;) { 184 err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen, 185 opt); 186 187 if (err == -ENOSPC && pre) { 188 pre = 0; 189 if (*pre_alloc) 190 *pre_alloc = 0; 191 continue; 192 } 193 194 if (err) 195 goto out; 196 197 if (new_lcn && vcn == vcn0) 198 *new_lcn = lcn; 199 200 /* Add new fragment into run storage. */ 201 if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) { 202 /* Undo last 'ntfs_look_for_free_space' */ 203 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS); 204 wnd_set_free(wnd, lcn, flen); 205 up_write(&wnd->rw_lock); 206 err = -ENOMEM; 207 goto out; 208 } 209 210 vcn += flen; 211 212 if (flen >= len || opt == ALLOCATE_MFT || 213 (fr && run->count - cnt >= fr)) { 214 *alen = vcn - vcn0; 215 return 0; 216 } 217 218 len -= flen; 219 } 220 221 out: 222 /* Undo 'ntfs_look_for_free_space' */ 223 if (vcn - vcn0) { 224 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false); 225 run_truncate(run, vcn0); 226 } 227 228 return err; 229 } 230 231 /* 232 * attr_make_nonresident 233 * 234 * If page is not NULL - it is already contains resident data 235 * and locked (called from ni_write_frame()). 236 */ 237 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, 238 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi, 239 u64 new_size, struct runs_tree *run, 240 struct ATTRIB **ins_attr, struct page *page) 241 { 242 struct ntfs_sb_info *sbi; 243 struct ATTRIB *attr_s; 244 struct MFT_REC *rec; 245 u32 used, asize, rsize, aoff, align; 246 bool is_data; 247 CLST len, alen; 248 char *next; 249 int err; 250 251 if (attr->non_res) { 252 *ins_attr = attr; 253 return 0; 254 } 255 256 sbi = mi->sbi; 257 rec = mi->mrec; 258 attr_s = NULL; 259 used = le32_to_cpu(rec->used); 260 asize = le32_to_cpu(attr->size); 261 next = Add2Ptr(attr, asize); 262 aoff = PtrOffset(rec, attr); 263 rsize = le32_to_cpu(attr->res.data_size); 264 is_data = attr->type == ATTR_DATA && !attr->name_len; 265 266 align = sbi->cluster_size; 267 if (is_attr_compressed(attr)) 268 align <<= COMPRESSION_UNIT; 269 len = (rsize + align - 1) >> sbi->cluster_bits; 270 271 run_init(run); 272 273 /* Make a copy of original attribute. */ 274 attr_s = kmemdup(attr, asize, GFP_NOFS); 275 if (!attr_s) { 276 err = -ENOMEM; 277 goto out; 278 } 279 280 if (!len) { 281 /* Empty resident -> Empty nonresident. */ 282 alen = 0; 283 } else { 284 const char *data = resident_data(attr); 285 286 err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL, 287 ALLOCATE_DEF, &alen, 0, NULL); 288 if (err) 289 goto out1; 290 291 if (!rsize) { 292 /* Empty resident -> Non empty nonresident. */ 293 } else if (!is_data) { 294 err = ntfs_sb_write_run(sbi, run, 0, data, rsize); 295 if (err) 296 goto out2; 297 } else if (!page) { 298 char *kaddr; 299 300 page = grab_cache_page(ni->vfs_inode.i_mapping, 0); 301 if (!page) { 302 err = -ENOMEM; 303 goto out2; 304 } 305 kaddr = kmap_atomic(page); 306 memcpy(kaddr, data, rsize); 307 memset(kaddr + rsize, 0, PAGE_SIZE - rsize); 308 kunmap_atomic(kaddr); 309 flush_dcache_page(page); 310 SetPageUptodate(page); 311 set_page_dirty(page); 312 unlock_page(page); 313 put_page(page); 314 } 315 } 316 317 /* Remove original attribute. */ 318 used -= asize; 319 memmove(attr, Add2Ptr(attr, asize), used - aoff); 320 rec->used = cpu_to_le32(used); 321 mi->dirty = true; 322 if (le) 323 al_remove_le(ni, le); 324 325 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s), 326 attr_s->name_len, run, 0, alen, 327 attr_s->flags, &attr, NULL); 328 if (err) 329 goto out3; 330 331 kfree(attr_s); 332 attr->nres.data_size = cpu_to_le64(rsize); 333 attr->nres.valid_size = attr->nres.data_size; 334 335 *ins_attr = attr; 336 337 if (is_data) 338 ni->ni_flags &= ~NI_FLAG_RESIDENT; 339 340 /* Resident attribute becomes non resident. */ 341 return 0; 342 343 out3: 344 attr = Add2Ptr(rec, aoff); 345 memmove(next, attr, used - aoff); 346 memcpy(attr, attr_s, asize); 347 rec->used = cpu_to_le32(used + asize); 348 mi->dirty = true; 349 out2: 350 /* Undo: do not trim new allocated clusters. */ 351 run_deallocate(sbi, run, false); 352 run_close(run); 353 out1: 354 kfree(attr_s); 355 out: 356 return err; 357 } 358 359 /* 360 * attr_set_size_res - Helper for attr_set_size(). 361 */ 362 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr, 363 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi, 364 u64 new_size, struct runs_tree *run, 365 struct ATTRIB **ins_attr) 366 { 367 struct ntfs_sb_info *sbi = mi->sbi; 368 struct MFT_REC *rec = mi->mrec; 369 u32 used = le32_to_cpu(rec->used); 370 u32 asize = le32_to_cpu(attr->size); 371 u32 aoff = PtrOffset(rec, attr); 372 u32 rsize = le32_to_cpu(attr->res.data_size); 373 u32 tail = used - aoff - asize; 374 char *next = Add2Ptr(attr, asize); 375 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8); 376 377 if (dsize < 0) { 378 memmove(next + dsize, next, tail); 379 } else if (dsize > 0) { 380 if (used + dsize > sbi->max_bytes_per_attr) 381 return attr_make_nonresident(ni, attr, le, mi, new_size, 382 run, ins_attr, NULL); 383 384 memmove(next + dsize, next, tail); 385 memset(next, 0, dsize); 386 } 387 388 if (new_size > rsize) 389 memset(Add2Ptr(resident_data(attr), rsize), 0, 390 new_size - rsize); 391 392 rec->used = cpu_to_le32(used + dsize); 393 attr->size = cpu_to_le32(asize + dsize); 394 attr->res.data_size = cpu_to_le32(new_size); 395 mi->dirty = true; 396 *ins_attr = attr; 397 398 return 0; 399 } 400 401 /* 402 * attr_set_size - Change the size of attribute. 403 * 404 * Extend: 405 * - Sparse/compressed: No allocated clusters. 406 * - Normal: Append allocated and preallocated new clusters. 407 * Shrink: 408 * - No deallocate if @keep_prealloc is set. 409 */ 410 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type, 411 const __le16 *name, u8 name_len, struct runs_tree *run, 412 u64 new_size, const u64 *new_valid, bool keep_prealloc, 413 struct ATTRIB **ret) 414 { 415 int err = 0; 416 struct ntfs_sb_info *sbi = ni->mi.sbi; 417 u8 cluster_bits = sbi->cluster_bits; 418 bool is_mft = 419 ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len; 420 u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp; 421 struct ATTRIB *attr = NULL, *attr_b; 422 struct ATTR_LIST_ENTRY *le, *le_b; 423 struct mft_inode *mi, *mi_b; 424 CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn; 425 CLST next_svcn, pre_alloc = -1, done = 0; 426 bool is_ext; 427 u32 align; 428 struct MFT_REC *rec; 429 430 again: 431 le_b = NULL; 432 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL, 433 &mi_b); 434 if (!attr_b) { 435 err = -ENOENT; 436 goto out; 437 } 438 439 if (!attr_b->non_res) { 440 err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run, 441 &attr_b); 442 if (err || !attr_b->non_res) 443 goto out; 444 445 /* Layout of records may be changed, so do a full search. */ 446 goto again; 447 } 448 449 is_ext = is_attr_ext(attr_b); 450 451 again_1: 452 align = sbi->cluster_size; 453 454 if (is_ext) { 455 align <<= attr_b->nres.c_unit; 456 if (is_attr_sparsed(attr_b)) 457 keep_prealloc = false; 458 } 459 460 old_valid = le64_to_cpu(attr_b->nres.valid_size); 461 old_size = le64_to_cpu(attr_b->nres.data_size); 462 old_alloc = le64_to_cpu(attr_b->nres.alloc_size); 463 old_alen = old_alloc >> cluster_bits; 464 465 new_alloc = (new_size + align - 1) & ~(u64)(align - 1); 466 new_alen = new_alloc >> cluster_bits; 467 468 if (keep_prealloc && is_ext) 469 keep_prealloc = false; 470 471 if (keep_prealloc && new_size < old_size) { 472 attr_b->nres.data_size = cpu_to_le64(new_size); 473 mi_b->dirty = true; 474 goto ok; 475 } 476 477 vcn = old_alen - 1; 478 479 svcn = le64_to_cpu(attr_b->nres.svcn); 480 evcn = le64_to_cpu(attr_b->nres.evcn); 481 482 if (svcn <= vcn && vcn <= evcn) { 483 attr = attr_b; 484 le = le_b; 485 mi = mi_b; 486 } else if (!le_b) { 487 err = -EINVAL; 488 goto out; 489 } else { 490 le = le_b; 491 attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn, 492 &mi); 493 if (!attr) { 494 err = -EINVAL; 495 goto out; 496 } 497 498 next_le_1: 499 svcn = le64_to_cpu(attr->nres.svcn); 500 evcn = le64_to_cpu(attr->nres.evcn); 501 } 502 503 next_le: 504 rec = mi->mrec; 505 506 err = attr_load_runs(attr, ni, run, NULL); 507 if (err) 508 goto out; 509 510 if (new_size > old_size) { 511 CLST to_allocate; 512 size_t free; 513 514 if (new_alloc <= old_alloc) { 515 attr_b->nres.data_size = cpu_to_le64(new_size); 516 mi_b->dirty = true; 517 goto ok; 518 } 519 520 to_allocate = new_alen - old_alen; 521 add_alloc_in_same_attr_seg: 522 lcn = 0; 523 if (is_mft) { 524 /* MFT allocates clusters from MFT zone. */ 525 pre_alloc = 0; 526 } else if (is_ext) { 527 /* No preallocate for sparse/compress. */ 528 pre_alloc = 0; 529 } else if (pre_alloc == -1) { 530 pre_alloc = 0; 531 if (type == ATTR_DATA && !name_len && 532 sbi->options.prealloc) { 533 CLST new_alen2 = bytes_to_cluster( 534 sbi, get_pre_allocated(new_size)); 535 pre_alloc = new_alen2 - new_alen; 536 } 537 538 /* Get the last LCN to allocate from. */ 539 if (old_alen && 540 !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) { 541 lcn = SPARSE_LCN; 542 } 543 544 if (lcn == SPARSE_LCN) 545 lcn = 0; 546 else if (lcn) 547 lcn += 1; 548 549 free = wnd_zeroes(&sbi->used.bitmap); 550 if (to_allocate > free) { 551 err = -ENOSPC; 552 goto out; 553 } 554 555 if (pre_alloc && to_allocate + pre_alloc > free) 556 pre_alloc = 0; 557 } 558 559 vcn = old_alen; 560 561 if (is_ext) { 562 if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate, 563 false)) { 564 err = -ENOMEM; 565 goto out; 566 } 567 alen = to_allocate; 568 } else { 569 /* ~3 bytes per fragment. */ 570 err = attr_allocate_clusters( 571 sbi, run, vcn, lcn, to_allocate, &pre_alloc, 572 is_mft ? ALLOCATE_MFT : 0, &alen, 573 is_mft ? 0 574 : (sbi->record_size - 575 le32_to_cpu(rec->used) + 8) / 576 3 + 577 1, 578 NULL); 579 if (err) 580 goto out; 581 } 582 583 done += alen; 584 vcn += alen; 585 if (to_allocate > alen) 586 to_allocate -= alen; 587 else 588 to_allocate = 0; 589 590 pack_runs: 591 err = mi_pack_runs(mi, attr, run, vcn - svcn); 592 if (err) 593 goto out; 594 595 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 596 new_alloc_tmp = (u64)next_svcn << cluster_bits; 597 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); 598 mi_b->dirty = true; 599 600 if (next_svcn >= vcn && !to_allocate) { 601 /* Normal way. Update attribute and exit. */ 602 attr_b->nres.data_size = cpu_to_le64(new_size); 603 goto ok; 604 } 605 606 /* At least two MFT to avoid recursive loop. */ 607 if (is_mft && next_svcn == vcn && 608 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) { 609 new_size = new_alloc_tmp; 610 attr_b->nres.data_size = attr_b->nres.alloc_size; 611 goto ok; 612 } 613 614 if (le32_to_cpu(rec->used) < sbi->record_size) { 615 old_alen = next_svcn; 616 evcn = old_alen - 1; 617 goto add_alloc_in_same_attr_seg; 618 } 619 620 attr_b->nres.data_size = attr_b->nres.alloc_size; 621 if (new_alloc_tmp < old_valid) 622 attr_b->nres.valid_size = attr_b->nres.data_size; 623 624 if (type == ATTR_LIST) { 625 err = ni_expand_list(ni); 626 if (err) 627 goto out; 628 if (next_svcn < vcn) 629 goto pack_runs; 630 631 /* Layout of records is changed. */ 632 goto again; 633 } 634 635 if (!ni->attr_list.size) { 636 err = ni_create_attr_list(ni); 637 if (err) 638 goto out; 639 /* Layout of records is changed. */ 640 } 641 642 if (next_svcn >= vcn) { 643 /* This is MFT data, repeat. */ 644 goto again; 645 } 646 647 /* Insert new attribute segment. */ 648 err = ni_insert_nonresident(ni, type, name, name_len, run, 649 next_svcn, vcn - next_svcn, 650 attr_b->flags, &attr, &mi); 651 if (err) 652 goto out; 653 654 if (!is_mft) 655 run_truncate_head(run, evcn + 1); 656 657 svcn = le64_to_cpu(attr->nres.svcn); 658 evcn = le64_to_cpu(attr->nres.evcn); 659 660 le_b = NULL; 661 /* 662 * Layout of records maybe changed. 663 * Find base attribute to update. 664 */ 665 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, 666 NULL, &mi_b); 667 if (!attr_b) { 668 err = -ENOENT; 669 goto out; 670 } 671 672 attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits); 673 attr_b->nres.data_size = attr_b->nres.alloc_size; 674 attr_b->nres.valid_size = attr_b->nres.alloc_size; 675 mi_b->dirty = true; 676 goto again_1; 677 } 678 679 if (new_size != old_size || 680 (new_alloc != old_alloc && !keep_prealloc)) { 681 vcn = max(svcn, new_alen); 682 new_alloc_tmp = (u64)vcn << cluster_bits; 683 684 alen = 0; 685 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen, 686 true); 687 if (err) 688 goto out; 689 690 run_truncate(run, vcn); 691 692 if (vcn > svcn) { 693 err = mi_pack_runs(mi, attr, run, vcn - svcn); 694 if (err) 695 goto out; 696 } else if (le && le->vcn) { 697 u16 le_sz = le16_to_cpu(le->size); 698 699 /* 700 * NOTE: List entries for one attribute are always 701 * the same size. We deal with last entry (vcn==0) 702 * and it is not first in entries array 703 * (list entry for std attribute always first). 704 * So it is safe to step back. 705 */ 706 mi_remove_attr(NULL, mi, attr); 707 708 if (!al_remove_le(ni, le)) { 709 err = -EINVAL; 710 goto out; 711 } 712 713 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); 714 } else { 715 attr->nres.evcn = cpu_to_le64((u64)vcn - 1); 716 mi->dirty = true; 717 } 718 719 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); 720 721 if (vcn == new_alen) { 722 attr_b->nres.data_size = cpu_to_le64(new_size); 723 if (new_size < old_valid) 724 attr_b->nres.valid_size = 725 attr_b->nres.data_size; 726 } else { 727 if (new_alloc_tmp <= 728 le64_to_cpu(attr_b->nres.data_size)) 729 attr_b->nres.data_size = 730 attr_b->nres.alloc_size; 731 if (new_alloc_tmp < 732 le64_to_cpu(attr_b->nres.valid_size)) 733 attr_b->nres.valid_size = 734 attr_b->nres.alloc_size; 735 } 736 737 if (is_ext) 738 le64_sub_cpu(&attr_b->nres.total_size, 739 ((u64)alen << cluster_bits)); 740 741 mi_b->dirty = true; 742 743 if (new_alloc_tmp <= new_alloc) 744 goto ok; 745 746 old_size = new_alloc_tmp; 747 vcn = svcn - 1; 748 749 if (le == le_b) { 750 attr = attr_b; 751 mi = mi_b; 752 evcn = svcn - 1; 753 svcn = 0; 754 goto next_le; 755 } 756 757 if (le->type != type || le->name_len != name_len || 758 memcmp(le_name(le), name, name_len * sizeof(short))) { 759 err = -EINVAL; 760 goto out; 761 } 762 763 err = ni_load_mi(ni, le, &mi); 764 if (err) 765 goto out; 766 767 attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id); 768 if (!attr) { 769 err = -EINVAL; 770 goto out; 771 } 772 goto next_le_1; 773 } 774 775 ok: 776 if (new_valid) { 777 __le64 valid = cpu_to_le64(min(*new_valid, new_size)); 778 779 if (attr_b->nres.valid_size != valid) { 780 attr_b->nres.valid_size = valid; 781 mi_b->dirty = true; 782 } 783 } 784 785 out: 786 if (!err && attr_b && ret) 787 *ret = attr_b; 788 789 /* Update inode_set_bytes. */ 790 if (!err && ((type == ATTR_DATA && !name_len) || 791 (type == ATTR_ALLOC && name == I30_NAME))) { 792 bool dirty = false; 793 794 if (ni->vfs_inode.i_size != new_size) { 795 ni->vfs_inode.i_size = new_size; 796 dirty = true; 797 } 798 799 if (attr_b && attr_b->non_res) { 800 new_alloc = le64_to_cpu(attr_b->nres.alloc_size); 801 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) { 802 inode_set_bytes(&ni->vfs_inode, new_alloc); 803 dirty = true; 804 } 805 } 806 807 if (dirty) { 808 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 809 mark_inode_dirty(&ni->vfs_inode); 810 } 811 } 812 813 return err; 814 } 815 816 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, 817 CLST *len, bool *new) 818 { 819 int err = 0; 820 struct runs_tree *run = &ni->file.run; 821 struct ntfs_sb_info *sbi; 822 u8 cluster_bits; 823 struct ATTRIB *attr = NULL, *attr_b; 824 struct ATTR_LIST_ENTRY *le, *le_b; 825 struct mft_inode *mi, *mi_b; 826 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end; 827 u64 total_size; 828 u32 clst_per_frame; 829 bool ok; 830 831 if (new) 832 *new = false; 833 834 down_read(&ni->file.run_lock); 835 ok = run_lookup_entry(run, vcn, lcn, len, NULL); 836 up_read(&ni->file.run_lock); 837 838 if (ok && (*lcn != SPARSE_LCN || !new)) { 839 /* Normal way. */ 840 return 0; 841 } 842 843 if (!clen) 844 clen = 1; 845 846 if (ok && clen > *len) 847 clen = *len; 848 849 sbi = ni->mi.sbi; 850 cluster_bits = sbi->cluster_bits; 851 852 ni_lock(ni); 853 down_write(&ni->file.run_lock); 854 855 le_b = NULL; 856 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); 857 if (!attr_b) { 858 err = -ENOENT; 859 goto out; 860 } 861 862 if (!attr_b->non_res) { 863 *lcn = RESIDENT_LCN; 864 *len = 1; 865 goto out; 866 } 867 868 asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits; 869 if (vcn >= asize) { 870 err = -EINVAL; 871 goto out; 872 } 873 874 clst_per_frame = 1u << attr_b->nres.c_unit; 875 to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1); 876 877 if (vcn + to_alloc > asize) 878 to_alloc = asize - vcn; 879 880 svcn = le64_to_cpu(attr_b->nres.svcn); 881 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; 882 883 attr = attr_b; 884 le = le_b; 885 mi = mi_b; 886 887 if (le_b && (vcn < svcn || evcn1 <= vcn)) { 888 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, 889 &mi); 890 if (!attr) { 891 err = -EINVAL; 892 goto out; 893 } 894 svcn = le64_to_cpu(attr->nres.svcn); 895 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 896 } 897 898 err = attr_load_runs(attr, ni, run, NULL); 899 if (err) 900 goto out; 901 902 if (!ok) { 903 ok = run_lookup_entry(run, vcn, lcn, len, NULL); 904 if (ok && (*lcn != SPARSE_LCN || !new)) { 905 /* Normal way. */ 906 err = 0; 907 goto ok; 908 } 909 910 if (!ok && !new) { 911 *len = 0; 912 err = 0; 913 goto ok; 914 } 915 916 if (ok && clen > *len) { 917 clen = *len; 918 to_alloc = (clen + clst_per_frame - 1) & 919 ~(clst_per_frame - 1); 920 } 921 } 922 923 if (!is_attr_ext(attr_b)) { 924 err = -EINVAL; 925 goto out; 926 } 927 928 /* Get the last LCN to allocate from. */ 929 hint = 0; 930 931 if (vcn > evcn1) { 932 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1, 933 false)) { 934 err = -ENOMEM; 935 goto out; 936 } 937 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) { 938 hint = -1; 939 } 940 941 err = attr_allocate_clusters( 942 sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len, 943 (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1, 944 lcn); 945 if (err) 946 goto out; 947 *new = true; 948 949 end = vcn + *len; 950 951 total_size = le64_to_cpu(attr_b->nres.total_size) + 952 ((u64)*len << cluster_bits); 953 954 repack: 955 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); 956 if (err) 957 goto out; 958 959 attr_b->nres.total_size = cpu_to_le64(total_size); 960 inode_set_bytes(&ni->vfs_inode, total_size); 961 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 962 963 mi_b->dirty = true; 964 mark_inode_dirty(&ni->vfs_inode); 965 966 /* Stored [vcn : next_svcn) from [vcn : end). */ 967 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 968 969 if (end <= evcn1) { 970 if (next_svcn == evcn1) { 971 /* Normal way. Update attribute and exit. */ 972 goto ok; 973 } 974 /* Add new segment [next_svcn : evcn1 - next_svcn). */ 975 if (!ni->attr_list.size) { 976 err = ni_create_attr_list(ni); 977 if (err) 978 goto out; 979 /* Layout of records is changed. */ 980 le_b = NULL; 981 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 982 0, NULL, &mi_b); 983 if (!attr_b) { 984 err = -ENOENT; 985 goto out; 986 } 987 988 attr = attr_b; 989 le = le_b; 990 mi = mi_b; 991 goto repack; 992 } 993 } 994 995 svcn = evcn1; 996 997 /* Estimate next attribute. */ 998 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi); 999 1000 if (attr) { 1001 CLST alloc = bytes_to_cluster( 1002 sbi, le64_to_cpu(attr_b->nres.alloc_size)); 1003 CLST evcn = le64_to_cpu(attr->nres.evcn); 1004 1005 if (end < next_svcn) 1006 end = next_svcn; 1007 while (end > evcn) { 1008 /* Remove segment [svcn : evcn). */ 1009 mi_remove_attr(NULL, mi, attr); 1010 1011 if (!al_remove_le(ni, le)) { 1012 err = -EINVAL; 1013 goto out; 1014 } 1015 1016 if (evcn + 1 >= alloc) { 1017 /* Last attribute segment. */ 1018 evcn1 = evcn + 1; 1019 goto ins_ext; 1020 } 1021 1022 if (ni_load_mi(ni, le, &mi)) { 1023 attr = NULL; 1024 goto out; 1025 } 1026 1027 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, 1028 &le->id); 1029 if (!attr) { 1030 err = -EINVAL; 1031 goto out; 1032 } 1033 svcn = le64_to_cpu(attr->nres.svcn); 1034 evcn = le64_to_cpu(attr->nres.evcn); 1035 } 1036 1037 if (end < svcn) 1038 end = svcn; 1039 1040 err = attr_load_runs(attr, ni, run, &end); 1041 if (err) 1042 goto out; 1043 1044 evcn1 = evcn + 1; 1045 attr->nres.svcn = cpu_to_le64(next_svcn); 1046 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); 1047 if (err) 1048 goto out; 1049 1050 le->vcn = cpu_to_le64(next_svcn); 1051 ni->attr_list.dirty = true; 1052 mi->dirty = true; 1053 1054 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 1055 } 1056 ins_ext: 1057 if (evcn1 > next_svcn) { 1058 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, 1059 next_svcn, evcn1 - next_svcn, 1060 attr_b->flags, &attr, &mi); 1061 if (err) 1062 goto out; 1063 } 1064 ok: 1065 run_truncate_around(run, vcn); 1066 out: 1067 up_write(&ni->file.run_lock); 1068 ni_unlock(ni); 1069 1070 return err; 1071 } 1072 1073 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page) 1074 { 1075 u64 vbo; 1076 struct ATTRIB *attr; 1077 u32 data_size; 1078 1079 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL); 1080 if (!attr) 1081 return -EINVAL; 1082 1083 if (attr->non_res) 1084 return E_NTFS_NONRESIDENT; 1085 1086 vbo = page->index << PAGE_SHIFT; 1087 data_size = le32_to_cpu(attr->res.data_size); 1088 if (vbo < data_size) { 1089 const char *data = resident_data(attr); 1090 char *kaddr = kmap_atomic(page); 1091 u32 use = data_size - vbo; 1092 1093 if (use > PAGE_SIZE) 1094 use = PAGE_SIZE; 1095 1096 memcpy(kaddr, data + vbo, use); 1097 memset(kaddr + use, 0, PAGE_SIZE - use); 1098 kunmap_atomic(kaddr); 1099 flush_dcache_page(page); 1100 SetPageUptodate(page); 1101 } else if (!PageUptodate(page)) { 1102 zero_user_segment(page, 0, PAGE_SIZE); 1103 SetPageUptodate(page); 1104 } 1105 1106 return 0; 1107 } 1108 1109 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page) 1110 { 1111 u64 vbo; 1112 struct mft_inode *mi; 1113 struct ATTRIB *attr; 1114 u32 data_size; 1115 1116 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi); 1117 if (!attr) 1118 return -EINVAL; 1119 1120 if (attr->non_res) { 1121 /* Return special error code to check this case. */ 1122 return E_NTFS_NONRESIDENT; 1123 } 1124 1125 vbo = page->index << PAGE_SHIFT; 1126 data_size = le32_to_cpu(attr->res.data_size); 1127 if (vbo < data_size) { 1128 char *data = resident_data(attr); 1129 char *kaddr = kmap_atomic(page); 1130 u32 use = data_size - vbo; 1131 1132 if (use > PAGE_SIZE) 1133 use = PAGE_SIZE; 1134 memcpy(data + vbo, kaddr, use); 1135 kunmap_atomic(kaddr); 1136 mi->dirty = true; 1137 } 1138 ni->i_valid = data_size; 1139 1140 return 0; 1141 } 1142 1143 /* 1144 * attr_load_runs_vcn - Load runs with VCN. 1145 */ 1146 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, 1147 const __le16 *name, u8 name_len, struct runs_tree *run, 1148 CLST vcn) 1149 { 1150 struct ATTRIB *attr; 1151 int err; 1152 CLST svcn, evcn; 1153 u16 ro; 1154 1155 attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL); 1156 if (!attr) { 1157 /* Is record corrupted? */ 1158 return -ENOENT; 1159 } 1160 1161 svcn = le64_to_cpu(attr->nres.svcn); 1162 evcn = le64_to_cpu(attr->nres.evcn); 1163 1164 if (evcn < vcn || vcn < svcn) { 1165 /* Is record corrupted? */ 1166 return -EINVAL; 1167 } 1168 1169 ro = le16_to_cpu(attr->nres.run_off); 1170 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn, 1171 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro); 1172 if (err < 0) 1173 return err; 1174 return 0; 1175 } 1176 1177 /* 1178 * attr_load_runs_range - Load runs for given range [from to). 1179 */ 1180 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type, 1181 const __le16 *name, u8 name_len, struct runs_tree *run, 1182 u64 from, u64 to) 1183 { 1184 struct ntfs_sb_info *sbi = ni->mi.sbi; 1185 u8 cluster_bits = sbi->cluster_bits; 1186 CLST vcn = from >> cluster_bits; 1187 CLST vcn_last = (to - 1) >> cluster_bits; 1188 CLST lcn, clen; 1189 int err; 1190 1191 for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) { 1192 if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) { 1193 err = attr_load_runs_vcn(ni, type, name, name_len, run, 1194 vcn); 1195 if (err) 1196 return err; 1197 clen = 0; /* Next run_lookup_entry(vcn) must be success. */ 1198 } 1199 } 1200 1201 return 0; 1202 } 1203 1204 #ifdef CONFIG_NTFS3_LZX_XPRESS 1205 /* 1206 * attr_wof_frame_info 1207 * 1208 * Read header of Xpress/LZX file to get info about frame. 1209 */ 1210 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr, 1211 struct runs_tree *run, u64 frame, u64 frames, 1212 u8 frame_bits, u32 *ondisk_size, u64 *vbo_data) 1213 { 1214 struct ntfs_sb_info *sbi = ni->mi.sbi; 1215 u64 vbo[2], off[2], wof_size; 1216 u32 voff; 1217 u8 bytes_per_off; 1218 char *addr; 1219 struct page *page; 1220 int i, err; 1221 __le32 *off32; 1222 __le64 *off64; 1223 1224 if (ni->vfs_inode.i_size < 0x100000000ull) { 1225 /* File starts with array of 32 bit offsets. */ 1226 bytes_per_off = sizeof(__le32); 1227 vbo[1] = frame << 2; 1228 *vbo_data = frames << 2; 1229 } else { 1230 /* File starts with array of 64 bit offsets. */ 1231 bytes_per_off = sizeof(__le64); 1232 vbo[1] = frame << 3; 1233 *vbo_data = frames << 3; 1234 } 1235 1236 /* 1237 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts. 1238 * Read 4/8 bytes at [vbo] == offset where compressed frame ends. 1239 */ 1240 if (!attr->non_res) { 1241 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) { 1242 ntfs_inode_err(&ni->vfs_inode, "is corrupted"); 1243 return -EINVAL; 1244 } 1245 addr = resident_data(attr); 1246 1247 if (bytes_per_off == sizeof(__le32)) { 1248 off32 = Add2Ptr(addr, vbo[1]); 1249 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0; 1250 off[1] = le32_to_cpu(off32[0]); 1251 } else { 1252 off64 = Add2Ptr(addr, vbo[1]); 1253 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0; 1254 off[1] = le64_to_cpu(off64[0]); 1255 } 1256 1257 *vbo_data += off[0]; 1258 *ondisk_size = off[1] - off[0]; 1259 return 0; 1260 } 1261 1262 wof_size = le64_to_cpu(attr->nres.data_size); 1263 down_write(&ni->file.run_lock); 1264 page = ni->file.offs_page; 1265 if (!page) { 1266 page = alloc_page(GFP_KERNEL); 1267 if (!page) { 1268 err = -ENOMEM; 1269 goto out; 1270 } 1271 page->index = -1; 1272 ni->file.offs_page = page; 1273 } 1274 lock_page(page); 1275 addr = page_address(page); 1276 1277 if (vbo[1]) { 1278 voff = vbo[1] & (PAGE_SIZE - 1); 1279 vbo[0] = vbo[1] - bytes_per_off; 1280 i = 0; 1281 } else { 1282 voff = 0; 1283 vbo[0] = 0; 1284 off[0] = 0; 1285 i = 1; 1286 } 1287 1288 do { 1289 pgoff_t index = vbo[i] >> PAGE_SHIFT; 1290 1291 if (index != page->index) { 1292 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); 1293 u64 to = min(from + PAGE_SIZE, wof_size); 1294 1295 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME, 1296 ARRAY_SIZE(WOF_NAME), run, 1297 from, to); 1298 if (err) 1299 goto out1; 1300 1301 err = ntfs_bio_pages(sbi, run, &page, 1, from, 1302 to - from, REQ_OP_READ); 1303 if (err) { 1304 page->index = -1; 1305 goto out1; 1306 } 1307 page->index = index; 1308 } 1309 1310 if (i) { 1311 if (bytes_per_off == sizeof(__le32)) { 1312 off32 = Add2Ptr(addr, voff); 1313 off[1] = le32_to_cpu(*off32); 1314 } else { 1315 off64 = Add2Ptr(addr, voff); 1316 off[1] = le64_to_cpu(*off64); 1317 } 1318 } else if (!voff) { 1319 if (bytes_per_off == sizeof(__le32)) { 1320 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32)); 1321 off[0] = le32_to_cpu(*off32); 1322 } else { 1323 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64)); 1324 off[0] = le64_to_cpu(*off64); 1325 } 1326 } else { 1327 /* Two values in one page. */ 1328 if (bytes_per_off == sizeof(__le32)) { 1329 off32 = Add2Ptr(addr, voff); 1330 off[0] = le32_to_cpu(off32[-1]); 1331 off[1] = le32_to_cpu(off32[0]); 1332 } else { 1333 off64 = Add2Ptr(addr, voff); 1334 off[0] = le64_to_cpu(off64[-1]); 1335 off[1] = le64_to_cpu(off64[0]); 1336 } 1337 break; 1338 } 1339 } while (++i < 2); 1340 1341 *vbo_data += off[0]; 1342 *ondisk_size = off[1] - off[0]; 1343 1344 out1: 1345 unlock_page(page); 1346 out: 1347 up_write(&ni->file.run_lock); 1348 return err; 1349 } 1350 #endif 1351 1352 /* 1353 * attr_is_frame_compressed - Used to detect compressed frame. 1354 */ 1355 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr, 1356 CLST frame, CLST *clst_data) 1357 { 1358 int err; 1359 u32 clst_frame; 1360 CLST clen, lcn, vcn, alen, slen, vcn_next; 1361 size_t idx; 1362 struct runs_tree *run; 1363 1364 *clst_data = 0; 1365 1366 if (!is_attr_compressed(attr)) 1367 return 0; 1368 1369 if (!attr->non_res) 1370 return 0; 1371 1372 clst_frame = 1u << attr->nres.c_unit; 1373 vcn = frame * clst_frame; 1374 run = &ni->file.run; 1375 1376 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) { 1377 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), 1378 attr->name_len, run, vcn); 1379 if (err) 1380 return err; 1381 1382 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) 1383 return -EINVAL; 1384 } 1385 1386 if (lcn == SPARSE_LCN) { 1387 /* Sparsed frame. */ 1388 return 0; 1389 } 1390 1391 if (clen >= clst_frame) { 1392 /* 1393 * The frame is not compressed 'cause 1394 * it does not contain any sparse clusters. 1395 */ 1396 *clst_data = clst_frame; 1397 return 0; 1398 } 1399 1400 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size)); 1401 slen = 0; 1402 *clst_data = clen; 1403 1404 /* 1405 * The frame is compressed if *clst_data + slen >= clst_frame. 1406 * Check next fragments. 1407 */ 1408 while ((vcn += clen) < alen) { 1409 vcn_next = vcn; 1410 1411 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) || 1412 vcn_next != vcn) { 1413 err = attr_load_runs_vcn(ni, attr->type, 1414 attr_name(attr), 1415 attr->name_len, run, vcn_next); 1416 if (err) 1417 return err; 1418 vcn = vcn_next; 1419 1420 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) 1421 return -EINVAL; 1422 } 1423 1424 if (lcn == SPARSE_LCN) { 1425 slen += clen; 1426 } else { 1427 if (slen) { 1428 /* 1429 * Data_clusters + sparse_clusters = 1430 * not enough for frame. 1431 */ 1432 return -EINVAL; 1433 } 1434 *clst_data += clen; 1435 } 1436 1437 if (*clst_data + slen >= clst_frame) { 1438 if (!slen) { 1439 /* 1440 * There is no sparsed clusters in this frame 1441 * so it is not compressed. 1442 */ 1443 *clst_data = clst_frame; 1444 } else { 1445 /* Frame is compressed. */ 1446 } 1447 break; 1448 } 1449 } 1450 1451 return 0; 1452 } 1453 1454 /* 1455 * attr_allocate_frame - Allocate/free clusters for @frame. 1456 * 1457 * Assumed: down_write(&ni->file.run_lock); 1458 */ 1459 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, 1460 u64 new_valid) 1461 { 1462 int err = 0; 1463 struct runs_tree *run = &ni->file.run; 1464 struct ntfs_sb_info *sbi = ni->mi.sbi; 1465 struct ATTRIB *attr = NULL, *attr_b; 1466 struct ATTR_LIST_ENTRY *le, *le_b; 1467 struct mft_inode *mi, *mi_b; 1468 CLST svcn, evcn1, next_svcn, lcn, len; 1469 CLST vcn, end, clst_data; 1470 u64 total_size, valid_size, data_size; 1471 1472 le_b = NULL; 1473 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); 1474 if (!attr_b) 1475 return -ENOENT; 1476 1477 if (!is_attr_ext(attr_b)) 1478 return -EINVAL; 1479 1480 vcn = frame << NTFS_LZNT_CUNIT; 1481 total_size = le64_to_cpu(attr_b->nres.total_size); 1482 1483 svcn = le64_to_cpu(attr_b->nres.svcn); 1484 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; 1485 data_size = le64_to_cpu(attr_b->nres.data_size); 1486 1487 if (svcn <= vcn && vcn < evcn1) { 1488 attr = attr_b; 1489 le = le_b; 1490 mi = mi_b; 1491 } else if (!le_b) { 1492 err = -EINVAL; 1493 goto out; 1494 } else { 1495 le = le_b; 1496 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, 1497 &mi); 1498 if (!attr) { 1499 err = -EINVAL; 1500 goto out; 1501 } 1502 svcn = le64_to_cpu(attr->nres.svcn); 1503 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 1504 } 1505 1506 err = attr_load_runs(attr, ni, run, NULL); 1507 if (err) 1508 goto out; 1509 1510 err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data); 1511 if (err) 1512 goto out; 1513 1514 total_size -= (u64)clst_data << sbi->cluster_bits; 1515 1516 len = bytes_to_cluster(sbi, compr_size); 1517 1518 if (len == clst_data) 1519 goto out; 1520 1521 if (len < clst_data) { 1522 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len, 1523 NULL, true); 1524 if (err) 1525 goto out; 1526 1527 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len, 1528 false)) { 1529 err = -ENOMEM; 1530 goto out; 1531 } 1532 end = vcn + clst_data; 1533 /* Run contains updated range [vcn + len : end). */ 1534 } else { 1535 CLST alen, hint = 0; 1536 /* Get the last LCN to allocate from. */ 1537 if (vcn + clst_data && 1538 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL, 1539 NULL)) { 1540 hint = -1; 1541 } 1542 1543 err = attr_allocate_clusters(sbi, run, vcn + clst_data, 1544 hint + 1, len - clst_data, NULL, 0, 1545 &alen, 0, &lcn); 1546 if (err) 1547 goto out; 1548 1549 end = vcn + len; 1550 /* Run contains updated range [vcn + clst_data : end). */ 1551 } 1552 1553 total_size += (u64)len << sbi->cluster_bits; 1554 1555 repack: 1556 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); 1557 if (err) 1558 goto out; 1559 1560 attr_b->nres.total_size = cpu_to_le64(total_size); 1561 inode_set_bytes(&ni->vfs_inode, total_size); 1562 1563 mi_b->dirty = true; 1564 mark_inode_dirty(&ni->vfs_inode); 1565 1566 /* Stored [vcn : next_svcn) from [vcn : end). */ 1567 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 1568 1569 if (end <= evcn1) { 1570 if (next_svcn == evcn1) { 1571 /* Normal way. Update attribute and exit. */ 1572 goto ok; 1573 } 1574 /* Add new segment [next_svcn : evcn1 - next_svcn). */ 1575 if (!ni->attr_list.size) { 1576 err = ni_create_attr_list(ni); 1577 if (err) 1578 goto out; 1579 /* Layout of records is changed. */ 1580 le_b = NULL; 1581 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 1582 0, NULL, &mi_b); 1583 if (!attr_b) { 1584 err = -ENOENT; 1585 goto out; 1586 } 1587 1588 attr = attr_b; 1589 le = le_b; 1590 mi = mi_b; 1591 goto repack; 1592 } 1593 } 1594 1595 svcn = evcn1; 1596 1597 /* Estimate next attribute. */ 1598 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi); 1599 1600 if (attr) { 1601 CLST alloc = bytes_to_cluster( 1602 sbi, le64_to_cpu(attr_b->nres.alloc_size)); 1603 CLST evcn = le64_to_cpu(attr->nres.evcn); 1604 1605 if (end < next_svcn) 1606 end = next_svcn; 1607 while (end > evcn) { 1608 /* Remove segment [svcn : evcn). */ 1609 mi_remove_attr(NULL, mi, attr); 1610 1611 if (!al_remove_le(ni, le)) { 1612 err = -EINVAL; 1613 goto out; 1614 } 1615 1616 if (evcn + 1 >= alloc) { 1617 /* Last attribute segment. */ 1618 evcn1 = evcn + 1; 1619 goto ins_ext; 1620 } 1621 1622 if (ni_load_mi(ni, le, &mi)) { 1623 attr = NULL; 1624 goto out; 1625 } 1626 1627 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, 1628 &le->id); 1629 if (!attr) { 1630 err = -EINVAL; 1631 goto out; 1632 } 1633 svcn = le64_to_cpu(attr->nres.svcn); 1634 evcn = le64_to_cpu(attr->nres.evcn); 1635 } 1636 1637 if (end < svcn) 1638 end = svcn; 1639 1640 err = attr_load_runs(attr, ni, run, &end); 1641 if (err) 1642 goto out; 1643 1644 evcn1 = evcn + 1; 1645 attr->nres.svcn = cpu_to_le64(next_svcn); 1646 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); 1647 if (err) 1648 goto out; 1649 1650 le->vcn = cpu_to_le64(next_svcn); 1651 ni->attr_list.dirty = true; 1652 mi->dirty = true; 1653 1654 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 1655 } 1656 ins_ext: 1657 if (evcn1 > next_svcn) { 1658 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, 1659 next_svcn, evcn1 - next_svcn, 1660 attr_b->flags, &attr, &mi); 1661 if (err) 1662 goto out; 1663 } 1664 ok: 1665 run_truncate_around(run, vcn); 1666 out: 1667 if (new_valid > data_size) 1668 new_valid = data_size; 1669 1670 valid_size = le64_to_cpu(attr_b->nres.valid_size); 1671 if (new_valid != valid_size) { 1672 attr_b->nres.valid_size = cpu_to_le64(valid_size); 1673 mi_b->dirty = true; 1674 } 1675 1676 return err; 1677 } 1678 1679 /* 1680 * attr_collapse_range - Collapse range in file. 1681 */ 1682 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes) 1683 { 1684 int err = 0; 1685 struct runs_tree *run = &ni->file.run; 1686 struct ntfs_sb_info *sbi = ni->mi.sbi; 1687 struct ATTRIB *attr = NULL, *attr_b; 1688 struct ATTR_LIST_ENTRY *le, *le_b; 1689 struct mft_inode *mi, *mi_b; 1690 CLST svcn, evcn1, len, dealloc, alen; 1691 CLST vcn, end; 1692 u64 valid_size, data_size, alloc_size, total_size; 1693 u32 mask; 1694 __le16 a_flags; 1695 1696 if (!bytes) 1697 return 0; 1698 1699 le_b = NULL; 1700 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); 1701 if (!attr_b) 1702 return -ENOENT; 1703 1704 if (!attr_b->non_res) { 1705 /* Attribute is resident. Nothing to do? */ 1706 return 0; 1707 } 1708 1709 data_size = le64_to_cpu(attr_b->nres.data_size); 1710 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); 1711 a_flags = attr_b->flags; 1712 1713 if (is_attr_ext(attr_b)) { 1714 total_size = le64_to_cpu(attr_b->nres.total_size); 1715 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; 1716 } else { 1717 total_size = alloc_size; 1718 mask = sbi->cluster_mask; 1719 } 1720 1721 if ((vbo & mask) || (bytes & mask)) { 1722 /* Allow to collapse only cluster aligned ranges. */ 1723 return -EINVAL; 1724 } 1725 1726 if (vbo > data_size) 1727 return -EINVAL; 1728 1729 down_write(&ni->file.run_lock); 1730 1731 if (vbo + bytes >= data_size) { 1732 u64 new_valid = min(ni->i_valid, vbo); 1733 1734 /* Simple truncate file at 'vbo'. */ 1735 truncate_setsize(&ni->vfs_inode, vbo); 1736 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo, 1737 &new_valid, true, NULL); 1738 1739 if (!err && new_valid < ni->i_valid) 1740 ni->i_valid = new_valid; 1741 1742 goto out; 1743 } 1744 1745 /* 1746 * Enumerate all attribute segments and collapse. 1747 */ 1748 alen = alloc_size >> sbi->cluster_bits; 1749 vcn = vbo >> sbi->cluster_bits; 1750 len = bytes >> sbi->cluster_bits; 1751 end = vcn + len; 1752 dealloc = 0; 1753 1754 svcn = le64_to_cpu(attr_b->nres.svcn); 1755 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; 1756 1757 if (svcn <= vcn && vcn < evcn1) { 1758 attr = attr_b; 1759 le = le_b; 1760 mi = mi_b; 1761 } else if (!le_b) { 1762 err = -EINVAL; 1763 goto out; 1764 } else { 1765 le = le_b; 1766 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, 1767 &mi); 1768 if (!attr) { 1769 err = -EINVAL; 1770 goto out; 1771 } 1772 1773 svcn = le64_to_cpu(attr->nres.svcn); 1774 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 1775 } 1776 1777 for (;;) { 1778 if (svcn >= end) { 1779 /* Shift VCN- */ 1780 attr->nres.svcn = cpu_to_le64(svcn - len); 1781 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len); 1782 if (le) { 1783 le->vcn = attr->nres.svcn; 1784 ni->attr_list.dirty = true; 1785 } 1786 mi->dirty = true; 1787 } else if (svcn < vcn || end < evcn1) { 1788 CLST vcn1, eat, next_svcn; 1789 1790 /* Collapse a part of this attribute segment. */ 1791 err = attr_load_runs(attr, ni, run, &svcn); 1792 if (err) 1793 goto out; 1794 vcn1 = max(vcn, svcn); 1795 eat = min(end, evcn1) - vcn1; 1796 1797 err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, 1798 true); 1799 if (err) 1800 goto out; 1801 1802 if (!run_collapse_range(run, vcn1, eat)) { 1803 err = -ENOMEM; 1804 goto out; 1805 } 1806 1807 if (svcn >= vcn) { 1808 /* Shift VCN */ 1809 attr->nres.svcn = cpu_to_le64(vcn); 1810 if (le) { 1811 le->vcn = attr->nres.svcn; 1812 ni->attr_list.dirty = true; 1813 } 1814 } 1815 1816 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat); 1817 if (err) 1818 goto out; 1819 1820 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 1821 if (next_svcn + eat < evcn1) { 1822 err = ni_insert_nonresident( 1823 ni, ATTR_DATA, NULL, 0, run, next_svcn, 1824 evcn1 - eat - next_svcn, a_flags, &attr, 1825 &mi); 1826 if (err) 1827 goto out; 1828 1829 /* Layout of records maybe changed. */ 1830 attr_b = NULL; 1831 le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0, 1832 &next_svcn); 1833 if (!le) { 1834 err = -EINVAL; 1835 goto out; 1836 } 1837 } 1838 1839 /* Free all allocated memory. */ 1840 run_truncate(run, 0); 1841 } else { 1842 u16 le_sz; 1843 u16 roff = le16_to_cpu(attr->nres.run_off); 1844 1845 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, 1846 evcn1 - 1, svcn, Add2Ptr(attr, roff), 1847 le32_to_cpu(attr->size) - roff); 1848 1849 /* Delete this attribute segment. */ 1850 mi_remove_attr(NULL, mi, attr); 1851 if (!le) 1852 break; 1853 1854 le_sz = le16_to_cpu(le->size); 1855 if (!al_remove_le(ni, le)) { 1856 err = -EINVAL; 1857 goto out; 1858 } 1859 1860 if (evcn1 >= alen) 1861 break; 1862 1863 if (!svcn) { 1864 /* Load next record that contains this attribute. */ 1865 if (ni_load_mi(ni, le, &mi)) { 1866 err = -EINVAL; 1867 goto out; 1868 } 1869 1870 /* Look for required attribute. */ 1871 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 1872 0, &le->id); 1873 if (!attr) { 1874 err = -EINVAL; 1875 goto out; 1876 } 1877 goto next_attr; 1878 } 1879 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); 1880 } 1881 1882 if (evcn1 >= alen) 1883 break; 1884 1885 attr = ni_enum_attr_ex(ni, attr, &le, &mi); 1886 if (!attr) { 1887 err = -EINVAL; 1888 goto out; 1889 } 1890 1891 next_attr: 1892 svcn = le64_to_cpu(attr->nres.svcn); 1893 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 1894 } 1895 1896 if (!attr_b) { 1897 le_b = NULL; 1898 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, 1899 &mi_b); 1900 if (!attr_b) { 1901 err = -ENOENT; 1902 goto out; 1903 } 1904 } 1905 1906 data_size -= bytes; 1907 valid_size = ni->i_valid; 1908 if (vbo + bytes <= valid_size) 1909 valid_size -= bytes; 1910 else if (vbo < valid_size) 1911 valid_size = vbo; 1912 1913 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes); 1914 attr_b->nres.data_size = cpu_to_le64(data_size); 1915 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size)); 1916 total_size -= (u64)dealloc << sbi->cluster_bits; 1917 if (is_attr_ext(attr_b)) 1918 attr_b->nres.total_size = cpu_to_le64(total_size); 1919 mi_b->dirty = true; 1920 1921 /* Update inode size. */ 1922 ni->i_valid = valid_size; 1923 ni->vfs_inode.i_size = data_size; 1924 inode_set_bytes(&ni->vfs_inode, total_size); 1925 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 1926 mark_inode_dirty(&ni->vfs_inode); 1927 1928 out: 1929 up_write(&ni->file.run_lock); 1930 if (err) 1931 make_bad_inode(&ni->vfs_inode); 1932 1933 return err; 1934 } 1935 1936 /* 1937 * attr_punch_hole 1938 * 1939 * Not for normal files. 1940 */ 1941 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size) 1942 { 1943 int err = 0; 1944 struct runs_tree *run = &ni->file.run; 1945 struct ntfs_sb_info *sbi = ni->mi.sbi; 1946 struct ATTRIB *attr = NULL, *attr_b; 1947 struct ATTR_LIST_ENTRY *le, *le_b; 1948 struct mft_inode *mi, *mi_b; 1949 CLST svcn, evcn1, vcn, len, end, alen, dealloc; 1950 u64 total_size, alloc_size; 1951 u32 mask; 1952 1953 if (!bytes) 1954 return 0; 1955 1956 le_b = NULL; 1957 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); 1958 if (!attr_b) 1959 return -ENOENT; 1960 1961 if (!attr_b->non_res) { 1962 u32 data_size = le32_to_cpu(attr->res.data_size); 1963 u32 from, to; 1964 1965 if (vbo > data_size) 1966 return 0; 1967 1968 from = vbo; 1969 to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size; 1970 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from); 1971 return 0; 1972 } 1973 1974 if (!is_attr_ext(attr_b)) 1975 return -EOPNOTSUPP; 1976 1977 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); 1978 total_size = le64_to_cpu(attr_b->nres.total_size); 1979 1980 if (vbo >= alloc_size) { 1981 /* NOTE: It is allowed. */ 1982 return 0; 1983 } 1984 1985 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; 1986 1987 bytes += vbo; 1988 if (bytes > alloc_size) 1989 bytes = alloc_size; 1990 bytes -= vbo; 1991 1992 if ((vbo & mask) || (bytes & mask)) { 1993 /* We have to zero a range(s). */ 1994 if (frame_size == NULL) { 1995 /* Caller insists range is aligned. */ 1996 return -EINVAL; 1997 } 1998 *frame_size = mask + 1; 1999 return E_NTFS_NOTALIGNED; 2000 } 2001 2002 down_write(&ni->file.run_lock); 2003 /* 2004 * Enumerate all attribute segments and punch hole where necessary. 2005 */ 2006 alen = alloc_size >> sbi->cluster_bits; 2007 vcn = vbo >> sbi->cluster_bits; 2008 len = bytes >> sbi->cluster_bits; 2009 end = vcn + len; 2010 dealloc = 0; 2011 2012 svcn = le64_to_cpu(attr_b->nres.svcn); 2013 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; 2014 2015 if (svcn <= vcn && vcn < evcn1) { 2016 attr = attr_b; 2017 le = le_b; 2018 mi = mi_b; 2019 } else if (!le_b) { 2020 err = -EINVAL; 2021 goto out; 2022 } else { 2023 le = le_b; 2024 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, 2025 &mi); 2026 if (!attr) { 2027 err = -EINVAL; 2028 goto out; 2029 } 2030 2031 svcn = le64_to_cpu(attr->nres.svcn); 2032 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 2033 } 2034 2035 while (svcn < end) { 2036 CLST vcn1, zero, dealloc2; 2037 2038 err = attr_load_runs(attr, ni, run, &svcn); 2039 if (err) 2040 goto out; 2041 vcn1 = max(vcn, svcn); 2042 zero = min(end, evcn1) - vcn1; 2043 2044 dealloc2 = dealloc; 2045 err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true); 2046 if (err) 2047 goto out; 2048 2049 if (dealloc2 == dealloc) { 2050 /* Looks like the required range is already sparsed. */ 2051 } else { 2052 if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, 2053 false)) { 2054 err = -ENOMEM; 2055 goto out; 2056 } 2057 2058 err = mi_pack_runs(mi, attr, run, evcn1 - svcn); 2059 if (err) 2060 goto out; 2061 } 2062 /* Free all allocated memory. */ 2063 run_truncate(run, 0); 2064 2065 if (evcn1 >= alen) 2066 break; 2067 2068 attr = ni_enum_attr_ex(ni, attr, &le, &mi); 2069 if (!attr) { 2070 err = -EINVAL; 2071 goto out; 2072 } 2073 2074 svcn = le64_to_cpu(attr->nres.svcn); 2075 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 2076 } 2077 2078 total_size -= (u64)dealloc << sbi->cluster_bits; 2079 attr_b->nres.total_size = cpu_to_le64(total_size); 2080 mi_b->dirty = true; 2081 2082 /* Update inode size. */ 2083 inode_set_bytes(&ni->vfs_inode, total_size); 2084 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 2085 mark_inode_dirty(&ni->vfs_inode); 2086 2087 out: 2088 up_write(&ni->file.run_lock); 2089 if (err) 2090 make_bad_inode(&ni->vfs_inode); 2091 2092 return err; 2093 } 2094