1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
47 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 + in get_pre_allocated()
52 ret = (((size + clump - 1) >> align_shift)) << align_shift; in get_pre_allocated()
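/*
 * [Illustrative sketch, not part of attrib.c] The computation above rounds
 * 'size' up to the next multiple of the clump, a power of two selected by
 * 'align_shift'. The same arithmetic as a standalone demo; the 64 KiB clump
 * is an assumed sample value:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_pow2(uint64_t size, unsigned int align_shift)
{
	/* Adding (clump - 1) bumps any partial clump to the next boundary. */
	uint64_t clump = 1ull << align_shift;

	return ((size + clump - 1) >> align_shift) << align_shift;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)round_up_pow2(1, 16));     /* 65536 */
	printf("%llu\n", (unsigned long long)round_up_pow2(65536, 16)); /* 65536 */
	printf("%llu\n", (unsigned long long)round_up_pow2(65537, 16)); /* 131072 */
	return 0;
}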
58 * attr_load_runs - Load all runs stored in @attr.
64 CLST svcn = le64_to_cpu(attr->nres.svcn); in attr_load_runs()
65 CLST evcn = le64_to_cpu(attr->nres.evcn); in attr_load_runs()
73 return -EINVAL; in attr_load_runs()
75 asize = le32_to_cpu(attr->size); in attr_load_runs()
76 run_off = le16_to_cpu(attr->nres.run_off); in attr_load_runs()
79 return -EINVAL; in attr_load_runs()
81 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, in attr_load_runs()
83 asize - run_off); in attr_load_runs()
91 * run_deallocate_ex - Deallocate clusters.
106 err = -EINVAL; in run_deallocate_ex()
115 err = -EINVAL; in run_deallocate_ex()
127 len -= clen; in run_deallocate_ex()
134 /* Save memory - don't load the entire run. */ in run_deallocate_ex()
147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
156 size_t cnt = run->count; in attr_allocate_clusters()
162 if (err == -ENOSPC && pre) { in attr_allocate_clusters()
184 err = -ENOMEM; in attr_allocate_clusters()
189 u8 shift = sbi->cluster_bits - SECTOR_SHIFT; in attr_allocate_clusters()
191 err = blkdev_issue_zeroout(sbi->sb->s_bdev, in attr_allocate_clusters()
202 (fr && run->count - cnt >= fr)) { in attr_allocate_clusters()
203 *alen = vcn - vcn0; in attr_allocate_clusters()
207 len -= flen; in attr_allocate_clusters()
212 if (vcn - vcn0) { in attr_allocate_clusters()
213 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false); in attr_allocate_clusters()
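/*
 * [Illustrative sketch] Two things to note above: a first -ENOSPC triggers
 * one retry without preallocation (the 'pre' branch), and if anything fails
 * after clusters were already added, everything in [vcn0, vcn) is released
 * again so the caller never sees a half-built run. The shape of that
 * all-or-nothing pattern with a toy allocator (all names made up):
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t CLST;

/* Toy allocator: hands out at most 4 clusters per call, 10 in total. */
static CLST pool = 10;

static int chunk_alloc(CLST want, CLST *got)
{
	if (!pool)
		return -1;              /* stand-in for -ENOSPC */
	*got = want < 4 ? want : 4;
	if (*got > pool)
		*got = pool;
	pool -= *got;
	return 0;
}

static void chunk_free(CLST from, CLST count)
{
	pool += count;
	printf("rolled back %llu clusters at %llu\n",
	       (unsigned long long)count, (unsigned long long)from);
}

static int alloc_all_or_nothing(CLST vcn0, CLST total)
{
	CLST vcn = vcn0, left = total, got;

	while (left) {
		if (chunk_alloc(left, &got)) {
			/* Undo every cluster added so far: [vcn0, vcn). */
			chunk_free(vcn0, vcn - vcn0);
			return -1;
		}
		vcn += got;
		left -= got;
	}
	return 0;
}

int main(void)
{
	printf("want 8: %d\n", alloc_all_or_nothing(100, 8)); /* fits */
	printf("want 8: %d\n", alloc_all_or_nothing(100, 8)); /* pool exhausted */
	return 0;
}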
223 * If page is not NULL - it already contains resident data in attr_make_nonresident()
240 if (attr->non_res) { in attr_make_nonresident()
245 sbi = mi->sbi; in attr_make_nonresident()
246 rec = mi->mrec; in attr_make_nonresident()
248 used = le32_to_cpu(rec->used); in attr_make_nonresident()
249 asize = le32_to_cpu(attr->size); in attr_make_nonresident()
252 rsize = le32_to_cpu(attr->res.data_size); in attr_make_nonresident()
253 is_data = attr->type == ATTR_DATA && !attr->name_len; in attr_make_nonresident()
255 /* len - how many clusters are required to store 'rsize' bytes */ in attr_make_nonresident()
257 u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT; in attr_make_nonresident()
258 len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT; in attr_make_nonresident()
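/*
 * [Worked example, assumed values] For a compressed attribute the shift is
 * cluster_bits + NTFS_LZNT_CUNIT; with 4 KiB clusters (cluster_bits == 12)
 * and NTFS_LZNT_CUNIT == 4 that is 16, i.e. one compression unit spans
 * 64 KiB == 16 clusters. The line above rounds 'rsize' up to whole
 * compression units, counted in clusters:
 *
 *	rsize = 100:    ((100   + 65535) >> 16) << 4 = 1 << 4 = 16
 *	rsize = 65536:  ((65536 + 65535) >> 16) << 4 = 1 << 4 = 16
 *	rsize = 65537:  ((65537 + 65535) >> 16) << 4 = 2 << 4 = 32
 *
 * So even a 100-byte resident payload is sized as one full 16-cluster
 * compression unit when the attribute goes nonresident.
 */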
268 err = -ENOMEM; in attr_make_nonresident()
273 /* Empty resident -> Empty nonresident. */ in attr_make_nonresident()
285 /* Empty resident -> Non-empty nonresident. */ in attr_make_nonresident()
291 struct address_space *mapping = ni->vfs_inode.i_mapping; in attr_make_nonresident()
310 used -= asize; in attr_make_nonresident()
311 memmove(attr, Add2Ptr(attr, asize), used - aoff); in attr_make_nonresident()
312 rec->used = cpu_to_le32(used); in attr_make_nonresident()
313 mi->dirty = true; in attr_make_nonresident()
317 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s), in attr_make_nonresident()
318 attr_s->name_len, run, 0, alen, in attr_make_nonresident()
319 attr_s->flags, &attr, NULL, NULL); in attr_make_nonresident()
324 attr->nres.data_size = cpu_to_le64(rsize); in attr_make_nonresident()
325 attr->nres.valid_size = attr->nres.data_size; in attr_make_nonresident()
330 ni->ni_flags &= ~NI_FLAG_RESIDENT; in attr_make_nonresident()
337 memmove(next, attr, used - aoff); in attr_make_nonresident()
339 rec->used = cpu_to_le32(used + asize); in attr_make_nonresident()
340 mi->dirty = true; in attr_make_nonresident()
352 * attr_set_size_res - Helper for attr_set_size().
359 struct ntfs_sb_info *sbi = mi->sbi; in attr_set_size_res()
360 struct MFT_REC *rec = mi->mrec; in attr_set_size_res()
361 u32 used = le32_to_cpu(rec->used); in attr_set_size_res()
362 u32 asize = le32_to_cpu(attr->size); in attr_set_size_res()
364 u32 rsize = le32_to_cpu(attr->res.data_size); in attr_set_size_res()
365 u32 tail = used - aoff - asize; in attr_set_size_res()
367 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8); in attr_set_size_res()
372 if (used + dsize > sbi->max_bytes_per_attr) in attr_set_size_res()
382 new_size - rsize); in attr_set_size_res()
384 rec->used = cpu_to_le32(used + dsize); in attr_set_size_res()
385 attr->size = cpu_to_le32(asize + dsize); in attr_set_size_res()
386 attr->res.data_size = cpu_to_le32(new_size); in attr_set_size_res()
387 mi->dirty = true; in attr_set_size_res()
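/*
 * [Worked example, assumed values] Resident data lives 8-byte aligned
 * inside the MFT record, so resizing from 'rsize' to 'new_size' changes
 * the record by dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8) bytes, which
 * may be negative for a shrink:
 *
 *	rsize = 10, new_size = 13:  dsize = 16 - 16 = 0    (tail stays put)
 *	rsize = 10, new_size = 20:  dsize = 24 - 16 = 8    (tail moves up)
 *	rsize = 20, new_size = 4:   dsize = 8  - 24 = -16  (tail moves down)
 *
 * The attribute can stay resident only while 'used + dsize' still fits in
 * max_bytes_per_attr; otherwise it must be made nonresident first.
 */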
394 * attr_set_size - Change the size of an attribute. in attr_set_size()
397 * - Sparse/compressed: No allocated clusters.
398 * - Normal: Append allocated and preallocated new clusters.
400 * - Do not deallocate if @keep_prealloc is set. in attr_set_size()
408 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_set_size()
409 u8 cluster_bits = sbi->cluster_bits; in attr_set_size()
410 bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && in attr_set_size()
417 CLST next_svcn, pre_alloc = -1, done = 0; in attr_set_size()
429 err = -ENOENT; in attr_set_size()
433 if (!attr_b->non_res) { in attr_set_size()
440 if (!attr_b->non_res) { in attr_set_size()
450 align = sbi->cluster_size; in attr_set_size()
452 align <<= attr_b->nres.c_unit; in attr_set_size()
454 old_valid = le64_to_cpu(attr_b->nres.valid_size); in attr_set_size()
455 old_size = le64_to_cpu(attr_b->nres.data_size); in attr_set_size()
456 old_alloc = le64_to_cpu(attr_b->nres.alloc_size); in attr_set_size()
461 new_alloc = (new_size + align - 1) & ~(u64)(align - 1); in attr_set_size()
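/*
 * [Illustrative sketch] The mask form above is the same power-of-two
 * round-up as in get_pre_allocated(), and the (u64) cast is load-bearing:
 * 'align' appears to be a 32-bit value (it comes from cluster_size), so
 * without the cast ~(align - 1) would be truncated to 32 bits before
 * widening, and the high half of a 64-bit size would be masked away.
 * A standalone check:
 */
#include <stdint.h>
#include <assert.h>

static uint64_t align_up(uint64_t x, uint32_t align) /* align: power of two */
{
	return (x + align - 1) & ~(uint64_t)(align - 1);
}

int main(void)
{
	uint64_t x = 0x100000001ull;

	assert(align_up(x, 4096) == 0x100001000ull);
	/* The unwidened 32-bit mask silently drops the high 32 bits: */
	assert(((x + 4095) & ~(4096u - 1)) == 0x1000ull);
	return 0;
}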
465 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
466 mi_b->dirty = dirty = true; in attr_set_size()
470 vcn = old_alen - 1; in attr_set_size()
472 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_set_size()
473 evcn = le64_to_cpu(attr_b->nres.evcn); in attr_set_size()
480 err = -EINVAL; in attr_set_size()
487 err = -EINVAL; in attr_set_size()
492 svcn = le64_to_cpu(attr->nres.svcn); in attr_set_size()
493 evcn = le64_to_cpu(attr->nres.evcn); in attr_set_size()
497 * attr,mi,le - last attribute segment (containing 'vcn'). in attr_set_size()
498 * attr_b,mi_b,le_b - base (primary) attribute segment. in attr_set_size()
501 rec = mi->mrec; in attr_set_size()
511 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
512 mi_b->dirty = dirty = true; in attr_set_size()
518 * - allocate space (vcn, lcn, len) in attr_set_size()
519 * - update packed run in 'mi' in attr_set_size()
520 * - update attr->nres.evcn in attr_set_size()
521 * - update attr_b->nres.data_size/attr_b->nres.alloc_size in attr_set_size()
523 to_allocate = new_alen - old_alen; in attr_set_size()
532 } else if (pre_alloc == -1) { in attr_set_size()
535 sbi->options->prealloc) { in attr_set_size()
538 new_size)) - in attr_set_size()
553 free = wnd_zeroes(&sbi->used.bitmap); in attr_set_size()
555 err = -ENOSPC; in attr_set_size()
568 err = -ENOMEM; in attr_set_size()
578 (sbi->record_size - in attr_set_size()
579 le32_to_cpu(rec->used) + 8) / in attr_set_size()
590 to_allocate -= alen; in attr_set_size()
595 err = mi_pack_runs(mi, attr, run, vcn - svcn); in attr_set_size()
599 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_set_size()
601 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); in attr_set_size()
602 mi_b->dirty = dirty = true; in attr_set_size()
606 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
612 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) { in attr_set_size()
614 attr_b->nres.data_size = attr_b->nres.alloc_size; in attr_set_size()
618 if (le32_to_cpu(rec->used) < sbi->record_size) { in attr_set_size()
620 evcn = old_alen - 1; in attr_set_size()
624 attr_b->nres.data_size = attr_b->nres.alloc_size; in attr_set_size()
626 attr_b->nres.valid_size = attr_b->nres.data_size; in attr_set_size()
639 if (!ni->attr_list.size) { in attr_set_size()
654 next_svcn, vcn - next_svcn, in attr_set_size()
655 attr_b->flags, &attr, &mi, NULL); in attr_set_size()
665 err = -EINVAL; in attr_set_size()
676 if (ni->mi.rno != MFT_REC_MFT) in attr_set_size()
679 svcn = le64_to_cpu(attr->nres.svcn); in attr_set_size()
680 evcn = le64_to_cpu(attr->nres.evcn); in attr_set_size()
687 attr_b->nres.valid_size = attr_b->nres.data_size = in attr_set_size()
688 attr_b->nres.alloc_size = cpu_to_le64(old_size); in attr_set_size()
689 mi_b->dirty = dirty = true; in attr_set_size()
697 * - update packed run in 'mi' in attr_set_size()
698 * - update attr->nres.evcn in attr_set_size()
699 * - update attr_b->nres.data_size/attr_b->nres.alloc_size in attr_set_size()
700 * - mark and trim clusters as free (vcn, lcn, len) in attr_set_size()
708 err = mi_pack_runs(mi, attr, run, vcn - svcn); in attr_set_size()
711 } else if (le && le->vcn) { in attr_set_size()
712 u16 le_sz = le16_to_cpu(le->size); in attr_set_size()
724 err = -EINVAL; in attr_set_size()
728 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); in attr_set_size()
730 attr->nres.evcn = cpu_to_le64((u64)vcn - 1); in attr_set_size()
731 mi->dirty = true; in attr_set_size()
734 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); in attr_set_size()
737 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
739 attr_b->nres.valid_size = in attr_set_size()
740 attr_b->nres.data_size; in attr_set_size()
743 le64_to_cpu(attr_b->nres.data_size)) in attr_set_size()
744 attr_b->nres.data_size = in attr_set_size()
745 attr_b->nres.alloc_size; in attr_set_size()
747 le64_to_cpu(attr_b->nres.valid_size)) in attr_set_size()
748 attr_b->nres.valid_size = in attr_set_size()
749 attr_b->nres.alloc_size; in attr_set_size()
751 mi_b->dirty = dirty = true; in attr_set_size()
753 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen, in attr_set_size()
759 /* dlen - the number of clusters actually deallocated. */ in attr_set_size()
760 le64_sub_cpu(&attr_b->nres.total_size, in attr_set_size()
770 vcn = svcn - 1; in attr_set_size()
775 evcn = svcn - 1; in attr_set_size()
780 if (le->type != type || le->name_len != name_len || in attr_set_size()
782 err = -EINVAL; in attr_set_size()
790 attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id); in attr_set_size()
792 err = -EINVAL; in attr_set_size()
802 if (attr_b->nres.valid_size != valid) { in attr_set_size()
803 attr_b->nres.valid_size = valid; in attr_set_size()
804 mi_b->dirty = true; in attr_set_size()
815 if (attr_b->non_res) { in attr_set_size()
816 new_alloc = le64_to_cpu(attr_b->nres.alloc_size); in attr_set_size()
817 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) { in attr_set_size()
818 inode_set_bytes(&ni->vfs_inode, new_alloc); in attr_set_size()
825 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_set_size()
826 mark_inode_dirty(&ni->vfs_inode); in attr_set_size()
833 vcn -= alen; in attr_set_size()
834 attr_b->nres.data_size = cpu_to_le64(old_size); in attr_set_size()
835 attr_b->nres.valid_size = cpu_to_le64(old_valid); in attr_set_size()
836 attr_b->nres.alloc_size = cpu_to_le64(old_alloc); in attr_set_size()
842 if (le64_to_cpu(attr_b->nres.svcn) <= svcn && in attr_set_size()
843 svcn <= le64_to_cpu(attr_b->nres.evcn)) { in attr_set_size()
848 err = -EINVAL; in attr_set_size()
859 if (mi_pack_runs(mi, attr, run, evcn - svcn + 1)) in attr_set_size()
869 _ntfs_bad_inode(&ni->vfs_inode); in attr_set_size()
875 * attr_data_get_block - Returns 'lcn' and 'len' for a given 'vcn'. in attr_data_get_block()
879 * @zero - zero out newly allocated clusters in attr_data_get_block()
882 * - @new != NULL is used only for sparse or compressed attributes. in attr_data_get_block()
883 * - newly allocated clusters are zeroed via blkdev_issue_zeroout. in attr_data_get_block()
889 struct runs_tree *run = &ni->file.run; in attr_data_get_block()
905 down_read(&ni->file.run_lock); in attr_data_get_block()
908 up_read(&ni->file.run_lock); in attr_data_get_block()
914 sbi = ni->mi.sbi; in attr_data_get_block()
915 cluster_bits = sbi->cluster_bits; in attr_data_get_block()
918 down_write(&ni->file.run_lock); in attr_data_get_block()
934 err = -ENOENT; in attr_data_get_block()
938 if (!attr_b->non_res) { in attr_data_get_block()
944 asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits; in attr_data_get_block()
947 err = -EINVAL; in attr_data_get_block()
955 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_data_get_block()
956 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_data_get_block()
966 err = -EINVAL; in attr_data_get_block()
969 svcn = le64_to_cpu(attr->nres.svcn); in attr_data_get_block()
970 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
985 /* If the frame is compressed - don't touch it. */ in attr_data_get_block()
988 *len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1)); in attr_data_get_block()
1001 /* Here we may return -ENOENT. in attr_data_get_block()
1009 err = -EINVAL; in attr_data_get_block()
1015 fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1; in attr_data_get_block()
1019 if (attr_b->nres.c_unit) { in attr_data_get_block()
1020 CLST clst_per_frame = 1u << attr_b->nres.c_unit; in attr_data_get_block()
1021 CLST cmask = ~(clst_per_frame - 1); in attr_data_get_block()
1025 to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn; in attr_data_get_block()
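/*
 * [Worked example, assumed values] Compressed attributes are allocated in
 * whole frames. With c_unit == 4, a frame is 1u << 4 == 16 clusters
 * (NTFS_LZNT_CLUSTERS) and cmask == ~15. The two computations above, for
 * sample values:
 *
 *	clusters left in the current frame, vcn = 21:
 *		16 - (21 & 15) = 16 - 5 = 11
 *
 *	clusters to allocate so the end lands on a frame boundary,
 *	vcn = vcn0 = 21, clen = 3:
 *		((21 + 3 + 15) & ~15) - 21 = 32 - 21 = 11
 *
 * A request for 3 clusters mid-frame thus still allocates out to the end
 * of the frame.
 */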
1037 err = -EINVAL; in attr_data_get_block()
1040 evcn1 = le64_to_cpu(attr2->nres.evcn) + 1; in attr_data_get_block()
1048 to_alloc = asize - vcn; in attr_data_get_block()
1054 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1, in attr_data_get_block()
1056 err = -ENOMEM; in attr_data_get_block()
1059 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) { in attr_data_get_block()
1060 hint = -1; in attr_data_get_block()
1074 total_size0 = le64_to_cpu(attr_b->nres.total_size); in attr_data_get_block()
1079 err = -EINVAL; in attr_data_get_block()
1085 err = -EINVAL; in attr_data_get_block()
1090 *len = end - vcn0; in attr_data_get_block()
1094 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); in attr_data_get_block()
1098 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_data_get_block()
1099 inode_set_bytes(&ni->vfs_inode, total_size); in attr_data_get_block()
1100 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_data_get_block()
1102 mi_b->dirty = true; in attr_data_get_block()
1103 mark_inode_dirty(&ni->vfs_inode); in attr_data_get_block()
1106 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
1113 /* Add a new segment: [next_svcn, evcn1). */ in attr_data_get_block()
1114 if (!ni->attr_list.size) { in attr_data_get_block()
1123 err = -ENOENT; in attr_data_get_block()
1137 * It is too complex to undo operations if -ENOSPC occurs deep inside in attr_data_get_block()
1139 * Return in advance -ENOSPC here if there are no free cluster and no free MFT. in attr_data_get_block()
1143 err = -ENOSPC; in attr_data_get_block()
1159 alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size)); in attr_data_get_block()
1160 evcn = le64_to_cpu(attr->nres.evcn); in attr_data_get_block()
1169 err = -EINVAL; in attr_data_get_block()
1184 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id); in attr_data_get_block()
1186 err = -EINVAL; in attr_data_get_block()
1189 svcn = le64_to_cpu(attr->nres.svcn); in attr_data_get_block()
1190 evcn = le64_to_cpu(attr->nres.evcn); in attr_data_get_block()
1201 attr->nres.svcn = cpu_to_le64(next_svcn); in attr_data_get_block()
1202 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); in attr_data_get_block()
1206 le->vcn = cpu_to_le64(next_svcn); in attr_data_get_block()
1207 ni->attr_list.dirty = true; in attr_data_get_block()
1208 mi->dirty = true; in attr_data_get_block()
1209 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
1214 next_svcn, evcn1 - next_svcn, in attr_data_get_block()
1215 attr_b->flags, &attr, &mi, NULL); in attr_data_get_block()
1224 _ntfs_bad_inode(&ni->vfs_inode); in attr_data_get_block()
1226 up_write(&ni->file.run_lock); in attr_data_get_block()
1233 attr_b->nres.total_size = cpu_to_le64(total_size0); in attr_data_get_block()
1234 inode_set_bytes(&ni->vfs_inode, total_size0); in attr_data_get_block()
1238 mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) { in attr_data_get_block()
1239 _ntfs_bad_inode(&ni->vfs_inode); in attr_data_get_block()
1253 return -EINVAL; in attr_data_read_resident()
1255 if (attr->non_res) in attr_data_read_resident()
1258 vbo = folio->index << PAGE_SHIFT; in attr_data_read_resident()
1259 data_size = le32_to_cpu(attr->res.data_size); in attr_data_read_resident()
1263 len = min(data_size - vbo, folio_size(folio)); in attr_data_read_resident()
1280 return -EINVAL; in attr_data_write_resident()
1282 if (attr->non_res) { in attr_data_write_resident()
1287 vbo = folio->index << PAGE_SHIFT; in attr_data_write_resident()
1288 data_size = le32_to_cpu(attr->res.data_size); in attr_data_write_resident()
1291 size_t len = min(data_size - vbo, folio_size(folio)); in attr_data_write_resident()
1294 mi->dirty = true; in attr_data_write_resident()
1296 ni->i_valid = data_size; in attr_data_write_resident()
1302 * attr_load_runs_vcn - Load the runs containing @vcn. in attr_load_runs_vcn()
1315 return -ENOENT; in attr_load_runs_vcn()
1321 return -ENOENT; in attr_load_runs_vcn()
1324 svcn = le64_to_cpu(attr->nres.svcn); in attr_load_runs_vcn()
1325 evcn = le64_to_cpu(attr->nres.evcn); in attr_load_runs_vcn()
1329 return -EINVAL; in attr_load_runs_vcn()
1332 ro = le16_to_cpu(attr->nres.run_off); in attr_load_runs_vcn()
1334 if (ro > le32_to_cpu(attr->size)) in attr_load_runs_vcn()
1335 return -EINVAL; in attr_load_runs_vcn()
1337 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn, in attr_load_runs_vcn()
1338 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro); in attr_load_runs_vcn()
1345 * attr_load_runs_range - Load runs for the given range [from, to). in attr_load_runs_range()
1351 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_load_runs_range()
1352 u8 cluster_bits = sbi->cluster_bits; in attr_load_runs_range()
1354 CLST vcn_last = (to - 1) >> cluster_bits; in attr_load_runs_range()
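/*
 * [Worked example, assumed values] A half-open byte range [from, to) maps
 * to the inclusive cluster range [from >> bits, (to - 1) >> bits]. With
 * 4 KiB clusters (cluster_bits == 12):
 *
 *	[0, 4096)    -> clusters [0, 0]  (a single cluster)
 *	[0, 4097)    -> clusters [0, 1]
 *	[4096, 8192) -> clusters [1, 1]
 *
 * Shifting 'to' itself would overshoot by one cluster whenever 'to' is
 * exactly cluster-aligned, which is why the code uses 'to - 1'.
 */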
1381 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_wof_frame_info()
1391 if (ni->vfs_inode.i_size < 0x100000000ull) { in attr_wof_frame_info()
1404 * Read 4/8 bytes at [vbo - 4(8)]: the offset where the compressed frame starts. in attr_wof_frame_info()
1407 if (!attr->non_res) { in attr_wof_frame_info()
1408 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) { in attr_wof_frame_info()
1409 ntfs_inode_err(&ni->vfs_inode, "is corrupted"); in attr_wof_frame_info()
1410 return -EINVAL; in attr_wof_frame_info()
1416 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0; in attr_wof_frame_info()
1420 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0; in attr_wof_frame_info()
1425 *ondisk_size = off[1] - off[0]; in attr_wof_frame_info()
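/*
 * [Illustrative sketch] The offsets table stores, per frame, the end offset
 * of that frame's compressed bytes, so frame i occupies
 * [table[i - 1], table[i]) with an implicit table[-1] == 0, which is why
 * off[0] above falls back to 0 when vbo[1] is 0 (the first frame). A
 * userspace version of the same lookup, with a made-up table:
 */
#include <stdint.h>
#include <stdio.h>

static void frame_bounds(const uint32_t *table, size_t i,
			 uint32_t *start, uint32_t *size)
{
	*start = i ? table[i - 1] : 0;  /* first frame starts at offset 0 */
	*size = table[i] - *start;
}

int main(void)
{
	/* End offsets of three compressed frames. */
	static const uint32_t table[] = { 19000, 40000, 52000 };
	uint32_t start, size;

	frame_bounds(table, 1, &start, &size);
	printf("frame 1: start=%u size=%u\n", start, size); /* 19000, 21000 */
	return 0;
}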
1429 wof_size = le64_to_cpu(attr->nres.data_size); in attr_wof_frame_info()
1430 down_write(&ni->file.run_lock); in attr_wof_frame_info()
1431 folio = ni->file.offs_folio; in attr_wof_frame_info()
1435 err = -ENOMEM; in attr_wof_frame_info()
1438 folio->index = -1; in attr_wof_frame_info()
1439 ni->file.offs_folio = folio; in attr_wof_frame_info()
1445 voff = vbo[1] & (PAGE_SIZE - 1); in attr_wof_frame_info()
1446 vbo[0] = vbo[1] - bytes_per_off; in attr_wof_frame_info()
1458 if (index != folio->index) { in attr_wof_frame_info()
1459 struct page *page = &folio->page; in attr_wof_frame_info()
1460 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); in attr_wof_frame_info()
1470 to - from, REQ_OP_READ); in attr_wof_frame_info()
1472 folio->index = -1; in attr_wof_frame_info()
1475 folio->index = index; in attr_wof_frame_info()
1488 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32)); in attr_wof_frame_info()
1491 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64)); in attr_wof_frame_info()
1498 off[0] = le32_to_cpu(off32[-1]); in attr_wof_frame_info()
1502 off[0] = le64_to_cpu(off64[-1]); in attr_wof_frame_info()
1510 *ondisk_size = off[1] - off[0]; in attr_wof_frame_info()
1515 up_write(&ni->file.run_lock); in attr_wof_frame_info()
1521 * attr_is_frame_compressed - Used to detect a compressed frame. in attr_is_frame_compressed()
1523 * attr - base (primary) attribute segment.
1524 * run - run to use, usually == &ni->file.run.
1525 * Only base segments contain a valid 'attr->nres.c_unit' in attr_is_frame_compressed()
1540 if (!attr->non_res) in attr_is_frame_compressed()
1543 clst_frame = 1u << attr->nres.c_unit; in attr_is_frame_compressed()
1547 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), in attr_is_frame_compressed()
1548 attr->name_len, run, vcn); in attr_is_frame_compressed()
1553 return -EINVAL; in attr_is_frame_compressed()
1570 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size)); in attr_is_frame_compressed()
1583 err = attr_load_runs_vcn(ni, attr->type, in attr_is_frame_compressed()
1585 attr->name_len, run, vcn_next); in attr_is_frame_compressed()
1591 return -EINVAL; in attr_is_frame_compressed()
1602 return -EINVAL; in attr_is_frame_compressed()
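/*
 * [Sketch of the underlying rule; not the exact kernel control flow]
 * A frame of clst_frame clusters is classified by how many of its clusters
 * the run actually allocates (non-sparse):
 *
 *	allocated == clst_frame     -> stored uncompressed
 *	0 < allocated < clst_frame  -> compressed (the tail is sparse)
 *	allocated == 0              -> a hole (fully sparse frame)
 *
 * This is why only the base segment's 'attr->nres.c_unit' matters: the
 * on-disk run encodes compression simply by under-allocating the frame.
 */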
1625 * attr_allocate_frame - Allocate/free clusters for @frame.
1627 * Assumed: down_write(&ni->file.run_lock);
1633 struct runs_tree *run = &ni->file.run; in attr_allocate_frame()
1634 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_allocate_frame()
1645 return -ENOENT; in attr_allocate_frame()
1648 return -EINVAL; in attr_allocate_frame()
1651 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_allocate_frame()
1653 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_allocate_frame()
1654 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_allocate_frame()
1655 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_allocate_frame()
1662 err = -EINVAL; in attr_allocate_frame()
1669 err = -EINVAL; in attr_allocate_frame()
1672 svcn = le64_to_cpu(attr->nres.svcn); in attr_allocate_frame()
1673 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1684 total_size -= (u64)clst_data << sbi->cluster_bits; in attr_allocate_frame()
1692 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len, in attr_allocate_frame()
1697 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len, in attr_allocate_frame()
1699 err = -ENOMEM; in attr_allocate_frame()
1708 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL, in attr_allocate_frame()
1710 hint = -1; in attr_allocate_frame()
1714 hint + 1, len - clst_data, NULL, in attr_allocate_frame()
1724 total_size += (u64)len << sbi->cluster_bits; in attr_allocate_frame()
1727 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); in attr_allocate_frame()
1731 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_allocate_frame()
1732 inode_set_bytes(&ni->vfs_inode, total_size); in attr_allocate_frame()
1733 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_allocate_frame()
1735 mi_b->dirty = true; in attr_allocate_frame()
1736 mark_inode_dirty(&ni->vfs_inode); in attr_allocate_frame()
1739 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1746 /* Add a new segment: [next_svcn, evcn1). */ in attr_allocate_frame()
1747 if (!ni->attr_list.size) { in attr_allocate_frame()
1756 err = -ENOENT; in attr_allocate_frame()
1774 sbi, le64_to_cpu(attr_b->nres.alloc_size)); in attr_allocate_frame()
1775 CLST evcn = le64_to_cpu(attr->nres.evcn); in attr_allocate_frame()
1784 err = -EINVAL; in attr_allocate_frame()
1800 &le->id); in attr_allocate_frame()
1802 err = -EINVAL; in attr_allocate_frame()
1805 svcn = le64_to_cpu(attr->nres.svcn); in attr_allocate_frame()
1806 evcn = le64_to_cpu(attr->nres.evcn); in attr_allocate_frame()
1817 attr->nres.svcn = cpu_to_le64(next_svcn); in attr_allocate_frame()
1818 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); in attr_allocate_frame()
1822 le->vcn = cpu_to_le64(next_svcn); in attr_allocate_frame()
1823 ni->attr_list.dirty = true; in attr_allocate_frame()
1824 mi->dirty = true; in attr_allocate_frame()
1826 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1831 next_svcn, evcn1 - next_svcn, in attr_allocate_frame()
1832 attr_b->flags, &attr, &mi, NULL); in attr_allocate_frame()
1843 valid_size = le64_to_cpu(attr_b->nres.valid_size); in attr_allocate_frame()
1845 attr_b->nres.valid_size = cpu_to_le64(valid_size); in attr_allocate_frame()
1846 mi_b->dirty = true; in attr_allocate_frame()
1854 * attr_collapse_range - Collapse a range in a file. in attr_collapse_range()
1859 struct runs_tree *run = &ni->file.run; in attr_collapse_range()
1860 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_collapse_range()
1876 return -ENOENT; in attr_collapse_range()
1878 if (!attr_b->non_res) { in attr_collapse_range()
1883 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_collapse_range()
1884 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_collapse_range()
1885 a_flags = attr_b->flags; in attr_collapse_range()
1888 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_collapse_range()
1889 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_collapse_range()
1892 mask = sbi->cluster_mask; in attr_collapse_range()
1897 return -EINVAL; in attr_collapse_range()
1901 return -EINVAL; in attr_collapse_range()
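/*
 * [Worked example, assumed values] Collapse offsets must be aligned to the
 * allocation granularity. For a compressed attribute with 4 KiB clusters
 * and c_unit == 4, mask = (4096 << 4) - 1 == 0xFFFF, so 'vbo' and 'bytes'
 * must both be multiples of 64 KiB:
 *
 *	vbo = 0x20000, bytes = 0x10000:  (vbo & mask) == 0       -> ok
 *	vbo = 0x21000, bytes = 0x10000:  (vbo & mask) == 0x1000  -> -EINVAL
 *
 * For a normal attribute the mask is just cluster_mask (cluster_size - 1).
 */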
1903 down_write(&ni->file.run_lock); in attr_collapse_range()
1906 u64 new_valid = min(ni->i_valid, vbo); in attr_collapse_range()
1909 truncate_setsize(&ni->vfs_inode, vbo); in attr_collapse_range()
1910 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo, in attr_collapse_range()
1913 if (!err && new_valid < ni->i_valid) in attr_collapse_range()
1914 ni->i_valid = new_valid; in attr_collapse_range()
1922 alen = alloc_size >> sbi->cluster_bits; in attr_collapse_range()
1923 vcn = vbo >> sbi->cluster_bits; in attr_collapse_range()
1924 len = bytes >> sbi->cluster_bits; in attr_collapse_range()
1928 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_collapse_range()
1929 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_collapse_range()
1936 err = -EINVAL; in attr_collapse_range()
1943 err = -EINVAL; in attr_collapse_range()
1947 svcn = le64_to_cpu(attr->nres.svcn); in attr_collapse_range()
1948 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
1953 /* Shift VCNs down by 'len'. */ in attr_collapse_range()
1954 attr->nres.svcn = cpu_to_le64(svcn - len); in attr_collapse_range()
1955 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len); in attr_collapse_range()
1957 le->vcn = attr->nres.svcn; in attr_collapse_range()
1958 ni->attr_list.dirty = true; in attr_collapse_range()
1960 mi->dirty = true; in attr_collapse_range()
1969 eat = min(end, evcn1) - vcn1; in attr_collapse_range()
1977 err = -ENOMEM; in attr_collapse_range()
1983 attr->nres.svcn = cpu_to_le64(vcn); in attr_collapse_range()
1985 le->vcn = attr->nres.svcn; in attr_collapse_range()
1986 ni->attr_list.dirty = true; in attr_collapse_range()
1990 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat); in attr_collapse_range()
1994 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
1998 evcn1 - eat - next_svcn, a_flags, &attr, in attr_collapse_range()
2011 u16 roff = le16_to_cpu(attr->nres.run_off); in attr_collapse_range()
2013 if (roff > le32_to_cpu(attr->size)) { in attr_collapse_range()
2014 err = -EINVAL; in attr_collapse_range()
2018 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, in attr_collapse_range()
2019 evcn1 - 1, svcn, Add2Ptr(attr, roff), in attr_collapse_range()
2020 le32_to_cpu(attr->size) - roff); in attr_collapse_range()
2027 le_sz = le16_to_cpu(le->size); in attr_collapse_range()
2029 err = -EINVAL; in attr_collapse_range()
2039 err = -EINVAL; in attr_collapse_range()
2045 0, &le->id); in attr_collapse_range()
2047 err = -EINVAL; in attr_collapse_range()
2052 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); in attr_collapse_range()
2060 err = -EINVAL; in attr_collapse_range()
2065 svcn = le64_to_cpu(attr->nres.svcn); in attr_collapse_range()
2066 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
2074 err = -ENOENT; in attr_collapse_range()
2079 data_size -= bytes; in attr_collapse_range()
2080 valid_size = ni->i_valid; in attr_collapse_range()
2082 valid_size -= bytes; in attr_collapse_range()
2086 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes); in attr_collapse_range()
2087 attr_b->nres.data_size = cpu_to_le64(data_size); in attr_collapse_range()
2088 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size)); in attr_collapse_range()
2089 total_size -= (u64)dealloc << sbi->cluster_bits; in attr_collapse_range()
2091 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_collapse_range()
2092 mi_b->dirty = true; in attr_collapse_range()
2095 ni->i_valid = valid_size; in attr_collapse_range()
2096 i_size_write(&ni->vfs_inode, data_size); in attr_collapse_range()
2097 inode_set_bytes(&ni->vfs_inode, total_size); in attr_collapse_range()
2098 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_collapse_range()
2099 mark_inode_dirty(&ni->vfs_inode); in attr_collapse_range()
2102 up_write(&ni->file.run_lock); in attr_collapse_range()
2104 _ntfs_bad_inode(&ni->vfs_inode); in attr_collapse_range()
2117 struct runs_tree *run = &ni->file.run; in attr_punch_hole()
2118 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_punch_hole()
2134 return -ENOENT; in attr_punch_hole()
2136 if (!attr_b->non_res) { in attr_punch_hole()
2137 u32 data_size = le32_to_cpu(attr_b->res.data_size); in attr_punch_hole()
2145 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from); in attr_punch_hole()
2150 return -EOPNOTSUPP; in attr_punch_hole()
2152 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_punch_hole()
2153 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_punch_hole()
2160 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_punch_hole()
2165 bytes -= vbo; in attr_punch_hole()
2171 return -EINVAL; in attr_punch_hole()
2177 down_write(&ni->file.run_lock); in attr_punch_hole()
2184 alen = alloc_size >> sbi->cluster_bits; in attr_punch_hole()
2185 vcn = vbo >> sbi->cluster_bits; in attr_punch_hole()
2186 len = bytes >> sbi->cluster_bits; in attr_punch_hole()
2190 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_punch_hole()
2191 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_punch_hole()
2192 a_flags = attr_b->flags; in attr_punch_hole()
2199 err = -EINVAL; in attr_punch_hole()
2206 err = -EINVAL; in attr_punch_hole()
2210 svcn = le64_to_cpu(attr->nres.svcn); in attr_punch_hole()
2211 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2221 zero = min(end, evcn1) - vcn1; in attr_punch_hole()
2243 err = -ENOMEM; in attr_punch_hole()
2248 err = mi_pack_runs(mi, attr, run, evcn1 - svcn); in attr_punch_hole()
2251 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2256 evcn1 - next_svcn, a_flags, in attr_punch_hole()
2278 err = -EINVAL; in attr_punch_hole()
2282 svcn = le64_to_cpu(attr->nres.svcn); in attr_punch_hole()
2283 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2294 err = -EINVAL; in attr_punch_hole()
2299 total_size -= (u64)hole << sbi->cluster_bits; in attr_punch_hole()
2300 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_punch_hole()
2301 mi_b->dirty = true; in attr_punch_hole()
2304 inode_set_bytes(&ni->vfs_inode, total_size); in attr_punch_hole()
2305 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_punch_hole()
2306 mark_inode_dirty(&ni->vfs_inode); in attr_punch_hole()
2310 up_write(&ni->file.run_lock); in attr_punch_hole()
2314 _ntfs_bad_inode(&ni->vfs_inode); in attr_punch_hole()
2322 if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn)) in attr_punch_hole()
2329 * attr_insert_range - Insert a range (hole) into a file. in attr_insert_range()
2335 struct runs_tree *run = &ni->file.run; in attr_insert_range()
2336 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_insert_range()
2351 return -ENOENT; in attr_insert_range()
2355 return -EOPNOTSUPP; in attr_insert_range()
2358 if (!attr_b->non_res) { in attr_insert_range()
2359 data_size = le32_to_cpu(attr_b->res.data_size); in attr_insert_range()
2361 mask = sbi->cluster_mask; /* cluster_size - 1 */ in attr_insert_range()
2363 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_insert_range()
2364 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_insert_range()
2365 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_insert_range()
2375 return -EINVAL; in attr_insert_range()
2380 return -EINVAL; in attr_insert_range()
2387 if (bytes > sbi->maxbytes_sparse - alloc_size) in attr_insert_range()
2388 return -EFBIG; in attr_insert_range()
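/*
 * [Illustrative sketch] The limit check above is written as a subtraction
 * on the constant side, 'bytes > maxbytes_sparse - alloc_size', instead of
 * 'alloc_size + bytes > maxbytes_sparse', so a huge 'bytes' cannot wrap
 * the addition back under the limit (this assumes alloc_size never exceeds
 * maxbytes_sparse). A standalone comparison of the two forms:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t limit = 1ull << 40, alloc = 1ull << 39;
	uint64_t bytes = UINT64_MAX - (1ull << 38); /* would overflow a sum */

	/* Wrapping form: alloc + bytes overflows and looks "small". */
	printf("unsafe: %d\n", alloc + bytes > limit);  /* 0 (wrong)  */
	/* Safe form: valid as long as alloc <= limit.  */
	printf("safe:   %d\n", bytes > limit - alloc);  /* 1 (right)  */
	return 0;
}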
2390 vcn = vbo >> sbi->cluster_bits; in attr_insert_range()
2391 len = bytes >> sbi->cluster_bits; in attr_insert_range()
2393 down_write(&ni->file.run_lock); in attr_insert_range()
2395 if (!attr_b->non_res) { in attr_insert_range()
2403 err = -EINVAL; in attr_insert_range()
2410 if (!attr_b->non_res) { in attr_insert_range()
2413 le16_to_cpu(attr_b->res.data_off)); in attr_insert_range()
2421 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_insert_range()
2422 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_insert_range()
2428 a_flags = attr_b->flags; in attr_insert_range()
2429 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_insert_range()
2430 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_insert_range()
2437 err = -EINVAL; in attr_insert_range()
2444 err = -EINVAL; in attr_insert_range()
2448 svcn = le64_to_cpu(attr->nres.svcn); in attr_insert_range()
2449 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2458 err = -ENOMEM; in attr_insert_range()
2463 err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn); in attr_insert_range()
2467 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2470 attr->type == ATTR_DATA && !attr->name_len) { in attr_insert_range()
2471 le64_add_cpu(&attr->nres.svcn, len); in attr_insert_range()
2472 le64_add_cpu(&attr->nres.evcn, len); in attr_insert_range()
2474 le->vcn = attr->nres.svcn; in attr_insert_range()
2475 ni->attr_list.dirty = true; in attr_insert_range()
2477 mi->dirty = true; in attr_insert_range()
2482 next_svcn, evcn1 + len - next_svcn, in attr_insert_range()
2489 err = -EINVAL; in attr_insert_range()
2502 if (vbo <= ni->i_valid) in attr_insert_range()
2503 ni->i_valid += bytes; in attr_insert_range()
2505 attr_b->nres.data_size = cpu_to_le64(data_size + bytes); in attr_insert_range()
2506 attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes); in attr_insert_range()
2508 /* ni->i_valid may temporarily differ from valid_size. */ in attr_insert_range()
2509 if (ni->i_valid > data_size + bytes) in attr_insert_range()
2510 attr_b->nres.valid_size = attr_b->nres.data_size; in attr_insert_range()
2512 attr_b->nres.valid_size = cpu_to_le64(ni->i_valid); in attr_insert_range()
2513 mi_b->dirty = true; in attr_insert_range()
2516 i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes); in attr_insert_range()
2517 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_insert_range()
2518 mark_inode_dirty(&ni->vfs_inode); in attr_insert_range()
2523 up_write(&ni->file.run_lock); in attr_insert_range()
2528 _ntfs_bad_inode(&ni->vfs_inode); in attr_insert_range()
2532 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_insert_range()
2533 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_insert_range()
2549 svcn = le64_to_cpu(attr->nres.svcn); in attr_insert_range()
2550 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2559 if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn)) in attr_insert_range()
2563 attr->type == ATTR_DATA && !attr->name_len) { in attr_insert_range()
2564 le64_sub_cpu(&attr->nres.svcn, len); in attr_insert_range()
2565 le64_sub_cpu(&attr->nres.evcn, len); in attr_insert_range()
2567 le->vcn = attr->nres.svcn; in attr_insert_range()
2568 ni->attr_list.dirty = true; in attr_insert_range()
2570 mi->dirty = true; in attr_insert_range()
2590 ntfs_bad_inode(&ni->vfs_inode, "no data attribute"); in attr_force_nonresident()
2591 return -ENOENT; in attr_force_nonresident()
2594 if (attr->non_res) { in attr_force_nonresident()
2599 down_write(&ni->file.run_lock); in attr_force_nonresident()
2601 le32_to_cpu(attr->res.data_size), in attr_force_nonresident()
2602 &ni->file.run, &attr, NULL); in attr_force_nonresident()
2603 up_write(&ni->file.run_lock); in attr_force_nonresident()
2618 return -ENOENT; in attr_set_compress()
2625 if (attr->non_res) { in attr_set_compress()
2630 if (attr->nres.data_size) { in attr_set_compress()
2636 return -EOPNOTSUPP; in attr_set_compress()
2639 run_off = le16_to_cpu(attr->nres.run_off); in attr_set_compress()
2640 run_size = le32_to_cpu(attr->size) - run_off; in attr_set_compress()
2644 /* Remove the 'attr->nres.total_size' field. */ in attr_set_compress()
2645 memmove(run - 8, run, run_size); in attr_set_compress()
2646 run_off -= 8; in attr_set_compress()
2649 if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) { in attr_set_compress()
2654 return -EOPNOTSUPP; in attr_set_compress()
2658 /* Make a gap for 'attr->nres.total_size'. */ in attr_set_compress()
2661 attr->nres.total_size = attr->nres.alloc_size; in attr_set_compress()
2663 attr->nres.run_off = cpu_to_le16(run_off); in attr_set_compress()
2668 attr->flags |= ATTR_FLAG_COMPRESSED; in attr_set_compress()
2669 attr->nres.c_unit = NTFS_LZNT_CUNIT; in attr_set_compress()
2671 attr->flags &= ~ATTR_FLAG_COMPRESSED; in attr_set_compress()
2672 attr->nres.c_unit = 0; in attr_set_compress()
2674 mi->dirty = true; in attr_set_compress()
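/*
 * [Illustrative sketch, made-up sizes] A nonresident attribute header
 * carries the extra 8-byte 'attr->nres.total_size' field only when the
 * attribute is compressed (or sparse). Toggling ATTR_FLAG_COMPRESSED
 * therefore grows or shrinks the header by 8 bytes and slides the packed
 * run data, which is exactly the memmove(run - 8, ...) / gap-making, the
 * run_off adjustment, and the mi_resize_attr(mi, attr, +/-8) calls above:
 *
 *	without total_size:  [ header ][ packed runs ]        run_off = N
 *	with total_size:     [ header | total_size ][ runs ]  run_off = N + 8
 */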