Lines Matching +full:trim +full:-data +full:-valid

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
47 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 + in get_pre_allocated()
52 ret = (((size + clump - 1) >> align_shift)) << align_shift; in get_pre_allocated()
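The two lines above are a power-of-two round-up: the requested size is bumped to the next multiple of the preallocation clump. A minimal standalone sketch of the same arithmetic (the shift constant is a hypothetical stand-in, not the driver's actual NTFS_MIN_LOG2_OF_CLUMP computation):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_SHIFT 14	/* hypothetical clump of 16 KiB */

uint64_t round_up_clump(uint64_t size)
{
	uint64_t clump = 1ull << ALIGN_SHIFT;

	/* Add clump - 1, then clear the low bits with a shift pair. */
	return ((size + clump - 1) >> ALIGN_SHIFT) << ALIGN_SHIFT;
}

int main(void)
{
	/* 5000 bytes round up to one 16 KiB clump: prints 16384. */
	printf("%llu\n", (unsigned long long)round_up_clump(5000));
	return 0;
}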
58 * attr_load_runs - Load all runs stored in @attr.
64 CLST svcn = le64_to_cpu(attr->nres.svcn); in attr_load_runs()
65 CLST evcn = le64_to_cpu(attr->nres.evcn); in attr_load_runs()
73 return -EINVAL; in attr_load_runs()
75 asize = le32_to_cpu(attr->size); in attr_load_runs()
76 run_off = le16_to_cpu(attr->nres.run_off); in attr_load_runs()
79 return -EINVAL; in attr_load_runs()
81 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, in attr_load_runs()
83 asize - run_off); in attr_load_runs()
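Note how the unpack call is fed 'asize - run_off', so the decoder cannot read past the attribute record; the preceding check rejects offsets beyond the attribute. A hedged userspace sketch of that validation pattern (not the driver's actual API):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: validate an offset/length pair before unpacking runs. */
int packed_runs(const uint8_t *attr, uint32_t asize, uint16_t run_off,
		const uint8_t **run, size_t *len)
{
	if (run_off > asize)
		return -EINVAL;		/* offset points past the attribute */

	*run = attr + run_off;
	*len = asize - run_off;		/* decoder may read at most this */
	return 0;
}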
91 * run_deallocate_ex - Deallocate clusters.
94 CLST vcn, CLST len, CLST *done, bool trim) in run_deallocate_ex() argument
106 err = -EINVAL; in run_deallocate_ex()
115 err = -EINVAL; in run_deallocate_ex()
121 /* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */ in run_deallocate_ex()
122 mark_as_free_ex(sbi, lcn, clen, trim); in run_deallocate_ex()
127 len -= clen; in run_deallocate_ex()
134 /* Save memory - don't load the entire run. */ in run_deallocate_ex()
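run_deallocate_ex walks the range one mapped (lcn, clen) piece at a time, clamps each piece to what is left, frees it (optionally trimming) and accumulates the total in *done. A simplified loop of the same shape; lookup_extent() and free_clusters() are illustrative stand-ins for run_lookup_entry() and mark_as_free_ex():

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t CLST;

/* Hypothetical helpers; see lead-in. */
bool lookup_extent(CLST vcn, CLST *lcn, CLST *clen);
void free_clusters(CLST lcn, CLST clen, bool trim);

void deallocate_range(CLST vcn, CLST len, CLST *done, bool trim)
{
	CLST lcn, clen;

	while (len && lookup_extent(vcn, &lcn, &clen)) {
		if (clen > len)
			clen = len;	/* clamp to the requested range */
		free_clusters(lcn, clen, trim);	/* free [lcn, lcn + clen) */
		if (done)
			*done += clen;
		vcn += clen;
		len -= clen;
	}
}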
147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
156 size_t cnt = run->count; in attr_allocate_clusters()
162 if (err == -ENOSPC && pre) { in attr_allocate_clusters()
184 err = -ENOMEM; in attr_allocate_clusters()
189 u8 shift = sbi->cluster_bits - SECTOR_SHIFT; in attr_allocate_clusters()
191 err = blkdev_issue_zeroout(sbi->sb->s_bdev, in attr_allocate_clusters()
202 (fr && run->count - cnt >= fr)) { in attr_allocate_clusters()
203 *alen = vcn - vcn0; in attr_allocate_clusters()
207 len -= flen; in attr_allocate_clusters()
212 if (vcn - vcn0) { in attr_allocate_clusters()
213 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false); in attr_allocate_clusters()
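The failure path above shows the undo discipline: if allocation stops short, everything obtained since vcn0 is handed back with trim disabled. A sketch of the pattern, with alloc_piece() and undo_pieces() as hypothetical stand-ins for the driver's allocator and run_deallocate_ex():

#include <stdint.h>

typedef uint64_t CLST;

/* Hypothetical helpers; see lead-in. */
int alloc_piece(CLST hint, CLST want, CLST *lcn, CLST *flen);
void undo_pieces(CLST vcn0, CLST n);

int allocate_all(CLST vcn0, CLST len)
{
	CLST vcn = vcn0, lcn, flen, hint = 0;

	while (len) {
		int err = alloc_piece(hint, len, &lcn, &flen);

		if (err) {
			if (vcn - vcn0)		/* roll back partial work */
				undo_pieces(vcn0, vcn - vcn0);
			return err;
		}
		hint = lcn + flen;	/* try to stay contiguous */
		vcn += flen;
		len -= flen;
	}
	return 0;
}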
223 * If page is not NULL, it already contains resident data in attr_make_nonresident()
240 if (attr->non_res) { in attr_make_nonresident()
245 sbi = mi->sbi; in attr_make_nonresident()
246 rec = mi->mrec; in attr_make_nonresident()
248 used = le32_to_cpu(rec->used); in attr_make_nonresident()
249 asize = le32_to_cpu(attr->size); in attr_make_nonresident()
252 rsize = le32_to_cpu(attr->res.data_size); in attr_make_nonresident()
253 is_data = attr->type == ATTR_DATA && !attr->name_len; in attr_make_nonresident()
255 /* len - how many clusters are required to store 'rsize' bytes */ in attr_make_nonresident()
257 u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT; in attr_make_nonresident()
258 len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT; in attr_make_nonresident()
268 err = -ENOMEM; in attr_make_nonresident()
273 /* Empty resident -> Empty nonresident. */ in attr_make_nonresident()
276 const char *data = resident_data(attr); in attr_make_nonresident() local
285 /* Empty resident -> Non-empty nonresident. */ in attr_make_nonresident()
287 err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0); in attr_make_nonresident()
291 struct address_space *mapping = ni->vfs_inode.i_mapping; in attr_make_nonresident()
301 folio_fill_tail(folio, 0, data, rsize); in attr_make_nonresident()
310 used -= asize; in attr_make_nonresident()
311 memmove(attr, Add2Ptr(attr, asize), used - aoff); in attr_make_nonresident()
312 rec->used = cpu_to_le32(used); in attr_make_nonresident()
313 mi->dirty = true; in attr_make_nonresident()
317 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s), in attr_make_nonresident()
318 attr_s->name_len, run, 0, alen, in attr_make_nonresident()
319 attr_s->flags, &attr, NULL, NULL); in attr_make_nonresident()
324 attr->nres.data_size = cpu_to_le64(rsize); in attr_make_nonresident()
325 attr->nres.valid_size = attr->nres.data_size; in attr_make_nonresident()
330 ni->ni_flags &= ~NI_FLAG_RESIDENT; in attr_make_nonresident()
337 memmove(next, attr, used - aoff); in attr_make_nonresident()
339 rec->used = cpu_to_le32(used + asize); in attr_make_nonresident()
340 mi->dirty = true; in attr_make_nonresident()
342 /* Undo: do not trim newly allocated clusters. */ in attr_make_nonresident()
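The len computation near the top (shift = cluster_bits + NTFS_LZNT_CUNIT) rounds the resident payload up to whole compression frames before clusters are allocated. A worked standalone version with illustrative constants:

#include <stdint.h>
#include <stdio.h>

#define CLUSTER_BITS 12		/* illustrative: 4 KiB clusters */
#define LZNT_CUNIT 4		/* 16 clusters per compression frame */

uint64_t clusters_for_bytes(uint64_t rsize)
{
	unsigned int shift = CLUSTER_BITS + LZNT_CUNIT; /* log2(frame bytes) */

	/* Whole frames needed for rsize bytes, expressed in clusters. */
	return ((rsize + (1ull << shift) - 1) >> shift) << LZNT_CUNIT;
}

int main(void)
{
	/* 70000 bytes -> 2 frames of 64 KiB -> prints 32 (clusters). */
	printf("%llu\n", (unsigned long long)clusters_for_bytes(70000));
	return 0;
}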
352 * attr_set_size_res - Helper for attr_set_size().
359 struct ntfs_sb_info *sbi = mi->sbi; in attr_set_size_res()
360 struct MFT_REC *rec = mi->mrec; in attr_set_size_res()
361 u32 used = le32_to_cpu(rec->used); in attr_set_size_res()
362 u32 asize = le32_to_cpu(attr->size); in attr_set_size_res()
364 u32 rsize = le32_to_cpu(attr->res.data_size); in attr_set_size_res()
365 u32 tail = used - aoff - asize; in attr_set_size_res()
367 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8); in attr_set_size_res()
372 if (used + dsize > sbi->max_bytes_per_attr) in attr_set_size_res()
382 new_size - rsize); in attr_set_size_res()
384 rec->used = cpu_to_le32(used + dsize); in attr_set_size_res()
385 attr->size = cpu_to_le32(asize + dsize); in attr_set_size_res()
386 attr->res.data_size = cpu_to_le32(new_size); in attr_set_size_res()
387 mi->dirty = true; in attr_set_size_res()
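attr_set_size_res grows or shrinks in place by the difference of the 8-byte-aligned payload sizes, since resident data is stored aligned inside the MFT record; when the record cannot absorb the delta, the caller must go non-resident. A small sketch of that bookkeeping (max_bytes is a hypothetical record limit standing in for max_bytes_per_attr):

#include <stdint.h>

#define ALIGN8(x) (((x) + 7) & ~(uint64_t)7)

/* Delta by which the record's 'used' bytes change on a resident resize. */
int64_t resident_resize_delta(uint64_t new_size, uint64_t rsize)
{
	return (int64_t)(ALIGN8(new_size) - ALIGN8(rsize));
}

/* Nonzero if the resized attribute still fits in the MFT record. */
int fits_in_record(uint32_t used, int64_t dsize, uint32_t max_bytes)
{
	return (int64_t)used + dsize <= (int64_t)max_bytes;
}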
394 * attr_set_size - Change the size of attribute.
397 * - Sparse/compressed: No allocated clusters.
398 * - Normal: Append allocated and preallocated new clusters.
400 * - No deallocation if @keep_prealloc is set. in attr_set_size()
408 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_set_size()
409 u8 cluster_bits = sbi->cluster_bits; in attr_set_size()
410 bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && in attr_set_size()
417 CLST next_svcn, pre_alloc = -1, done = 0; in attr_set_size()
429 err = -ENOENT; in attr_set_size()
433 if (!attr_b->non_res) { in attr_set_size()
440 if (!attr_b->non_res) { in attr_set_size()
450 align = sbi->cluster_size; in attr_set_size()
452 align <<= attr_b->nres.c_unit; in attr_set_size()
454 old_valid = le64_to_cpu(attr_b->nres.valid_size); in attr_set_size()
455 old_size = le64_to_cpu(attr_b->nres.data_size); in attr_set_size()
456 old_alloc = le64_to_cpu(attr_b->nres.alloc_size); in attr_set_size()
461 new_alloc = (new_size + align - 1) & ~(u64)(align - 1); in attr_set_size()
465 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
466 mi_b->dirty = dirty = true; in attr_set_size()
470 vcn = old_alen - 1; in attr_set_size()
472 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_set_size()
473 evcn = le64_to_cpu(attr_b->nres.evcn); in attr_set_size()
480 err = -EINVAL; in attr_set_size()
487 err = -EINVAL; in attr_set_size()
492 svcn = le64_to_cpu(attr->nres.svcn); in attr_set_size()
493 evcn = le64_to_cpu(attr->nres.evcn); in attr_set_size()
497 * attr,mi,le - last attribute segment (containing 'vcn'). in attr_set_size()
498 * attr_b,mi_b,le_b - base (primary) attribute segment. in attr_set_size()
501 rec = mi->mrec; in attr_set_size()
511 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
512 mi_b->dirty = dirty = true; in attr_set_size()
518 * - allocate space (vcn, lcn, len) in attr_set_size()
519 * - update packed run in 'mi' in attr_set_size()
520 * - update attr->nres.evcn in attr_set_size()
521 * - update attr_b->nres.data_size/attr_b->nres.alloc_size in attr_set_size()
523 to_allocate = new_alen - old_alen; in attr_set_size()
532 } else if (pre_alloc == -1) { in attr_set_size()
535 sbi->options->prealloc) { in attr_set_size()
538 new_size)) - in attr_set_size()
553 free = wnd_zeroes(&sbi->used.bitmap); in attr_set_size()
555 err = -ENOSPC; in attr_set_size()
568 err = -ENOMEM; in attr_set_size()
578 (sbi->record_size - in attr_set_size()
579 le32_to_cpu(rec->used) + 8) / in attr_set_size()
590 to_allocate -= alen; in attr_set_size()
595 err = mi_pack_runs(mi, attr, run, vcn - svcn); in attr_set_size()
599 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_set_size()
601 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); in attr_set_size()
602 mi_b->dirty = dirty = true; in attr_set_size()
606 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
612 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) { in attr_set_size()
614 attr_b->nres.data_size = attr_b->nres.alloc_size; in attr_set_size()
618 if (le32_to_cpu(rec->used) < sbi->record_size) { in attr_set_size()
620 evcn = old_alen - 1; in attr_set_size()
624 attr_b->nres.data_size = attr_b->nres.alloc_size; in attr_set_size()
626 attr_b->nres.valid_size = attr_b->nres.data_size; in attr_set_size()
639 if (!ni->attr_list.size) { in attr_set_size()
648 /* This is MFT data, repeat. */ in attr_set_size()
654 next_svcn, vcn - next_svcn, in attr_set_size()
655 attr_b->flags, &attr, &mi, NULL); in attr_set_size()
665 err = -EINVAL; in attr_set_size()
676 if (ni->mi.rno != MFT_REC_MFT) in attr_set_size()
679 svcn = le64_to_cpu(attr->nres.svcn); in attr_set_size()
680 evcn = le64_to_cpu(attr->nres.evcn); in attr_set_size()
687 attr_b->nres.valid_size = attr_b->nres.data_size = in attr_set_size()
688 attr_b->nres.alloc_size = cpu_to_le64(old_size); in attr_set_size()
689 mi_b->dirty = dirty = true; in attr_set_size()
697 * - update packed run in 'mi' in attr_set_size()
698 * - update attr->nres.evcn in attr_set_size()
699 * - update attr_b->nres.data_size/attr_b->nres.alloc_size in attr_set_size()
700 * - mark and trim clusters as free (vcn, lcn, len) in attr_set_size()
708 err = mi_pack_runs(mi, attr, run, vcn - svcn); in attr_set_size()
711 } else if (le && le->vcn) { in attr_set_size()
712 u16 le_sz = le16_to_cpu(le->size); in attr_set_size()
724 err = -EINVAL; in attr_set_size()
728 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); in attr_set_size()
730 attr->nres.evcn = cpu_to_le64((u64)vcn - 1); in attr_set_size()
731 mi->dirty = true; in attr_set_size()
734 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); in attr_set_size()
737 attr_b->nres.data_size = cpu_to_le64(new_size); in attr_set_size()
739 attr_b->nres.valid_size = in attr_set_size()
740 attr_b->nres.data_size; in attr_set_size()
743 le64_to_cpu(attr_b->nres.data_size)) in attr_set_size()
744 attr_b->nres.data_size = in attr_set_size()
745 attr_b->nres.alloc_size; in attr_set_size()
747 le64_to_cpu(attr_b->nres.valid_size)) in attr_set_size()
748 attr_b->nres.valid_size = in attr_set_size()
749 attr_b->nres.alloc_size; in attr_set_size()
751 mi_b->dirty = dirty = true; in attr_set_size()
753 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen, in attr_set_size()
759 /* dlen - clusters actually deallocated. */ in attr_set_size()
760 le64_sub_cpu(&attr_b->nres.total_size, in attr_set_size()
770 vcn = svcn - 1; in attr_set_size()
775 evcn = svcn - 1; in attr_set_size()
780 if (le->type != type || le->name_len != name_len || in attr_set_size()
782 err = -EINVAL; in attr_set_size()
791 &le->id); in attr_set_size()
793 err = -EINVAL; in attr_set_size()
801 __le64 valid = cpu_to_le64(min(*new_valid, new_size)); in attr_set_size() local
803 if (attr_b->nres.valid_size != valid) { in attr_set_size()
804 attr_b->nres.valid_size = valid; in attr_set_size()
805 mi_b->dirty = true; in attr_set_size()
816 if (attr_b->non_res) { in attr_set_size()
817 new_alloc = le64_to_cpu(attr_b->nres.alloc_size); in attr_set_size()
818 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) { in attr_set_size()
819 inode_set_bytes(&ni->vfs_inode, new_alloc); in attr_set_size()
826 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_set_size()
827 mark_inode_dirty(&ni->vfs_inode); in attr_set_size()
834 vcn -= alen; in attr_set_size()
835 attr_b->nres.data_size = cpu_to_le64(old_size); in attr_set_size()
836 attr_b->nres.valid_size = cpu_to_le64(old_valid); in attr_set_size()
837 attr_b->nres.alloc_size = cpu_to_le64(old_alloc); in attr_set_size()
843 if (le64_to_cpu(attr_b->nres.svcn) <= svcn && in attr_set_size()
844 svcn <= le64_to_cpu(attr_b->nres.evcn)) { in attr_set_size()
849 err = -EINVAL; in attr_set_size()
860 if (mi_pack_runs(mi, attr, run, evcn - svcn + 1)) in attr_set_size()
870 _ntfs_bad_inode(&ni->vfs_inode); in attr_set_size()
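Throughout attr_set_size the target allocation is the new size rounded up to 'align', which is one cluster for normal attributes and a full compression frame (cluster_size << c_unit) for compressed ones. A standalone restatement of that rounding:

#include <stdint.h>

/* Round new_size up to the allocation granularity; c_unit is 0 when
 * the attribute is not compressed. Mirrors the math above; align is
 * always a power of two, so the mask form is exact. */
uint64_t round_alloc(uint64_t new_size, uint32_t cluster_size,
		     unsigned int c_unit)
{
	uint64_t align = (uint64_t)cluster_size << c_unit;

	return (new_size + align - 1) & ~(align - 1);
}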
876 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
880 * @zero - zero out newly allocated clusters in attr_data_get_block()
883 * - @new != NULL is called only for sparse or compressed attributes. in attr_data_get_block()
884 * - newly allocated clusters are zeroed via blkdev_issue_zeroout. in attr_data_get_block()
890 struct runs_tree *run = &ni->file.run; in attr_data_get_block()
906 down_read(&ni->file.run_lock); in attr_data_get_block()
909 up_read(&ni->file.run_lock); in attr_data_get_block()
915 sbi = ni->mi.sbi; in attr_data_get_block()
916 cluster_bits = sbi->cluster_bits; in attr_data_get_block()
919 down_write(&ni->file.run_lock); in attr_data_get_block()
935 err = -ENOENT; in attr_data_get_block()
939 if (!attr_b->non_res) { in attr_data_get_block()
945 asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits; in attr_data_get_block()
948 err = -EINVAL; in attr_data_get_block()
956 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_data_get_block()
957 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_data_get_block()
967 err = -EINVAL; in attr_data_get_block()
970 svcn = le64_to_cpu(attr->nres.svcn); in attr_data_get_block()
971 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
986 /* If the frame is compressed, don't touch it. */ in attr_data_get_block()
989 *len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1)); in attr_data_get_block()
1002 /* Here we may return -ENOENT. in attr_data_get_block()
1010 err = -EINVAL; in attr_data_get_block()
1016 fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1; in attr_data_get_block()
1020 if (attr_b->nres.c_unit) { in attr_data_get_block()
1021 CLST clst_per_frame = 1u << attr_b->nres.c_unit; in attr_data_get_block()
1022 CLST cmask = ~(clst_per_frame - 1); in attr_data_get_block()
1026 to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn; in attr_data_get_block()
1038 err = -EINVAL; in attr_data_get_block()
1041 evcn1 = le64_to_cpu(attr2->nres.evcn) + 1; in attr_data_get_block()
1049 to_alloc = asize - vcn; in attr_data_get_block()
1055 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1, in attr_data_get_block()
1057 err = -ENOMEM; in attr_data_get_block()
1060 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) { in attr_data_get_block()
1061 hint = -1; in attr_data_get_block()
1075 total_size0 = le64_to_cpu(attr_b->nres.total_size); in attr_data_get_block()
1080 err = -EINVAL; in attr_data_get_block()
1086 err = -EINVAL; in attr_data_get_block()
1091 *len = end - vcn0; in attr_data_get_block()
1095 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); in attr_data_get_block()
1099 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_data_get_block()
1100 inode_set_bytes(&ni->vfs_inode, total_size); in attr_data_get_block()
1101 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_data_get_block()
1103 mi_b->dirty = true; in attr_data_get_block()
1104 mark_inode_dirty(&ni->vfs_inode); in attr_data_get_block()
1107 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
1114 /* Add new segment [next_svcn, evcn1), i.e. evcn1 - next_svcn clusters. */ in attr_data_get_block()
1115 if (!ni->attr_list.size) { in attr_data_get_block()
1124 err = -ENOENT; in attr_data_get_block()
1138 * It is too complex to undo operations if -ENOSPC occurs deep inside in attr_data_get_block()
1140 * Return -ENOSPC in advance here if there are no free clusters and no free MFT records. in attr_data_get_block()
1144 err = -ENOSPC; in attr_data_get_block()
1160 alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size)); in attr_data_get_block()
1161 evcn = le64_to_cpu(attr->nres.evcn); in attr_data_get_block()
1170 err = -EINVAL; in attr_data_get_block()
1185 attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id); in attr_data_get_block()
1187 err = -EINVAL; in attr_data_get_block()
1190 svcn = le64_to_cpu(attr->nres.svcn); in attr_data_get_block()
1191 evcn = le64_to_cpu(attr->nres.evcn); in attr_data_get_block()
1202 attr->nres.svcn = cpu_to_le64(next_svcn); in attr_data_get_block()
1203 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); in attr_data_get_block()
1207 le->vcn = cpu_to_le64(next_svcn); in attr_data_get_block()
1208 ni->attr_list.dirty = true; in attr_data_get_block()
1209 mi->dirty = true; in attr_data_get_block()
1210 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_data_get_block()
1215 next_svcn, evcn1 - next_svcn, in attr_data_get_block()
1216 attr_b->flags, &attr, &mi, NULL); in attr_data_get_block()
1225 _ntfs_bad_inode(&ni->vfs_inode); in attr_data_get_block()
1227 up_write(&ni->file.run_lock); in attr_data_get_block()
1234 attr_b->nres.total_size = cpu_to_le64(total_size0); in attr_data_get_block()
1235 inode_set_bytes(&ni->vfs_inode, total_size0); in attr_data_get_block()
1239 mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) { in attr_data_get_block()
1240 _ntfs_bad_inode(&ni->vfs_inode); in attr_data_get_block()
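One detail worth pulling out of attr_data_get_block: the allocation hint. The cluster mapped at vcn - 1 is looked up so that new clusters land right after the existing ones; if that VCN is unmapped, the hint becomes -1 and the allocator chooses freely. A sketch (lookup_lcn() is a hypothetical stand-in for run_lookup_entry()):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t CLST;

/* Hypothetical: returns true and sets *lcn if the VCN is mapped. */
bool lookup_lcn(CLST vcn, CLST *lcn);

CLST pick_hint(CLST vcn)
{
	CLST hint;

	if (!vcn || !lookup_lcn(vcn - 1, &hint))
		return (CLST)-1;	/* no hint: allocator decides */
	return hint;			/* allocate near hint + 1 */
}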
1254 return -EINVAL; in attr_data_read_resident()
1256 if (attr->non_res) in attr_data_read_resident()
1259 vbo = folio->index << PAGE_SHIFT; in attr_data_read_resident()
1260 data_size = le32_to_cpu(attr->res.data_size); in attr_data_read_resident()
1264 len = min(data_size - vbo, folio_size(folio)); in attr_data_read_resident()
1281 return -EINVAL; in attr_data_write_resident()
1283 if (attr->non_res) { in attr_data_write_resident()
1288 vbo = folio->index << PAGE_SHIFT; in attr_data_write_resident()
1289 data_size = le32_to_cpu(attr->res.data_size); in attr_data_write_resident()
1291 char *data = resident_data(attr); in attr_data_write_resident() local
1292 size_t len = min(data_size - vbo, folio_size(folio)); in attr_data_write_resident()
1294 memcpy_from_folio(data + vbo, folio, 0, len); in attr_data_write_resident()
1295 mi->dirty = true; in attr_data_write_resident()
1297 ni->i_valid = data_size; in attr_data_write_resident()
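Both resident helpers clamp the copy length the same way: never past the attribute's data_size and never past the folio. A minimal restatement of that clamp:

#include <stddef.h>
#include <stdint.h>

/* Bytes to copy for a folio at byte offset vbo of a resident attribute. */
size_t resident_copy_len(uint64_t vbo, uint32_t data_size, size_t folio_size)
{
	if (vbo >= data_size)
		return 0;		/* folio lies entirely past the data */
	return data_size - vbo < folio_size ? (size_t)(data_size - vbo)
					    : folio_size;
}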
1303 * attr_load_runs_vcn - Load runs with VCN.
1316 return -ENOENT; in attr_load_runs_vcn()
1322 return -ENOENT; in attr_load_runs_vcn()
1325 svcn = le64_to_cpu(attr->nres.svcn); in attr_load_runs_vcn()
1326 evcn = le64_to_cpu(attr->nres.evcn); in attr_load_runs_vcn()
1330 return -EINVAL; in attr_load_runs_vcn()
1333 ro = le16_to_cpu(attr->nres.run_off); in attr_load_runs_vcn()
1335 if (ro > le32_to_cpu(attr->size)) in attr_load_runs_vcn()
1336 return -EINVAL; in attr_load_runs_vcn()
1338 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn, in attr_load_runs_vcn()
1339 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro); in attr_load_runs_vcn()
1346 * attr_load_runs_range - Load runs for given range [from, to). in attr_load_runs_range()
1352 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_load_runs_range()
1353 u8 cluster_bits = sbi->cluster_bits; in attr_load_runs_range()
1355 CLST vcn_last = (to - 1) >> cluster_bits; in attr_load_runs_range()
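The range loader converts the byte interval [from, to) into an inclusive VCN interval before iterating. A one-function sketch of that conversion:

#include <stdint.h>

typedef uint64_t CLST;

/* [from, to) in bytes -> inclusive [*first, *last] in clusters. */
void byte_range_to_vcns(uint64_t from, uint64_t to, uint8_t cluster_bits,
			CLST *first, CLST *last)
{
	*first = from >> cluster_bits;
	*last = (to - 1) >> cluster_bits;	/* 'to' is exclusive */
}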
1382 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_wof_frame_info()
1392 if (ni->vfs_inode.i_size < 0x100000000ull) { in attr_wof_frame_info()
1405 * Read 4 (or 8) bytes at [vbo - 4 (or 8)] == offset where the compressed frame starts. in attr_wof_frame_info()
1408 if (!attr->non_res) { in attr_wof_frame_info()
1409 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) { in attr_wof_frame_info()
1410 _ntfs_bad_inode(&ni->vfs_inode); in attr_wof_frame_info()
1411 return -EINVAL; in attr_wof_frame_info()
1417 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0; in attr_wof_frame_info()
1421 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0; in attr_wof_frame_info()
1426 *ondisk_size = off[1] - off[0]; in attr_wof_frame_info()
1430 wof_size = le64_to_cpu(attr->nres.data_size); in attr_wof_frame_info()
1431 down_write(&ni->file.run_lock); in attr_wof_frame_info()
1432 folio = ni->file.offs_folio; in attr_wof_frame_info()
1436 err = -ENOMEM; in attr_wof_frame_info()
1439 folio->index = -1; in attr_wof_frame_info()
1440 ni->file.offs_folio = folio; in attr_wof_frame_info()
1446 voff = vbo[1] & (PAGE_SIZE - 1); in attr_wof_frame_info()
1447 vbo[0] = vbo[1] - bytes_per_off; in attr_wof_frame_info()
1459 if (index != folio->index) { in attr_wof_frame_info()
1460 struct page *page = &folio->page; in attr_wof_frame_info()
1461 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); in attr_wof_frame_info()
1471 to - from, REQ_OP_READ); in attr_wof_frame_info()
1473 folio->index = -1; in attr_wof_frame_info()
1476 folio->index = index; in attr_wof_frame_info()
1489 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32)); in attr_wof_frame_info()
1492 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64)); in attr_wof_frame_info()
1499 off[0] = le32_to_cpu(off32[-1]); in attr_wof_frame_info()
1503 off[0] = le64_to_cpu(off64[-1]); in attr_wof_frame_info()
1511 *ondisk_size = off[1] - off[0]; in attr_wof_frame_info()
1516 up_write(&ni->file.run_lock); in attr_wof_frame_info()
1522 * attr_is_frame_compressed - Used to detect a compressed frame. in attr_is_frame_compressed()
1524 * attr - base (primary) attribute segment.
1525 * run - run to use, usually == &ni->file.run.
1526 * Only base segments contain a valid 'attr->nres.c_unit'. in attr_is_frame_compressed()
1541 if (!attr->non_res) in attr_is_frame_compressed()
1544 clst_frame = 1u << attr->nres.c_unit; in attr_is_frame_compressed()
1548 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), in attr_is_frame_compressed()
1549 attr->name_len, run, vcn); in attr_is_frame_compressed()
1554 return -EINVAL; in attr_is_frame_compressed()
1571 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size)); in attr_is_frame_compressed()
1584 err = attr_load_runs_vcn(ni, attr->type, in attr_is_frame_compressed()
1586 attr->name_len, run, vcn_next); in attr_is_frame_compressed()
1592 return -EINVAL; in attr_is_frame_compressed()
1603 return -EINVAL; in attr_is_frame_compressed()
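The detection idea behind attr_is_frame_compressed: count the data clusters stored for the frame; a frame that is partly stored and partly sparse is compressed, a fully stored frame is plain, and a fully sparse frame is a hole. A self-contained sketch under those assumptions (the extent array is illustrative, not the driver's run structure):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t CLST;
#define SPARSE_LCN ((CLST)-1)

struct piece { CLST lcn, len; };	/* illustrative extents of one frame */

bool frame_is_compressed(const struct piece *p, unsigned int n,
			 CLST clst_frame, CLST *clst_data)
{
	*clst_data = 0;
	for (unsigned int i = 0; i < n && *clst_data < clst_frame; i++) {
		if (p[i].lcn == SPARSE_LCN)
			break;		/* sparse tail ends the stored data */
		*clst_data += p[i].len;
	}
	/* 0 => hole; == clst_frame => uncompressed; else compressed. */
	return *clst_data && *clst_data < clst_frame;
}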
1626 * attr_allocate_frame - Allocate/free clusters for @frame.
1628 * Assumed: down_write(&ni->file.run_lock);
1634 struct runs_tree *run = &ni->file.run; in attr_allocate_frame()
1635 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_allocate_frame()
1646 return -ENOENT; in attr_allocate_frame()
1649 return -EINVAL; in attr_allocate_frame()
1652 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_allocate_frame()
1654 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_allocate_frame()
1655 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_allocate_frame()
1656 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_allocate_frame()
1663 err = -EINVAL; in attr_allocate_frame()
1670 err = -EINVAL; in attr_allocate_frame()
1673 svcn = le64_to_cpu(attr->nres.svcn); in attr_allocate_frame()
1674 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1685 total_size -= (u64)clst_data << sbi->cluster_bits; in attr_allocate_frame()
1693 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len, in attr_allocate_frame()
1698 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len, in attr_allocate_frame()
1700 err = -ENOMEM; in attr_allocate_frame()
1709 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL, in attr_allocate_frame()
1711 hint = -1; in attr_allocate_frame()
1715 hint + 1, len - clst_data, NULL, in attr_allocate_frame()
1725 total_size += (u64)len << sbi->cluster_bits; in attr_allocate_frame()
1728 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); in attr_allocate_frame()
1732 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_allocate_frame()
1733 inode_set_bytes(&ni->vfs_inode, total_size); in attr_allocate_frame()
1734 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_allocate_frame()
1736 mi_b->dirty = true; in attr_allocate_frame()
1737 mark_inode_dirty(&ni->vfs_inode); in attr_allocate_frame()
1740 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1747 /* Add new segment [next_svcn, evcn1), i.e. evcn1 - next_svcn clusters. */ in attr_allocate_frame()
1748 if (!ni->attr_list.size) { in attr_allocate_frame()
1757 err = -ENOENT; in attr_allocate_frame()
1775 sbi, le64_to_cpu(attr_b->nres.alloc_size)); in attr_allocate_frame()
1776 CLST evcn = le64_to_cpu(attr->nres.evcn); in attr_allocate_frame()
1785 err = -EINVAL; in attr_allocate_frame()
1801 &le->id); in attr_allocate_frame()
1803 err = -EINVAL; in attr_allocate_frame()
1806 svcn = le64_to_cpu(attr->nres.svcn); in attr_allocate_frame()
1807 evcn = le64_to_cpu(attr->nres.evcn); in attr_allocate_frame()
1818 attr->nres.svcn = cpu_to_le64(next_svcn); in attr_allocate_frame()
1819 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); in attr_allocate_frame()
1823 le->vcn = cpu_to_le64(next_svcn); in attr_allocate_frame()
1824 ni->attr_list.dirty = true; in attr_allocate_frame()
1825 mi->dirty = true; in attr_allocate_frame()
1827 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_allocate_frame()
1832 next_svcn, evcn1 - next_svcn, in attr_allocate_frame()
1833 attr_b->flags, &attr, &mi, NULL); in attr_allocate_frame()
1844 valid_size = le64_to_cpu(attr_b->nres.valid_size); in attr_allocate_frame()
1846 attr_b->nres.valid_size = cpu_to_le64(valid_size); in attr_allocate_frame()
1847 mi_b->dirty = true; in attr_allocate_frame()
1855 * attr_collapse_range - Collapse range in file.
1860 struct runs_tree *run = &ni->file.run; in attr_collapse_range()
1861 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_collapse_range()
1877 return -ENOENT; in attr_collapse_range()
1879 if (!attr_b->non_res) { in attr_collapse_range()
1884 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_collapse_range()
1885 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_collapse_range()
1886 a_flags = attr_b->flags; in attr_collapse_range()
1889 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_collapse_range()
1890 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_collapse_range()
1893 mask = sbi->cluster_mask; in attr_collapse_range()
1898 return -EINVAL; in attr_collapse_range()
1902 return -EINVAL; in attr_collapse_range()
1904 down_write(&ni->file.run_lock); in attr_collapse_range()
1907 u64 new_valid = min(ni->i_valid, vbo); in attr_collapse_range()
1910 truncate_setsize(&ni->vfs_inode, vbo); in attr_collapse_range()
1911 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo, in attr_collapse_range()
1914 if (!err && new_valid < ni->i_valid) in attr_collapse_range()
1915 ni->i_valid = new_valid; in attr_collapse_range()
1923 alen = alloc_size >> sbi->cluster_bits; in attr_collapse_range()
1924 vcn = vbo >> sbi->cluster_bits; in attr_collapse_range()
1925 len = bytes >> sbi->cluster_bits; in attr_collapse_range()
1929 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_collapse_range()
1930 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_collapse_range()
1937 err = -EINVAL; in attr_collapse_range()
1944 err = -EINVAL; in attr_collapse_range()
1948 svcn = le64_to_cpu(attr->nres.svcn); in attr_collapse_range()
1949 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
1954 /* Shift VCNs down by 'len'. */ in attr_collapse_range()
1955 attr->nres.svcn = cpu_to_le64(svcn - len); in attr_collapse_range()
1956 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len); in attr_collapse_range()
1958 le->vcn = attr->nres.svcn; in attr_collapse_range()
1959 ni->attr_list.dirty = true; in attr_collapse_range()
1961 mi->dirty = true; in attr_collapse_range()
1970 eat = min(end, evcn1) - vcn1; in attr_collapse_range()
1978 err = -ENOMEM; in attr_collapse_range()
1984 attr->nres.svcn = cpu_to_le64(vcn); in attr_collapse_range()
1986 le->vcn = attr->nres.svcn; in attr_collapse_range()
1987 ni->attr_list.dirty = true; in attr_collapse_range()
1991 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat); in attr_collapse_range()
1995 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
1999 evcn1 - eat - next_svcn, a_flags, &attr, in attr_collapse_range()
2012 u16 roff = le16_to_cpu(attr->nres.run_off); in attr_collapse_range()
2014 if (roff > le32_to_cpu(attr->size)) { in attr_collapse_range()
2015 err = -EINVAL; in attr_collapse_range()
2019 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, in attr_collapse_range()
2020 evcn1 - 1, svcn, Add2Ptr(attr, roff), in attr_collapse_range()
2021 le32_to_cpu(attr->size) - roff); in attr_collapse_range()
2028 le_sz = le16_to_cpu(le->size); in attr_collapse_range()
2030 err = -EINVAL; in attr_collapse_range()
2040 err = -EINVAL; in attr_collapse_range()
2046 NULL, 0, &le->id); in attr_collapse_range()
2048 err = -EINVAL; in attr_collapse_range()
2053 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); in attr_collapse_range()
2061 err = -EINVAL; in attr_collapse_range()
2066 svcn = le64_to_cpu(attr->nres.svcn); in attr_collapse_range()
2067 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_collapse_range()
2075 err = -ENOENT; in attr_collapse_range()
2080 data_size -= bytes; in attr_collapse_range()
2081 valid_size = ni->i_valid; in attr_collapse_range()
2083 valid_size -= bytes; in attr_collapse_range()
2087 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes); in attr_collapse_range()
2088 attr_b->nres.data_size = cpu_to_le64(data_size); in attr_collapse_range()
2089 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size)); in attr_collapse_range()
2090 total_size -= (u64)dealloc << sbi->cluster_bits; in attr_collapse_range()
2092 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_collapse_range()
2093 mi_b->dirty = true; in attr_collapse_range()
2096 ni->i_valid = valid_size; in attr_collapse_range()
2097 i_size_write(&ni->vfs_inode, data_size); in attr_collapse_range()
2098 inode_set_bytes(&ni->vfs_inode, total_size); in attr_collapse_range()
2099 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_collapse_range()
2100 mark_inode_dirty(&ni->vfs_inode); in attr_collapse_range()
2103 up_write(&ni->file.run_lock); in attr_collapse_range()
2105 _ntfs_bad_inode(&ni->vfs_inode); in attr_collapse_range()
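Collapse requests must be aligned to the attribute's granularity: a cluster for sparse files, a whole compression frame for compressed ones, hence the mask built from cluster_size << c_unit. A sketch of that argument check:

#include <errno.h>
#include <stdint.h>

/* Reject collapse offsets/lengths that are not granule multiples. */
int check_collapse_args(uint64_t vbo, uint64_t bytes,
			uint32_t cluster_size, unsigned int c_unit)
{
	uint64_t mask = ((uint64_t)cluster_size << c_unit) - 1;

	if ((vbo & mask) || (bytes & mask))
		return -EINVAL;	/* misaligned request */
	return 0;
}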
2118 struct runs_tree *run = &ni->file.run; in attr_punch_hole()
2119 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_punch_hole()
2135 return -ENOENT; in attr_punch_hole()
2137 if (!attr_b->non_res) { in attr_punch_hole()
2138 u32 data_size = le32_to_cpu(attr_b->res.data_size); in attr_punch_hole()
2146 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from); in attr_punch_hole()
2151 return -EOPNOTSUPP; in attr_punch_hole()
2153 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_punch_hole()
2154 total_size = le64_to_cpu(attr_b->nres.total_size); in attr_punch_hole()
2161 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_punch_hole()
2166 bytes -= vbo; in attr_punch_hole()
2172 return -EINVAL; in attr_punch_hole()
2178 down_write(&ni->file.run_lock); in attr_punch_hole()
2185 alen = alloc_size >> sbi->cluster_bits; in attr_punch_hole()
2186 vcn = vbo >> sbi->cluster_bits; in attr_punch_hole()
2187 len = bytes >> sbi->cluster_bits; in attr_punch_hole()
2191 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_punch_hole()
2192 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_punch_hole()
2193 a_flags = attr_b->flags; in attr_punch_hole()
2200 err = -EINVAL; in attr_punch_hole()
2207 err = -EINVAL; in attr_punch_hole()
2211 svcn = le64_to_cpu(attr->nres.svcn); in attr_punch_hole()
2212 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2222 zero = min(end, evcn1) - vcn1; in attr_punch_hole()
2244 err = -ENOMEM; in attr_punch_hole()
2249 err = mi_pack_runs(mi, attr, run, evcn1 - svcn); in attr_punch_hole()
2252 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2257 evcn1 - next_svcn, a_flags, in attr_punch_hole()
2279 err = -EINVAL; in attr_punch_hole()
2283 svcn = le64_to_cpu(attr->nres.svcn); in attr_punch_hole()
2284 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_punch_hole()
2295 err = -EINVAL; in attr_punch_hole()
2300 total_size -= (u64)hole << sbi->cluster_bits; in attr_punch_hole()
2301 attr_b->nres.total_size = cpu_to_le64(total_size); in attr_punch_hole()
2302 mi_b->dirty = true; in attr_punch_hole()
2305 inode_set_bytes(&ni->vfs_inode, total_size); in attr_punch_hole()
2306 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_punch_hole()
2307 mark_inode_dirty(&ni->vfs_inode); in attr_punch_hole()
2311 up_write(&ni->file.run_lock); in attr_punch_hole()
2315 _ntfs_bad_inode(&ni->vfs_inode); in attr_punch_hole()
2323 if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn)) in attr_punch_hole()
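Punching a hole keeps the file length: backing clusters are freed and the range is re-mapped as SPARSE_LCN, and the count of holed clusters is what gets subtracted from total_size. A loop of the same shape; the three helpers are hypothetical stand-ins for run_lookup_entry(), mark_as_free_ex() and run_add_entry():

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t CLST;
#define SPARSE_LCN ((CLST)-1)

/* Hypothetical helpers; see lead-in. */
bool lookup_lcn_len(CLST vcn, CLST *lcn, CLST *clen);
void free_clusters(CLST lcn, CLST clen);
void map_sparse(CLST vcn, CLST clen);

/* Punch [vcn, vcn + len); returns how many clusters became holes. */
CLST punch_range(CLST vcn, CLST len)
{
	CLST hole = 0, lcn, clen;

	while (len && lookup_lcn_len(vcn, &lcn, &clen)) {
		if (clen > len)
			clen = len;
		if (lcn != SPARSE_LCN) {	/* skip existing holes */
			free_clusters(lcn, clen);
			map_sparse(vcn, clen);
			hole += clen;
		}
		vcn += clen;
		len -= clen;
	}
	return hole;	/* caller subtracts hole << cluster_bits */
}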
2330 * attr_insert_range - Insert range (hole) in file.
2336 struct runs_tree *run = &ni->file.run; in attr_insert_range()
2337 struct ntfs_sb_info *sbi = ni->mi.sbi; in attr_insert_range()
2352 return -ENOENT; in attr_insert_range()
2356 return -EOPNOTSUPP; in attr_insert_range()
2359 if (!attr_b->non_res) { in attr_insert_range()
2360 data_size = le32_to_cpu(attr_b->res.data_size); in attr_insert_range()
2362 mask = sbi->cluster_mask; /* cluster_size - 1 */ in attr_insert_range()
2364 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_insert_range()
2365 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_insert_range()
2366 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; in attr_insert_range()
2376 return -EINVAL; in attr_insert_range()
2381 return -EINVAL; in attr_insert_range()
2388 if (bytes > sbi->maxbytes_sparse - alloc_size) in attr_insert_range()
2389 return -EFBIG; in attr_insert_range()
2391 vcn = vbo >> sbi->cluster_bits; in attr_insert_range()
2392 len = bytes >> sbi->cluster_bits; in attr_insert_range()
2394 down_write(&ni->file.run_lock); in attr_insert_range()
2396 if (!attr_b->non_res) { in attr_insert_range()
2404 err = -EINVAL; in attr_insert_range()
2411 if (!attr_b->non_res) { in attr_insert_range()
2413 char *data = Add2Ptr(attr_b, in attr_insert_range() local
2414 le16_to_cpu(attr_b->res.data_off)); in attr_insert_range()
2416 memmove(data + bytes, data, bytes); in attr_insert_range()
2417 memset(data, 0, bytes); in attr_insert_range()
2422 data_size = le64_to_cpu(attr_b->nres.data_size); in attr_insert_range()
2423 alloc_size = le64_to_cpu(attr_b->nres.alloc_size); in attr_insert_range()
2429 a_flags = attr_b->flags; in attr_insert_range()
2430 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_insert_range()
2431 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_insert_range()
2438 err = -EINVAL; in attr_insert_range()
2445 err = -EINVAL; in attr_insert_range()
2449 svcn = le64_to_cpu(attr->nres.svcn); in attr_insert_range()
2450 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2459 err = -ENOMEM; in attr_insert_range()
2464 err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn); in attr_insert_range()
2468 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2471 attr->type == ATTR_DATA && !attr->name_len) { in attr_insert_range()
2472 le64_add_cpu(&attr->nres.svcn, len); in attr_insert_range()
2473 le64_add_cpu(&attr->nres.evcn, len); in attr_insert_range()
2475 le->vcn = attr->nres.svcn; in attr_insert_range()
2476 ni->attr_list.dirty = true; in attr_insert_range()
2478 mi->dirty = true; in attr_insert_range()
2483 next_svcn, evcn1 + len - next_svcn, in attr_insert_range()
2490 err = -EINVAL; in attr_insert_range()
2503 if (vbo <= ni->i_valid) in attr_insert_range()
2504 ni->i_valid += bytes; in attr_insert_range()
2506 attr_b->nres.data_size = cpu_to_le64(data_size + bytes); in attr_insert_range()
2507 attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes); in attr_insert_range()
2509 /* ni->i_valid may not equal valid_size (temporarily). */ in attr_insert_range()
2510 if (ni->i_valid > data_size + bytes) in attr_insert_range()
2511 attr_b->nres.valid_size = attr_b->nres.data_size; in attr_insert_range()
2513 attr_b->nres.valid_size = cpu_to_le64(ni->i_valid); in attr_insert_range()
2514 mi_b->dirty = true; in attr_insert_range()
2517 i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes); in attr_insert_range()
2518 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; in attr_insert_range()
2519 mark_inode_dirty(&ni->vfs_inode); in attr_insert_range()
2524 up_write(&ni->file.run_lock); in attr_insert_range()
2529 _ntfs_bad_inode(&ni->vfs_inode); in attr_insert_range()
2533 svcn = le64_to_cpu(attr_b->nres.svcn); in attr_insert_range()
2534 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; in attr_insert_range()
2550 svcn = le64_to_cpu(attr->nres.svcn); in attr_insert_range()
2551 evcn1 = le64_to_cpu(attr->nres.evcn) + 1; in attr_insert_range()
2560 if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn)) in attr_insert_range()
2564 attr->type == ATTR_DATA && !attr->name_len) { in attr_insert_range()
2565 le64_sub_cpu(&attr->nres.svcn, len); in attr_insert_range()
2566 le64_sub_cpu(&attr->nres.evcn, len); in attr_insert_range()
2568 le->vcn = attr->nres.svcn; in attr_insert_range()
2569 ni->attr_list.dirty = true; in attr_insert_range()
2571 mi->dirty = true; in attr_insert_range()
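Inserting a hole shifts the VCN window of every attribute segment at or past the insertion point up by 'len'; the error path runs the same walk with the sign flipped (le64_sub_cpu above). A compact sketch of that shuffle over an illustrative segment array:

#include <stdint.h>

typedef uint64_t CLST;

struct segment { CLST svcn, evcn; };	/* illustrative, not the MFT layout */

void shift_segments(struct segment *seg, unsigned int n, CLST vcn,
		    CLST len, int undo)
{
	for (unsigned int i = 0; i < n; i++) {
		if (seg[i].svcn < vcn)
			continue;	/* segment lies before the hole */
		if (undo) {
			seg[i].svcn -= len;	/* error path: slide back */
			seg[i].evcn -= len;
		} else {
			seg[i].svcn += len;	/* normal path: slide up */
			seg[i].evcn += len;
		}
	}
}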
2580 * Convert the default data attribute into non-resident form. in attr_force_nonresident()
2591 _ntfs_bad_inode(&ni->vfs_inode); in attr_force_nonresident()
2592 return -ENOENT; in attr_force_nonresident()
2595 if (attr->non_res) { in attr_force_nonresident()
2600 down_write(&ni->file.run_lock); in attr_force_nonresident()
2602 le32_to_cpu(attr->res.data_size), in attr_force_nonresident()
2603 &ni->file.run, &attr, NULL); in attr_force_nonresident()
2604 up_write(&ni->file.run_lock); in attr_force_nonresident()
2610 * Change the compression of the data attribute. in attr_set_compress()
2619 return -ENOENT; in attr_set_compress()
2626 if (attr->non_res) { in attr_set_compress()
2631 if (attr->nres.data_size) { in attr_set_compress()
2637 return -EOPNOTSUPP; in attr_set_compress()
2640 run_off = le16_to_cpu(attr->nres.run_off); in attr_set_compress()
2641 run_size = le32_to_cpu(attr->size) - run_off; in attr_set_compress()
2645 /* Remove the 'attr->nres.total_size' field. */ in attr_set_compress()
2646 memmove(run - 8, run, run_size); in attr_set_compress()
2647 run_off -= 8; in attr_set_compress()
2650 if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) { in attr_set_compress()
2655 return -EOPNOTSUPP; in attr_set_compress()
2659 /* Make a gap for 'attr->nres.total_size'. */ in attr_set_compress()
2662 attr->nres.total_size = attr->nres.alloc_size; in attr_set_compress()
2664 attr->nres.run_off = cpu_to_le16(run_off); in attr_set_compress()
2667 /* Update data attribute flags. */ in attr_set_compress()
2669 attr->flags |= ATTR_FLAG_COMPRESSED; in attr_set_compress()
2670 attr->nres.c_unit = NTFS_LZNT_CUNIT; in attr_set_compress()
2672 attr->flags &= ~ATTR_FLAG_COMPRESSED; in attr_set_compress()
2673 attr->nres.c_unit = 0; in attr_set_compress()
2675 mi->dirty = true; in attr_set_compress()
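The +8/-8 resize above exists because compressed (and sparse) non-resident headers carry an extra 64-bit total_size field just before the packed runs; toggling compression therefore slides the run data by 8 bytes. A sketch of that layout move, assuming the record has already been resized (mi_resize_attr in the driver):

#include <stdint.h>
#include <string.h>

/* Open or close the 8-byte gap for the total_size field; 'hdr' and
 * 'run_off' describe an illustrative attribute buffer, not the real
 * on-disk structures. */
void toggle_total_size_field(uint8_t *hdr, uint32_t *run_off,
			     uint32_t run_size, int compress)
{
	uint8_t *run = hdr + *run_off;

	if (compress) {
		memmove(run + 8, run, run_size);	/* make a gap */
		memset(run, 0, 8);	/* caller fills total_size */
		*run_off += 8;
	} else {
		memmove(run - 8, run, run_size);	/* remove the field */
		*run_off -= 8;
	}
}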