Lines Matching full:run

35 static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)  in run_lookup()  argument
40 if (!run->count) { in run_lookup()
46 max_idx = run->count - 1; in run_lookup()
49 r = run->runs; in run_lookup()
62 *index = run->count; in run_lookup()
73 r = run->runs + mid_idx; in run_lookup()
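run_lookup() is the binary search that maps a VCN to the index of the run containing it, or to the insertion point when no run covers it. A minimal userspace sketch of the same idea, using hypothetical my_* types in place of the kernel's struct runs_tree and CLST (the lcn and allocated fields are only needed by the later sketches):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct my_run {
	uint64_t vcn;	/* first virtual cluster of the run */
	uint64_t lcn;	/* first logical (on-disk) cluster */
	uint64_t len;	/* length in clusters */
};

struct my_runs {
	struct my_run *runs;	/* sorted by vcn, non-overlapping */
	size_t count;		/* number of valid entries */
	size_t allocated;	/* bytes allocated for runs[] */
};

/*
 * Return true and set *index to the run containing vcn, or return false
 * and set *index to the position where such a run would be inserted.
 */
static bool my_run_lookup(const struct my_runs *run, uint64_t vcn, size_t *index)
{
	size_t lo = 0, hi = run->count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct my_run *r = run->runs + mid;

		if (vcn < r->vcn)
			hi = mid;
		else if (vcn >= r->vcn + r->len)
			lo = mid + 1;
		else {
			*index = mid;
			return true;
		}
	}

	*index = lo;
	return false;
}

The search is only valid because the table is kept sorted by vcn with non-overlapping entries, which the insertion and consolidation paths further down maintain.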
94 static void run_consolidate(struct runs_tree *run, size_t index) in run_consolidate() argument
97 struct ntfs_run *r = run->runs + index; in run_consolidate()
99 while (index + 1 < run->count) { in run_consolidate()
101 * Merge the current run with the next one in run_consolidate()
102 * if the start of the next run lies inside the one being tested. in run_consolidate()
142 * of the next run's lcn does not match in run_consolidate()
143 * the last volume block of the current run. in run_consolidate()
155 i = run->count - (index + 1); in run_consolidate()
159 run->count -= 1; in run_consolidate()
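run_consolidate() merges the run at index with its successors once they become contiguous and closes the resulting holes with memmove(), which is what the count/memmove fragments above show. A simplified sketch of that merge loop, reusing the my_* types from the previous sketch and handling only the exactly-contiguous case (the kernel version also copes with overlapping and sparse runs):

#include <string.h>

/* Merge runs[index] with following runs while both VCN and LCN stay contiguous. */
static void my_run_consolidate(struct my_runs *run, size_t index)
{
	struct my_run *r = run->runs + index;

	while (index + 1 < run->count) {
		struct my_run *n = r + 1;

		/* The next run must start exactly where the current one ends... */
		if (n->vcn != r->vcn + r->len)
			break;

		/* ...and its on-disk clusters must continue the current run. */
		if (n->lcn != r->lcn + r->len)
			break;

		/* Absorb the next run and close the hole left in the array. */
		r->len += n->len;
		memmove(n, n + 1, sizeof(*n) * (run->count - (index + 2)));
		run->count -= 1;
	}
}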
168 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn) in run_is_mapped_full() argument
174 if (!run_lookup(run, svcn, &i)) in run_is_mapped_full()
177 end = run->runs + run->count; in run_is_mapped_full()
178 r = run->runs + i; in run_is_mapped_full()
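run_is_mapped_full() reports whether every cluster in [svcn, evcn] is covered by consecutive runs with no gap in between. A hedged sketch of the same walk, built on my_runs and my_run_lookup() from the first sketch:

/* True if [svcn, evcn] is fully covered by back-to-back runs. */
static bool my_run_is_mapped_full(const struct my_runs *run,
				  uint64_t svcn, uint64_t evcn)
{
	size_t i;
	const struct my_run *r, *end;

	if (!my_run_lookup(run, svcn, &i))
		return false;		/* the start itself is unmapped */

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		uint64_t next_vcn = r->vcn + r->len;

		if (next_vcn > evcn)
			return true;	/* reached past the end of the range */

		if (++r >= end || r->vcn != next_vcn)
			return false;	/* ran out of runs, or hit a gap */
	}
}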
193 bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn, in run_lookup_entry() argument
201 if (!run->runs) in run_lookup_entry()
204 if (!run_lookup(run, vcn, &idx)) in run_lookup_entry()
207 r = run->runs + idx; in run_lookup_entry()
229 void run_truncate_head(struct runs_tree *run, CLST vcn) in run_truncate_head() argument
234 if (run_lookup(run, vcn, &index)) { in run_truncate_head()
235 r = run->runs + index; in run_truncate_head()
249 r = run->runs; in run_truncate_head()
250 memmove(r, r + index, sizeof(*r) * (run->count - index)); in run_truncate_head()
252 run->count -= index; in run_truncate_head()
254 if (!run->count) { in run_truncate_head()
255 kvfree(run->runs); in run_truncate_head()
256 run->runs = NULL; in run_truncate_head()
257 run->allocated = 0; in run_truncate_head()
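run_truncate_head() removes every run that lies entirely before vcn, clips the run that contains vcn, and frees the array when nothing is left, as the memmove()/kvfree() fragments above indicate. A rough userspace equivalent on the my_* types (the kernel additionally leaves the lcn of sparse runs untouched):

#include <stdlib.h>
#include <string.h>

static void my_run_truncate_head(struct my_runs *run, uint64_t vcn)
{
	size_t index;

	if (my_run_lookup(run, vcn, &index)) {
		struct my_run *r = run->runs + index;

		if (vcn > r->vcn) {
			/* Clip the front of the run that straddles vcn. */
			uint64_t dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->lcn += dlen;
			r->len -= dlen;
		}
	}

	if (index) {
		/* Slide the surviving runs down over the removed head. */
		memmove(run->runs, run->runs + index,
			sizeof(struct my_run) * (run->count - index));
		run->count -= index;
	}

	if (!run->count) {
		free(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}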
264 void run_truncate(struct runs_tree *run, CLST vcn) in run_truncate() argument
274 if (run_lookup(run, vcn, &index)) { in run_truncate()
275 struct ntfs_run *r = run->runs + index; in run_truncate()
288 run->count = index; in run_truncate()
292 kvfree(run->runs); in run_truncate()
293 run->runs = NULL; in run_truncate()
294 run->allocated = 0; in run_truncate()
301 void run_truncate_around(struct runs_tree *run, CLST vcn) in run_truncate_around() argument
303 run_truncate_head(run, vcn); in run_truncate_around()
305 if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2) in run_truncate_around()
306 run_truncate(run, (run->runs + (run->count >> 1))->vcn); in run_truncate_around()
313 * The run to be added may overlap with an existing location.
317 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len, in run_add_entry() argument
332 inrange = run_lookup(run, vcn, &index); in run_add_entry()
337 * continues previous run. in run_add_entry()
342 struct ntfs_run *t = run->runs + index - 1; in run_add_entry()
364 used = run->count * sizeof(struct ntfs_run); in run_add_entry()
371 if (run->allocated < used + sizeof(struct ntfs_run)) { in run_add_entry()
379 if (is_power_of_2(run->allocated)) in run_add_entry()
380 bytes = run->allocated << 1; in run_add_entry()
385 bytes = run->allocated + (16 * PAGE_SIZE); in run_add_entry()
396 memcpy(new_ptr, run->runs, in run_add_entry()
398 memcpy(r + 1, run->runs + index, in run_add_entry()
399 sizeof(struct ntfs_run) * (run->count - index)); in run_add_entry()
401 kvfree(run->runs); in run_add_entry()
402 run->runs = new_ptr; in run_add_entry()
403 run->allocated = bytes; in run_add_entry()
406 size_t i = run->count - index; in run_add_entry()
408 r = run->runs + index; in run_add_entry()
418 run->count += 1; in run_add_entry()
420 r = run->runs + index; in run_add_entry()
471 run_consolidate(run, index); in run_add_entry()
472 run_consolidate(run, index + 1); in run_add_entry()
479 !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft)) in run_add_entry()
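The run_add_entry() fragments above show two pieces of array bookkeeping: growing the backing buffer (doubling a power-of-two allocation, otherwise adding a 16-page step) and opening a hole at the insertion index with memcpy()/memmove() before bumping count. The sketch below covers only that bookkeeping on the my_* types; the 4 KiB page size and the simplified growth rule are assumptions, and the real function additionally merges overlapping ranges, consolidates neighbours, and may re-add a split-off tail:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Insert a new run at a given index (index <= run->count), growing as needed. */
static bool my_run_insert_at(struct my_runs *run, size_t index,
			     uint64_t vcn, uint64_t lcn, uint64_t len)
{
	size_t used = run->count * sizeof(struct my_run);

	if (run->allocated < used + sizeof(struct my_run)) {
		/* Double a power-of-two allocation, otherwise add a 16-page step. */
		size_t bytes = (run->allocated && !(run->allocated & (run->allocated - 1)))
				? run->allocated << 1
				: run->allocated + 16 * 4096;
		struct my_run *new_ptr;

		if (bytes < used + sizeof(struct my_run))
			bytes = used + sizeof(struct my_run);	/* safety net */

		new_ptr = malloc(bytes);
		if (!new_ptr)
			return false;

		if (run->runs) {
			/* Copy the runs before and after the hole at 'index'. */
			memcpy(new_ptr, run->runs,
			       sizeof(struct my_run) * index);
			memcpy(new_ptr + index + 1, run->runs + index,
			       sizeof(struct my_run) * (run->count - index));
			free(run->runs);
		}
		run->runs = new_ptr;
		run->allocated = bytes;
	} else {
		/* Enough room: shift the tail up by one slot to open the hole. */
		memmove(run->runs + index + 1, run->runs + index,
			sizeof(struct my_run) * (run->count - index));
	}

	run->runs[index].vcn = vcn;
	run->runs[index].lcn = lcn;
	run->runs[index].len = len;
	run->count += 1;
	return true;
}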
490 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len) in run_collapse_range() argument
496 if (WARN_ON(!run_lookup(run, vcn, &index))) in run_collapse_range()
499 e = run->runs + run->count; in run_collapse_range()
500 r = run->runs + index; in run_collapse_range()
505 /* Collapse the tail of the run. */ in run_collapse_range()
508 /* Collapse a middle part of a sparse run. */ in run_collapse_range()
511 /* Collapse a middle part of a normal run, split. */ in run_collapse_range()
512 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false)) in run_collapse_range()
514 return run_collapse_range(run, vcn, len); in run_collapse_range()
532 /* Eat this run. */ in run_collapse_range()
546 run->count -= eat; in run_collapse_range()
556 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len) in run_insert_range() argument
561 if (WARN_ON(!run_lookup(run, vcn, &index))) in run_insert_range()
564 e = run->runs + run->count; in run_insert_range()
565 r = run->runs + index; in run_insert_range()
573 r = run->runs + index; in run_insert_range()
583 if (!run_add_entry(run, vcn + len, lcn2, len2, false)) in run_insert_range()
587 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false)) in run_insert_range()
596 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn, in run_get_entry() argument
601 if (index >= run->count) in run_get_entry()
604 r = run->runs + index; in run_get_entry()
818 int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf, in run_pack() argument
836 if (!run_lookup(run, svcn, &i)) in run_pack()
839 r_end = run->runs + run->count; in run_pack()
840 r = run->runs + i; in run_pack()
849 r = run->runs + i; in run_pack()
878 /* Can we store this entire run? */ in run_pack()
883 /* Pack run header. */ in run_pack()
887 /* Pack the length of the run. */ in run_pack()
921 int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, in run_unpack() argument
1007 "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n" in run_unpack()
1018 if (!run) in run_unpack()
1019 ; /* Called from check_attr(fslog.c) to check run. */ in run_unpack()
1020 else if (run == RUN_DEALLOCATE) { in run_unpack()
1023 * without storing in run. in run_unpack()
1028 if (!run_add_entry(run, vcn64, lcn, len, is_mft)) in run_unpack()
1033 if (!run_add_entry(run, vcn, lcn + dlen, len - dlen, in run_unpack()
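run_pack() and run_unpack() translate between the in-memory table and NTFS's packed "mapping pairs": each entry begins with a header byte whose low nibble is the byte count of the run length and whose high nibble is the byte count of a signed, little-endian LCN delta (zero meaning a sparse run), and a zero header byte terminates the list. The following is a self-contained sketch of decoding one such entry as an illustration of that on-disk format, not a copy of the kernel routine:

#include <stddef.h>
#include <stdint.h>

/*
 * Decode one mapping pair from [p, end). Returns the position of the next
 * entry, or NULL on the terminator or a malformed entry.
 */
static const uint8_t *my_unpack_one(const uint8_t *p, const uint8_t *end,
				    uint64_t *len, int64_t *lcn_delta,
				    int *is_sparse)
{
	uint8_t hdr, len_sz, lcn_sz;
	uint64_t v = 0;
	size_t i;

	if (p >= end || !(hdr = *p++))
		return NULL;			/* end of the mapping pairs */

	len_sz = hdr & 0x0f;			/* bytes used by the length */
	lcn_sz = hdr >> 4;			/* bytes used by the LCN delta */
	if (!len_sz || len_sz > 8 || lcn_sz > 8 ||
	    (size_t)(end - p) < (size_t)len_sz + lcn_sz)
		return NULL;			/* malformed entry */

	for (i = 0; i < len_sz; i++)
		v |= (uint64_t)p[i] << (8 * i);	/* little-endian run length */
	*len = v;
	p += len_sz;

	if (!lcn_sz) {
		*is_sparse = 1;			/* hole: no clusters on disk */
		*lcn_delta = 0;
	} else {
		*is_sparse = 0;
		v = 0;
		for (i = 0; i < lcn_sz; i++)
			v |= (uint64_t)p[i] << (8 * i);
		if (lcn_sz < 8 && (p[lcn_sz - 1] & 0x80))
			v |= ~(uint64_t)0 << (8 * lcn_sz);	/* sign-extend */
		*lcn_delta = (int64_t)v;	/* relative to the previous LCN */
		p += lcn_sz;
	}

	return p;
}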
1057 int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, in run_unpack_ex() argument
1067 ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size); in run_unpack_ex()
1071 if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE) in run_unpack_ex()
1080 for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index); in run_unpack_ex()
1082 ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) { in run_unpack_ex()
1119 /* Restore zone. Lock mft run. */ in run_unpack_ex()
1177 * Make a copy of the run.
1179 int run_clone(const struct runs_tree *run, struct runs_tree *new_run) in run_clone() argument
1181 size_t bytes = run->count * sizeof(struct ntfs_run); in run_clone()
1194 memcpy(new_run->runs, run->runs, bytes); in run_clone()
1195 new_run->count = run->count; in run_clone()
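run_clone() closes the list: it deep-copies one run table into another, reallocating the destination buffer only when it is too small, then copies the entries and the count. A userspace sketch on the my_runs type from the first sketch (the destination is assumed to be initialised, e.g. zeroed; the kernel uses kvmalloc() and returns -ENOMEM on failure):

#include <stdlib.h>
#include <string.h>

static int my_run_clone(const struct my_runs *run, struct my_runs *new_run)
{
	size_t bytes = run->count * sizeof(struct my_run);

	if (bytes > new_run->allocated) {
		struct my_run *new_ptr = malloc(bytes);

		if (!new_ptr)
			return -1;

		free(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	if (bytes)
		memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}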