// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}

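/*
 * Pad a reservation by 1/2^RESERVE_FACTOR, rounded up:
 * round_up(r, 1 << RESERVE_FACTOR) >> RESERVE_FACTOR is
 * ceil(r / 2^RESERVE_FACTOR), so this returns r + ceil(r / 2^RESERVE_FACTOR) -
 * e.g. with RESERVE_FACTOR == 6, reserve_factor(100) == 100 + ceil(100/64) == 102.
 */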
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *out,
			    struct bch_dev *ca,
			    struct bch_dev_usage *usage)
{
	if (out->nr_tabstops < 5) {
		printbuf_tabstops_reset(out);
		printbuf_tabstop_push(out, 12);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
	}

	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			   usage->d[i].buckets,
			   usage->d[i].sectors,
			   usage->d[i].fragmented);
	}

	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}

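/*
 * Check a single decoded extent pointer against the in-memory GC bucket
 * state, repairing the bucket mark in place where that's safe and setting
 * *do_update when the key itself needs to be rewritten by the caller.
 */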
static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
				trans, ptr_to_invalid_device,
				"pointer to missing device %u\n"
				"while marking %s",
				p.ptr.dev,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}

	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
			g->data_type = 0;
			g->stripe_sectors = 0;
			g->dirty_sectors = 0;
			g->cached_sectors = 0;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (data_type == BCH_DATA_btree) {
			g->gen_valid = true;
			g->gen = p.ptr.gen;
			g->data_type = data_type;
			g->stripe_sectors = 0;
			g->dirty_sectors = 0;
			g->cached_sectors = 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

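/*
 * Run the per-pointer checks above on every pointer in @k; if any of them
 * flagged the key for repair, rewrite it: drop pointers to nonexistent
 * devices, drop extent pointers whose bucket state says they're dead, and
 * drop stripe pointers that no longer match a live stripe. Btree node
 * pointers are never dropped, only their gens updated.
 */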
int bch2_check_fix_ptrs(struct btree_trans *trans,
			enum btree_id btree, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
		if (ret)
			goto err;
	}

	if (do_update) {
		if (flags & BTREE_TRIGGER_is_root) {
			bch_err(c, "cannot update btree roots yet");
			ret = -EINVAL;
			goto err;
		}

		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		rcu_read_lock();
		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
		rcu_read_unlock();

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			rcu_read_lock();
			bkey_for_each_ptr(ptrs, ptr) {
				struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

				ptr->gen = g->gen;
			}
			rcu_read_unlock();
		} else {
			struct bkey_ptrs ptrs;
			union bch_extent_entry *entry;

			rcu_read_lock();
restart_drop_ptrs:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

				if ((p.ptr.cached &&
				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
				    (!p.ptr.cached &&
				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
				    (g->data_type &&
				     g->data_type != data_type)) {
					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
					goto restart_drop_ptrs;
				}
			}
			rcu_read_unlock();
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									   entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		ret = bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, new,
					  BTREE_UPDATE_internal_snapshot_node|
					  BTREE_TRIGGER_norun);
		bch2_trans_iter_exit(trans, &iter);
		if (ret)
			goto err;

		if (level)
			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

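/*
 * Apply a sector count change to one bucket on behalf of an extent pointer,
 * after sanity checking the pointer's generation number against the
 * bucket's: a mismatch that would corrupt accounting schedules the
 * check_allocations recovery pass and fails the update (for inserts) or
 * skips it (for deletes). Returns 1 to tell the caller the pointer is a
 * stale cached pointer that shouldn't be counted.
 */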
int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (gen_after(ptr->gen, b_gen)) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_gen_newer_than_bucket_gen,
			     "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_too_stale,
			     "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (b_gen != ptr->gen) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, stale_dirty_ptr,
			     "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bucket_gen_get(ca, bucket_nr),
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_bucket_data_type_mismatch,
			     "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type),
			     bch2_data_type_str(ptr_data_type),
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if ((u64) *bucket_sectors + sectors > U32_MAX) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_sector_count_overflow,
			     "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     *bucket_sectors, sectors,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		sectors = -*bucket_sectors;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
err:
fsck_err:
	bch2_dump_trans_updates(trans);
	bch2_inconsistent_error(c);
	ret = -BCH_ERR_bucket_ref_update;
	goto out;
}

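/*
 * Fold this transaction's usage delta into the filesystem totals, and check
 * that any increase in allocated sectors was covered by the transaction's
 * disk reservation: if it wasn't, claw the difference back from
 * c->sectors_available and warn (once) about the inconsistency.
 */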
void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

/* KEY_TYPE_extent: */

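/*
 * Route a sector count change to the right counter in the alloc key -
 * stripe_sectors for erasure coded pointers, cached_sectors for cached
 * pointers, dirty_sectors otherwise - via bch2_bucket_ref_update().
 */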
static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a,
			  bool insert)
{
	u32 *dst_sectors = p->has_ec ? &a->stripe_sectors :
		!p->ptr.cached ? &a->dirty_sectors :
		&a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;
	if (insert)
		alloc_data_type_set(a, ptr_data_type);
	return 0;
}

static int bch2_trigger_pointer(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c k, struct extent_ptr_decoded p,
				const union bch_extent_entry *entry,
				s64 *sectors,
				enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bkey_i_backpointer bp;
	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);

	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
			ret = -BCH_ERR_trigger_pointer;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
		if (ret)
			goto err;

		ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
		if (ret)
			goto err;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = -BCH_ERR_trigger_pointer;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

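/*
 * Account sectors referenced via an erasure coded pointer against the stripe
 * they live in: transactionally by updating the stripe key's block sector
 * counts, and at GC time against the in-memory gc_stripe. Either way the
 * replicas accounting entry is derived from the stripe, not the extent.
 */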
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				   struct bkey_s_c k,
				   struct extent_ptr_decoded p,
				   enum bch_data_type data_type,
				   s64 sectors,
				   enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
						   "pointer to nonexistent stripe %llu",
						   (u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
						"stripe pointer doesn't match stripe %llu",
						(u64) p.ec.idx);
			ret = -BCH_ERR_trigger_stripe_pointer;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
				      stripe_blockcount_get(&s->v, p.ec.block) +
				      sectors);

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		gc_stripe_lock(m);

		if (!m || !m->alive) {
			gc_stripe_unlock(m);
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);
			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
					    (u64) p.ec.idx, buf.buf);
			printbuf_exit(&buf);
			bch2_inconsistent_error(c);
			return -BCH_ERR_trigger_stripe_pointer;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
		gc_stripe_unlock(m);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

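/*
 * Run the per-pointer triggers for every pointer in an extent, then emit the
 * accounting updates the key as a whole implies: a replicas entry, per-btree
 * (for btree nodes) or per-snapshot and per-inode sector counts, and
 * compression totals in the form { nr_extents, uncompressed, compressed }.
 */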
static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags,
			    s64 *replicas_sectors)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	int ret = 0;

	struct disk_accounting_pos acc_replicas_key = {
		.type			= BCH_DISK_ACCOUNTING_replicas,
		.replicas.data_type	= data_type,
		.replicas.nr_devs	= 0,
		.replicas.nr_required	= 1,
	};

	unsigned cur_compression_type = 0;
	u64 compression_acct[3] = { 1, 0, 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			*replicas_sectors += disk_sectors;
			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (cur_compression_type &&
		    cur_compression_type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
							compression, cur_compression_type);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		cur_compression_type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot);
		if (ret)
			return ret;
	}

	if (cur_compression_type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
						compression, cur_compression_type);
		if (ret)
			return ret;
	}

	if (level) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, btree, btree_id);
		if (ret)
			return ret;
	} else {
		bool insert = !(flags & BTREE_TRIGGER_overwrite);

		s64 v[3] = {
			insert ? 1 : -1,
			insert ? k.k->size : -((s64) k.k->size),
			*replicas_sectors,
		};
		ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
		if (ret)
			return ret;
	}

	return 0;
}

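/*
 * The extent trigger: skips the common case where the pointers didn't
 * change, runs __trigger_extent() for the old and new keys, and keeps the
 * rebalance_work btree and accounting in sync with how many sectors still
 * need rebalancing.
 */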
int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;

		if (old.k->type) {
			int ret = __trigger_extent(trans, btree, level, old,
						   flags & ~BTREE_TRIGGER_insert,
						   &old_replicas_sectors);
			if (ret)
				return ret;
		}

		if (new.k->type) {
			int ret = __trigger_extent(trans, btree, level, new.s_c,
						   flags & ~BTREE_TRIGGER_overwrite,
						   &new_replicas_sectors);
			if (ret)
				return ret;
		}

		int need_rebalance_delta = 0;
		s64 need_rebalance_sectors_delta[1] = { 0 };

		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
		need_rebalance_delta -= s != 0;
		need_rebalance_sectors_delta[0] -= s;

		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
		need_rebalance_delta += s != 0;
		need_rebalance_sectors_delta[0] += s;

		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							      new.k->p, need_rebalance_delta > 0);
			if (ret)
				return ret;
		}

		if (need_rebalance_sectors_delta[0]) {
			int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
							    need_rebalance_sectors_delta, rebalance_work);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
				 enum btree_id btree_id, unsigned level, struct bkey_s_c k,
				 enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors[1] = { k.k->size };

		if (flags & BTREE_TRIGGER_overwrite)
			sectors[0] = -sectors[0];

		return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
						 persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			     enum btree_id btree_id, unsigned level,
			     struct bkey_s_c old, struct bkey_s new,
			     enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

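/*
 * Transactionally mark one bucket as holding superblock or journal data:
 * update its alloc key's data_type and dirty_sectors, flagging an fsck error
 * if the bucket already holds a different type of data.
 */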
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, u64 b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int ret = 0;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_metadata_type_mismatch,
			     "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			     "while marking %s",
			     iter.pos.inode, iter.pos.offset, a->v.gen,
			     bch2_data_type_str(a->v.data_type),
			     bch2_data_type_str(type),
			     bch2_data_type_str(type));
		ret = -BCH_ERR_metadata_bucket_inconsistency;
		goto err;
	}

	if (a->v.data_type != type ||
	    a->v.dirty_sectors != sectors) {
		a->v.data_type = type;
		a->v.dirty_sectors = sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
				     u64 b, enum bch_data_type data_type, unsigned sectors,
				     enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
				    g->data_type != data_type, c,
				    "different types of data in same bucket: %s, %s",
				    bch2_data_type_str(g->data_type),
				    bch2_data_type_str(data_type)))
		goto err_unlock;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
				    "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
				    ca->dev_idx, b, g->gen,
				    bch2_data_type_str(g->data_type ?: data_type),
				    g->dirty_sectors, sectors))
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err_unlock:
	bucket_unlock(g);
err:
	return -BCH_ERR_metadata_bucket_inconsistency;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type, unsigned sectors,
				    enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca, u64 start, u64 end,
					    enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
					    enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket = b;
		*bucket_sectors += sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
				    enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	mutex_lock(&c->sb_lock);
	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
	mutex_unlock(&c->sb_lock);

	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
					0, BCH_SB_SECTOR,
					BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				offset + (1 << layout.sb_max_size_bits),
				BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
			__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
				  enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

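/*
 * Return true if bucket @b overlaps any superblock copy or is a journal
 * bucket. Bucket 0 always counts - presumably because it holds the
 * superblock layout itself.
 */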
bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	for (i = 0; i < ca->journal.nr; i++)
		if (b == ca->journal.buckets[i])
			return true;

	return false;
}

/* Disk reservations: */

#define SECTORS_CACHE 1024

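/*
 * Take a disk reservation: the fast path serves it from a per-cpu cache,
 * refilled from c->sectors_available in batches of up to sectors +
 * SECTORS_CACHE; only when the global counter runs dry do we take
 * sectors_available_lock and recompute it from filesystem usage.
 */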
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, enum bch_reservation_flags flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	u64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available	+= get;

out:
	pcpu->sectors_available	-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors		+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
		sectors = min(sectors, sectors_available);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors += sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
						  sizeof(unsigned long),
						  GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return -BCH_ERR_ENOMEM_buckets_nouse;
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

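/*
 * Resize a device's bucket_gens array: allocate the new array, copy the old
 * generation numbers across, publish it with rcu_assign_pointer(), then free
 * the old array after a grace period via call_rcu().
 */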
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	if (resize)
		lockdep_assert_held(&c->state_lock);

	if (resize && ca->buckets_nouse)
		return -BCH_ERR_no_resize_with_buckets_nouse;

	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
				    GFP_KERNEL|__GFP_ZERO);
	if (!bucket_gens) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		bucket_gens->nbuckets = min(bucket_gens->nbuckets,
					    old_bucket_gens->nbuckets);
		bucket_gens->nbuckets_minus_first =
			bucket_gens->nbuckets - bucket_gens->first_bucket;
		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       bucket_gens->nbuckets);
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens = old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage);
	if (!ca->usage)
		return -BCH_ERR_ENOMEM_usage_init;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}