// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	for (unsigned i = 0; i < BCH_DATA_NR; i++)
		usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
}

void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage,
			sizeof(struct bch_dev_usage_full) / sizeof(u64));
}

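/*
 * reserve_factor() below pads a reservation by roughly
 * 1/2^RESERVE_FACTOR to leave slack for overruns. Worked example,
 * assuming RESERVE_FACTOR is 6: for r = 100, round_up(100, 64) = 128,
 * 128 >> 6 = 2, so the result is 102.
 */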
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}
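
/*
 * Example use of bch2_fs_usage_read_short() (a sketch, not a caller from
 * this file):
 *
 *	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
 *	u64 pct_used = u.capacity ? div64_u64(u.used * 100, u.capacity) : 0;
 *
 * The wrapper takes mark_lock for a consistent snapshot; callers already
 * holding it can use __bch2_fs_usage_read_short() directly.
 */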

void bch2_dev_usage_to_text(struct printbuf *out,
			    struct bch_dev *ca,
			    struct bch_dev_usage_full *usage)
{
	if (out->nr_tabstops < 5) {
		printbuf_tabstops_reset(out);
		printbuf_tabstop_push(out, 12);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
	}

	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			   usage->d[i].buckets,
			   usage->d[i].sectors,
			   usage->d[i].fragmented);
	}

	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}
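
/*
 * bch2_dev_usage_to_text() above emits one row per data type plus a
 * capacity line; roughly (illustrative values, not captured output):
 *
 *		buckets		sectors		fragmented
 *	free	100		0		0
 *	sb	3		1536		0
 *	journal	8		4096		0
 *	...
 *	capacity	111
 */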

static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
				trans, ptr_to_invalid_device,
				"pointer to missing device %u\n"
				"while marking %s",
				p.ptr.dev,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}

	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
		} else {
			/* this pointer will be dropped */
			*do_update = true;
			goto out;
		}
	}

	/* g->gen_valid == true */

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->data_type		= data_type;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		}

		*do_update = true;
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    data_type == BCH_DATA_btree) {
			switch (g->data_type) {
			case BCH_DATA_sb:
				bch_err(c, "btree and superblock in the same bucket - cannot repair");
				ret = bch_err_throw(c, fsck_repair_unimplemented);
				goto out;
			case BCH_DATA_journal:
				ret = bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr));
				bch_err_msg(c, ret, "error deleting journal bucket %zu",
					    PTR_BUCKET_NR(ca, &p.ptr));
				if (ret)
					goto out;
				break;
			}

			g->data_type		= data_type;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
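
/*
 * bch2_check_fix_ptrs() below runs bch2_check_fix_ptr() on each pointer
 * in the key; if any check flags do_update, it makes a mutable copy,
 * drops pointers to nonexistent devices, drops or repairs bad extent
 * pointers and stripe entries, then writes the new key back - via a
 * btree update for ordinary keys, or directly for a btree root.
 */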

int bch2_check_fix_ptrs(struct btree_trans *trans,
			enum btree_id btree, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	/* We don't yet do btree key updates correctly for when we're RW */
	BUG_ON(test_bit(BCH_FS_rw, &c->flags));

	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
		if (ret)
			goto err;
	}

	if (do_update) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		scoped_guard(rcu)
			bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			scoped_guard(rcu)
				bkey_for_each_ptr(ptrs, ptr) {
					struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
					ptr->gen = PTR_GC_BUCKET(ca, ptr)->gen;
				}
		} else {
			struct bkey_ptrs ptrs;
			union bch_extent_entry *entry;

			rcu_read_lock();
restart_drop_ptrs:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

				if ((p.ptr.cached &&
				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
				    (!p.ptr.cached &&
				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
				    (g->data_type &&
				     g->data_type != data_type)) {
					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
					goto restart_drop_ptrs;
				}
			}
			rcu_read_unlock();
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		if (!(flags & BTREE_TRIGGER_is_root)) {
			struct btree_iter iter;
			bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
						  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
			ret =   bch2_btree_iter_traverse(trans, &iter) ?:
				bch2_trans_update(trans, &iter, new,
						  BTREE_UPDATE_internal_snapshot_node|
						  BTREE_TRIGGER_norun);
			bch2_trans_iter_exit(trans, &iter);
			if (ret)
				goto err;

			if (level)
				bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
		} else {
			struct jset_entry *e = bch2_trans_jset_entry_alloc(trans,
					       jset_u64s(new->k.u64s));
			ret = PTR_ERR_OR_ZERO(e);
			if (ret)
				goto err;

			journal_entry_set(e,
					  BCH_JSET_ENTRY_btree_root,
					  btree, level - 1,
					  new, new->k.u64s);

			/*
			 * no locking, we're single threaded and not rw yet, see
			 * the big assertion above that we repeat here:
			 */
			BUG_ON(test_bit(BCH_FS_rw, &c->flags));

			struct btree *b = bch2_btree_id_root(c, btree)->b;
			bkey_copy(&b->key, new);
		}
	}
err:
	printbuf_exit(&buf);
	return ret;
}

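/*
 * Error policy for bucket_ref_update_err() below: on insert, a
 * bucket/pointer inconsistency fails the update (and schedules the
 * check_allocations recovery pass); on overwrite the error is counted
 * and logged but otherwise ignored, so that deletions still work.
 */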
static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf,
				 struct bkey_s_c k, bool insert, enum bch_sb_error_id id)
{
	struct bch_fs *c = trans->c;

	prt_printf(buf, "\nwhile marking ");
	bch2_bkey_val_to_text(buf, c, k);
	prt_newline(buf);

	bool print = __bch2_count_fsck_err(c, id, buf);

	int ret = bch2_run_explicit_recovery_pass(c, buf,
					BCH_RECOVERY_PASS_check_allocations, 0);

	if (insert) {
		bch2_trans_updates_to_text(buf, trans);
		__bch2_inconsistent_error(c, buf);
		/*
		 * If we're in recovery, run_explicit_recovery_pass might give
		 * us an error code for rewinding recovery
		 */
		if (!ret)
			ret = bch_err_throw(c, bucket_ref_update);
	} else {
		/* Always ignore overwrite errors, so that deletion works */
		ret = 0;
	}

	if (print || insert)
		bch2_print_str(c, KERN_ERR, buf->buf);
	return ret;
}

int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (unlikely(gen_after(ptr->gen, b_gen))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
		goto out;
	}

	if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_too_stale);
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (unlikely(b_gen != ptr->gen)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
			ptr->dev, bucket_nr, b_gen,
			bucket_gen_get(ca, bucket_nr),
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_stale_dirty_ptr);
		goto out;
	}

	if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type),
			   bch2_data_type_str(ptr_data_type));

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
		goto out;
	}

	if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			*bucket_sectors, sectors);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_bucket_sector_count_overflow);
		sectors = -*bucket_sectors;
		goto out;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
}
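
/*
 * Return convention for bch2_bucket_ref_update() above: 0 means the
 * reference was accounted, > 0 (the ret = 1 path) means a stale cached
 * pointer the caller should skip, < 0 is an error. Note that gen_cmp()
 * compares wrapping 8 bit gens (signed difference), so ptr gen 250 vs
 * bucket gen 2 still compares as older; outright staleness is bounded
 * separately via BUCKET_GC_GEN_MAX.
 */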

void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

/* KEY_TYPE_extent: */

static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a,
			  bool insert)
{
	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors :
		!p->ptr.cached		? &a->dirty_sectors :
					  &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;
	if (insert)
		alloc_data_type_set(a, ptr_data_type);
	return 0;
}

static int bch2_trigger_pointer(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			const union bch_extent_entry *entry,
			s64 *sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bkey_i_backpointer bp;
	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);

	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
			ret = bch_err_throw(c, trigger_pointer);
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
	if (!bucket_valid(ca, bucket.offset)) {
		if (insert) {
			bch2_dev_bucket_missing(ca, bucket.offset);
			ret = bch_err_throw(c, trigger_pointer);
		}
		goto err;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
		if (ret)
			goto err;

		ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
		if (ret)
			goto err;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = bch_err_throw(c, trigger_pointer);
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
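
/*
 * bch2_trigger_pointer() above handles both trigger modes: the
 * transactional path updates the alloc key and backpointer through the
 * btree, while the gc path updates the in-memory gc bucket under
 * bucket_lock() and feeds the old/new alloc state into the device
 * counters.
 */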

static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				struct bkey_s_c k,
				struct extent_ptr_decoded p,
				enum bch_data_type data_type,
				s64 sectors,
				enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
				"pointer to nonexistent stripe %llu",
				(u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
				"stripe pointer doesn't match stripe %llu",
				(u64) p.ec.idx);
			ret = bch_err_throw(c, trigger_stripe_pointer);
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
			stripe_blockcount_get(&s->v, p.ec.block) +
			sectors);

		struct disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return bch_err_throw(c, ENOMEM_mark_stripe_ptr);
		}

		gc_stripe_lock(m);

		if (!m || !m->alive) {
			gc_stripe_unlock(m);
			struct printbuf buf = PRINTBUF;
			bch2_log_msg_start(c, &buf);
			prt_printf(&buf, "pointer to nonexistent stripe %llu\n  while marking ",
				   (u64) p.ec.idx);
			bch2_bkey_val_to_text(&buf, c, k);
			__bch2_inconsistent_error(c, &buf);
			bch2_print_str(c, KERN_ERR, buf.buf);
			printbuf_exit(&buf);
			return bch_err_throw(c, trigger_stripe_pointer);
		}

		m->block_sectors[p.ec.block] += sectors;

		struct disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
		gc_stripe_unlock(m);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	int ret = 0;

	s64 replicas_sectors = 0;

	struct disk_accounting_pos acc_replicas_key;
	memset(&acc_replicas_key, 0, sizeof(acc_replicas_key));
	acc_replicas_key.type = BCH_DISK_ACCOUNTING_replicas;
	acc_replicas_key.replicas.data_type	= data_type;
	acc_replicas_key.replicas.nr_devs	= 0;
	acc_replicas_key.replicas.nr_required	= 1;

	unsigned cur_compression_type = 0;
	u64 compression_acct[3] = { 1, 0, 0 };
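	/*
	 * compression_acct[] accumulates, for the current compression
	 * type: [0] number of extents, [1] uncompressed sectors,
	 * [2] compressed sectors; it's flushed to disk accounting each
	 * time the compression type changes, and once more after the
	 * loop below.
	 */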

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			replicas_sectors       += disk_sectors;
			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (cur_compression_type &&
		    cur_compression_type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
							compression, cur_compression_type);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		cur_compression_type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, &replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, &replicas_sectors, 1, snapshot, k.k->p.snapshot);
		if (ret)
			return ret;
	}

	if (cur_compression_type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
						compression, cur_compression_type);
		if (ret)
			return ret;
	}

	if (level) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, &replicas_sectors, 1, btree, btree_id);
		if (ret)
			return ret;
	} else {
		bool insert = !(flags & BTREE_TRIGGER_overwrite);

		s64 v[3] = {
			insert ? 1 : -1,
			insert ? k.k->size : -((s64) k.k->size),
			replicas_sectors,
		};
		ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		if (old.k->type) {
			int ret = __trigger_extent(trans, btree, level, old,
						   flags & ~BTREE_TRIGGER_insert);
			if (ret)
				return ret;
		}

		if (new.k->type) {
			int ret = __trigger_extent(trans, btree, level, new.s_c,
						   flags & ~BTREE_TRIGGER_overwrite);
			if (ret)
				return ret;
		}

		int need_rebalance_delta = 0;
		s64 need_rebalance_sectors_delta[1] = { 0 };

		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
		need_rebalance_delta -= s != 0;
		need_rebalance_sectors_delta[0] -= s;

		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
		need_rebalance_delta += s != 0;
		need_rebalance_sectors_delta[0] += s;

		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							  new.k->p, need_rebalance_delta > 0);
			if (ret)
				return ret;
		}

		if (need_rebalance_sectors_delta[0]) {
			int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
							    need_rebalance_sectors_delta, rebalance_work);
			if (ret)
				return ret;
		}
	}

	return 0;
}
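
/*
 * In bch2_trigger_extent() above, need_rebalance_delta tracks whether
 * the key is entering or leaving the rebalance_work btree (-1, 0 or +1),
 * while need_rebalance_sectors_delta accumulates the change in sectors
 * still needing rebalance, for disk accounting.
 */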

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors[1] = { k.k->size };

		if (flags & BTREE_TRIGGER_overwrite)
			sectors[0] = -sectors[0];

		return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
				persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_s new,
			  enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int ret = 0;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		struct printbuf buf = PRINTBUF;
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			   "while marking %s\n",
			   iter.pos.inode, iter.pos.offset, a->v.gen,
			   bch2_data_type_str(a->v.data_type),
			   bch2_data_type_str(type),
			   bch2_data_type_str(type));

		bch2_count_fsck_err(c, bucket_metadata_type_mismatch, &buf);

		ret = bch2_run_explicit_recovery_pass(c, &buf,
					BCH_RECOVERY_PASS_check_allocations, 0);

		/* Always print, this is always fatal */
		bch2_print_str(c, KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		if (!ret)
			ret = bch_err_throw(c, metadata_bucket_inconsistency);
		goto err;
	}

	if (a->v.data_type	!= type ||
	    a->v.dirty_sectors	!= sectors) {
		a->v.data_type		= type;
		a->v.dirty_sectors	= sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
			u64 b, enum bch_data_type data_type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type)))
		goto err_unlock;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_type_str(g->data_type ?: data_type),
			g->dirty_sectors, sectors))
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err_unlock:
	bucket_unlock(g);
err:
	return bch_err_throw(c, metadata_bucket_inconsistency);
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
			struct bch_dev *ca, u64 b,
			enum bch_data_type type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
			struct bch_dev *ca, u64 start, u64 end,
			enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
			enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
							type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}
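
/*
 * Worked example for the loop above (illustrative numbers): with
 * 512-sector buckets, marking sectors [8, 1032) visits bucket 0 for
 * sectors 8..512, bucket 1 for 512..1024, then bucket 2 for 1024..1032;
 * the accumulated count is flushed whenever the bucket number changes,
 * and the final partial bucket is left in *bucket/*bucket_sectors for
 * the caller to flush.
 */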

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	mutex_lock(&c->sb_lock);
	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
	mutex_unlock(&c->sb_lock);

	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout.sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
		__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
			enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca, BCH_DEV_READ_REF_trans_mark_dev_sbs) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_trans_mark_dev_sbs);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	for (i = 0; i < ca->journal.nr; i++)
		if (b == ca->journal.buckets[i])
			return true;

	return false;
}
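
/*
 * The loop above is a standard half-open interval overlap test:
 * superblock range [offset, end) intersects bucket range
 * [b_offset, b_end) unless one ends before the other begins. Bucket 0
 * is unconditionally treated as a superblock bucket.
 */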

/* Disk reservations: */

#define SECTORS_CACHE	1024
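
/*
 * SECTORS_CACHE is the refill batch for the per-cpu sectors_available
 * cache in __bch2_disk_reservation_add() below: each cpu takes its
 * reservation plus up to SECTORS_CACHE extra from the global counter,
 * so the atomic cmpxchg is hit roughly once per 1024 sectors rather
 * than on every reservation.
 */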

int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, enum bch_reservation_flags flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	u64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
		sectors = min(sectors, sectors_available);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = bch_err_throw(c, ENOSPC_disk_reservation);
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return bch_err_throw(c, ENOMEM_buckets_nouse);
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	if (resize)
		lockdep_assert_held(&c->state_lock);

	if (resize && ca->buckets_nouse)
		return bch_err_throw(c, no_resize_with_buckets_nouse);

	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
				    GFP_KERNEL|__GFP_ZERO);
	if (!bucket_gens) {
		ret = bch_err_throw(c, ENOMEM_bucket_gens);
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		u64 copy = min(bucket_gens->nbuckets,
			       old_bucket_gens->nbuckets);
		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       sizeof(bucket_gens->b[0]) * copy);
	}

	ret =   bch2_bucket_bitmap_resize(ca, &ca->bucket_backpointer_mismatch,
					  ca->mi.nbuckets, nbuckets) ?:
		bch2_bucket_bitmap_resize(ca, &ca->bucket_backpointer_empty,
					  ca->mi.nbuckets, nbuckets);

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens	= old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage_full);
	if (!ca->usage)
		return bch_err_throw(c, ENOMEM_usage_init);

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}
1396