xref: /linux/fs/bcachefs/buckets.c (revision 56770e24f678a84a21f21bcc1ae9cbc1364677bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  */
7 
8 #include "bcachefs.h"
9 #include "alloc_background.h"
10 #include "backpointers.h"
11 #include "bset.h"
12 #include "btree_gc.h"
13 #include "btree_update.h"
14 #include "buckets.h"
15 #include "buckets_waiting_for_journal.h"
16 #include "disk_accounting.h"
17 #include "ec.h"
18 #include "error.h"
19 #include "inode.h"
20 #include "movinggc.h"
21 #include "rebalance.h"
22 #include "recovery.h"
23 #include "recovery_passes.h"
24 #include "reflink.h"
25 #include "replicas.h"
26 #include "subvolume.h"
27 #include "trace.h"
28 
29 #include <linux/preempt.h>
30 
31 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
32 {
33 	for (unsigned i = 0; i < BCH_DATA_NR; i++)
34 		usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
35 }
36 
37 void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
38 {
39 	memset(usage, 0, sizeof(*usage));
40 	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
41 }
42 
43 static u64 reserve_factor(u64 r)
44 {
45 	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
46 }
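
/*
 * Note (editor's annotation): assuming RESERVE_FACTOR is 6, as defined in
 * buckets.h at the time of writing, reserve_factor(r) ~= r * (1 + 1/64):
 * e.g. reserve_factor(100) = 100 + (round_up(100, 64) >> 6) = 100 + 2 = 102.
 * It slightly inflates the reserved-sector count so that "used" below is
 * computed conservatively.
 */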
47 
48 static struct bch_fs_usage_short
49 __bch2_fs_usage_read_short(struct bch_fs *c)
50 {
51 	struct bch_fs_usage_short ret;
52 	u64 data, reserved;
53 
54 	ret.capacity = c->capacity -
55 		percpu_u64_get(&c->usage->hidden);
56 
57 	data		= percpu_u64_get(&c->usage->data) +
58 		percpu_u64_get(&c->usage->btree);
59 	reserved	= percpu_u64_get(&c->usage->reserved) +
60 		percpu_u64_get(c->online_reserved);
61 
62 	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
63 	ret.free	= ret.capacity - ret.used;
64 
65 	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);
66 
67 	return ret;
68 }
69 
70 struct bch_fs_usage_short
71 bch2_fs_usage_read_short(struct bch_fs *c)
72 {
73 	struct bch_fs_usage_short ret;
74 
75 	percpu_down_read(&c->mark_lock);
76 	ret = __bch2_fs_usage_read_short(c);
77 	percpu_up_read(&c->mark_lock);
78 
79 	return ret;
80 }
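
/*
 * A minimal usage sketch (editor's annotation, hypothetical caller) --
 * bch2_fs_usage_read_short() takes mark_lock internally, so call it from
 * sleepable context:
 *
 *	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
 *	pr_info("used %llu of %llu sectors, free %llu\n",
 *		u.used, u.capacity, u.free);
 */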
81 
82 void bch2_dev_usage_to_text(struct printbuf *out,
83 			    struct bch_dev *ca,
84 			    struct bch_dev_usage_full *usage)
85 {
86 	if (out->nr_tabstops < 5) {
87 		printbuf_tabstops_reset(out);
88 		printbuf_tabstop_push(out, 12);
89 		printbuf_tabstop_push(out, 16);
90 		printbuf_tabstop_push(out, 16);
91 		printbuf_tabstop_push(out, 16);
92 		printbuf_tabstop_push(out, 16);
93 	}
94 
95 	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
96 
97 	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
98 		bch2_prt_data_type(out, i);
99 		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
100 			   usage->d[i].buckets,
101 			   usage->d[i].sectors,
102 			   usage->d[i].fragmented);
103 	}
104 
105 	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
106 }
107 
108 static int bch2_check_fix_ptr(struct btree_trans *trans,
109 			      struct bkey_s_c k,
110 			      struct extent_ptr_decoded p,
111 			      const union bch_extent_entry *entry,
112 			      bool *do_update)
113 {
114 	struct bch_fs *c = trans->c;
115 	struct printbuf buf = PRINTBUF;
116 	int ret = 0;
117 
118 	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
119 	if (!ca) {
120 		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
121 				trans, ptr_to_invalid_device,
122 				"pointer to missing device %u\n"
123 				"while marking %s",
124 				p.ptr.dev,
125 				(printbuf_reset(&buf),
126 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
127 			*do_update = true;
128 		return 0;
129 	}
130 
131 	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
132 	if (!g) {
133 		if (fsck_err(trans, ptr_to_invalid_device,
134 			     "pointer to invalid bucket on device %u\n"
135 			     "while marking %s",
136 			     p.ptr.dev,
137 			     (printbuf_reset(&buf),
138 			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
139 			*do_update = true;
140 		goto out;
141 	}
142 
143 	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
144 
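	/*
	 * Note (editor's annotation): the checks below compare the pointer's
	 * generation number against the in-memory GC bucket state; gen_cmp()
	 * is effectively a wrapping (s8) subtraction, so "newer" and "stale"
	 * are modulo-256 comparisons.
	 */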
145 	if (fsck_err_on(!g->gen_valid,
146 			trans, ptr_to_missing_alloc_key,
147 			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
148 			"while marking %s",
149 			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
150 			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
151 			p.ptr.gen,
152 			(printbuf_reset(&buf),
153 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
154 		if (!p.ptr.cached) {
155 			g->gen_valid		= true;
156 			g->gen			= p.ptr.gen;
157 		} else {
158 			*do_update = true;
159 		}
160 	}
161 
162 	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
163 			trans, ptr_gen_newer_than_bucket_gen,
164 			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
165 			"while marking %s",
166 			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
167 			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
168 			p.ptr.gen, g->gen,
169 			(printbuf_reset(&buf),
170 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
171 		if (!p.ptr.cached &&
172 		    (g->data_type != BCH_DATA_btree ||
173 		     data_type == BCH_DATA_btree)) {
174 			g->gen_valid		= true;
175 			g->gen			= p.ptr.gen;
176 			g->data_type		= 0;
177 			g->stripe_sectors	= 0;
178 			g->dirty_sectors	= 0;
179 			g->cached_sectors	= 0;
180 		} else {
181 			*do_update = true;
182 		}
183 	}
184 
185 	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
186 			trans, ptr_gen_newer_than_bucket_gen,
187 			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
188 			"while marking %s",
189 			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
190 			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
191 			p.ptr.gen,
192 			(printbuf_reset(&buf),
193 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
194 		*do_update = true;
195 
196 	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
197 			trans, stale_dirty_ptr,
198 			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
199 			"while marking %s",
200 			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
201 			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
202 			p.ptr.gen, g->gen,
203 			(printbuf_reset(&buf),
204 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
205 		*do_update = true;
206 
207 	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
208 		goto out;
209 
210 	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
211 			trans, ptr_bucket_data_type_mismatch,
212 			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
213 			"while marking %s",
214 			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
215 			bch2_data_type_str(g->data_type),
216 			bch2_data_type_str(data_type),
217 			(printbuf_reset(&buf),
218 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
219 		if (data_type == BCH_DATA_btree) {
220 			g->gen_valid		= true;
221 			g->gen			= p.ptr.gen;
222 			g->data_type		= data_type;
223 			g->stripe_sectors	= 0;
224 			g->dirty_sectors	= 0;
225 			g->cached_sectors	= 0;
226 		} else {
227 			*do_update = true;
228 		}
229 	}
230 
231 	if (p.has_ec) {
232 		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
233 
234 		if (fsck_err_on(!m || !m->alive,
235 				trans, ptr_to_missing_stripe,
236 				"pointer to nonexistent stripe %llu\n"
237 				"while marking %s",
238 				(u64) p.ec.idx,
239 				(printbuf_reset(&buf),
240 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
241 			*do_update = true;
242 
243 		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
244 				trans, ptr_to_incorrect_stripe,
245 				"pointer does not match stripe %llu\n"
246 				"while marking %s",
247 				(u64) p.ec.idx,
248 				(printbuf_reset(&buf),
249 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
250 			*do_update = true;
251 	}
252 out:
253 fsck_err:
254 	bch2_dev_put(ca);
255 	printbuf_exit(&buf);
256 	return ret;
257 }
258 
259 int bch2_check_fix_ptrs(struct btree_trans *trans,
260 			enum btree_id btree, unsigned level, struct bkey_s_c k,
261 			enum btree_iter_update_trigger_flags flags)
262 {
263 	struct bch_fs *c = trans->c;
264 	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
265 	const union bch_extent_entry *entry_c;
266 	struct extent_ptr_decoded p = { 0 };
267 	bool do_update = false;
268 	struct printbuf buf = PRINTBUF;
269 	int ret = 0;
270 
271 	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
272 		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
273 		if (ret)
274 			goto err;
275 	}
276 
277 	if (do_update) {
278 		if (flags & BTREE_TRIGGER_is_root) {
279 			bch_err(c, "cannot update btree roots yet");
280 			ret = -EINVAL;
281 			goto err;
282 		}
283 
284 		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
285 		ret = PTR_ERR_OR_ZERO(new);
286 		if (ret)
287 			goto err;
288 
289 		rcu_read_lock();
290 		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
291 		rcu_read_unlock();
292 
293 		if (level) {
294 			/*
295 			 * We don't want to drop btree node pointers - if the
296 			 * btree node isn't there anymore, the read path will
297 			 * sort it out:
298 			 */
299 			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
300 			rcu_read_lock();
301 			bkey_for_each_ptr(ptrs, ptr) {
302 				struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
303 				struct bucket *g = PTR_GC_BUCKET(ca, ptr);
304 
305 				ptr->gen = g->gen;
306 			}
307 			rcu_read_unlock();
308 		} else {
309 			struct bkey_ptrs ptrs;
310 			union bch_extent_entry *entry;
311 
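			/*
			 * Note (editor's annotation): bch2_bkey_drop_ptr()
			 * mutates the key in place, invalidating our
			 * iteration cursor, so each drop restarts the scan
			 * from the top (see restart_drop_ptrs and the
			 * "again" loop below).
			 */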
312 			rcu_read_lock();
313 restart_drop_ptrs:
314 			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
315 			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
316 				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
317 				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
318 				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
319 
320 				if ((p.ptr.cached &&
321 				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
322 				    (!p.ptr.cached &&
323 				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
324 				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
325 				    (g->data_type &&
326 				     g->data_type != data_type)) {
327 					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
328 					goto restart_drop_ptrs;
329 				}
330 			}
331 			rcu_read_unlock();
332 again:
333 			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
334 			bkey_extent_entry_for_each(ptrs, entry) {
335 				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
336 					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
337 									entry->stripe_ptr.idx);
338 					union bch_extent_entry *next_ptr;
339 
340 					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
341 						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
342 							goto found;
343 					next_ptr = NULL;
344 found:
345 					if (!next_ptr) {
346 						bch_err(c, "aieee, found stripe ptr with no data ptr");
347 						continue;
348 					}
349 
350 					if (!m || !m->alive ||
351 					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
352 								       &next_ptr->ptr,
353 								       m->sectors)) {
354 						bch2_bkey_extent_entry_drop(new, entry);
355 						goto again;
356 					}
357 				}
358 			}
359 		}
360 
361 		if (0) {
362 			printbuf_reset(&buf);
363 			bch2_bkey_val_to_text(&buf, c, k);
364 			bch_info(c, "updated %s", buf.buf);
365 
366 			printbuf_reset(&buf);
367 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
368 			bch_info(c, "new key %s", buf.buf);
369 		}
370 
371 		struct btree_iter iter;
372 		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
373 					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
374 		ret =   bch2_btree_iter_traverse(trans, &iter) ?:
375 			bch2_trans_update(trans, &iter, new,
376 					  BTREE_UPDATE_internal_snapshot_node|
377 					  BTREE_TRIGGER_norun);
378 		bch2_trans_iter_exit(trans, &iter);
379 		if (ret)
380 			goto err;
381 
382 		if (level)
383 			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
384 	}
385 err:
386 	printbuf_exit(&buf);
387 	return ret;
388 }
389 
390 static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf,
391 				 struct bkey_s_c k, bool insert, enum bch_sb_error_id id)
392 {
393 	struct bch_fs *c = trans->c;
394 	bool repeat = false, print = true, suppress = false;
395 
396 	prt_printf(buf, "\nwhile marking ");
397 	bch2_bkey_val_to_text(buf, c, k);
398 	prt_newline(buf);
399 
400 	__bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress);
401 
402 	int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
403 
404 	if (insert) {
405 		print = true;
406 		suppress = false;
407 
408 		bch2_trans_updates_to_text(buf, trans);
409 		__bch2_inconsistent_error(c, buf);
410 		ret = -BCH_ERR_bucket_ref_update;
411 	}
412 
413 	if (suppress)
414 		prt_printf(buf, "Ratelimiting new instances of previous error\n");
415 	if (print)
416 		bch2_print_string_as_lines(KERN_ERR, buf->buf);
417 	return ret;
418 }
419 
420 int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
421 			   struct bkey_s_c k,
422 			   const struct bch_extent_ptr *ptr,
423 			   s64 sectors, enum bch_data_type ptr_data_type,
424 			   u8 b_gen, u8 bucket_data_type,
425 			   u32 *bucket_sectors)
426 {
427 	struct bch_fs *c = trans->c;
428 	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
429 	struct printbuf buf = PRINTBUF;
430 	bool inserting = sectors > 0;
431 	int ret = 0;
432 
433 	BUG_ON(!sectors);
434 
435 	if (unlikely(gen_after(ptr->gen, b_gen))) {
436 		bch2_log_msg_start(c, &buf);
437 		prt_printf(&buf,
438 			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
439 			ptr->dev, bucket_nr, b_gen,
440 			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
441 			ptr->gen);
442 
443 		ret = bucket_ref_update_err(trans, &buf, k, inserting,
444 					    BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
445 		goto out;
446 	}
447 
448 	if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
449 		bch2_log_msg_start(c, &buf);
450 		prt_printf(&buf,
451 			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
452 			ptr->dev, bucket_nr, b_gen,
453 			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
454 			ptr->gen);
455 
456 		ret = bucket_ref_update_err(trans, &buf, k, inserting,
457 					    BCH_FSCK_ERR_ptr_too_stale);
458 		goto out;
459 	}
460 
461 	if (b_gen != ptr->gen && ptr->cached) {
462 		ret = 1;
463 		goto out;
464 	}
465 
466 	if (unlikely(b_gen != ptr->gen)) {
467 		bch2_log_msg_start(c, &buf);
468 		prt_printf(&buf,
469 			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
470 			ptr->dev, bucket_nr, b_gen,
471 			bucket_gen_get(ca, bucket_nr),
472 			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
473 			ptr->gen);
474 
475 		ret = bucket_ref_update_err(trans, &buf, k, inserting,
476 					    BCH_FSCK_ERR_stale_dirty_ptr);
477 		goto out;
478 	}
479 
480 	if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
481 		bch2_log_msg_start(c, &buf);
482 		prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
483 			   ptr->dev, bucket_nr, b_gen,
484 			   bch2_data_type_str(bucket_data_type),
485 			   bch2_data_type_str(ptr_data_type));
486 
487 		ret = bucket_ref_update_err(trans, &buf, k, inserting,
488 					    BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
489 		goto out;
490 	}
491 
492 	if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
493 		bch2_log_msg_start(c, &buf);
494 		prt_printf(&buf,
495 			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
496 			ptr->dev, bucket_nr, b_gen,
497 			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
498 			*bucket_sectors, sectors);
499 
500 		ret = bucket_ref_update_err(trans, &buf, k, inserting,
501 					    BCH_FSCK_ERR_bucket_sector_count_overflow);
502 		sectors = -*bucket_sectors;
503 		goto out;
504 	}
505 
506 	*bucket_sectors += sectors;
507 out:
508 	printbuf_exit(&buf);
509 	return ret;
510 }
511 
512 void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
513 {
514 	struct bch_fs *c = trans->c;
515 	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
516 	static int warned_disk_usage = 0;
517 	bool warn = false;
518 
519 	percpu_down_read(&c->mark_lock);
520 	struct bch_fs_usage_base *src = &trans->fs_usage_delta;
521 
522 	s64 added = src->btree + src->data + src->reserved;
523 
524 	/*
525 	 * Not allowed to reduce sectors_available except by getting a
526 	 * reservation:
527 	 */
528 	s64 should_not_have_added = added - (s64) disk_res_sectors;
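	/*
	 * Note (editor's annotation): if the transaction consumed more than
	 * it reserved, the excess is clawed back from c->sectors_available
	 * via a cmpxchg loop that clamps at zero rather than going negative.
	 */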
529 	if (unlikely(should_not_have_added > 0)) {
530 		u64 old, new;
531 
532 		old = atomic64_read(&c->sectors_available);
533 		do {
534 			new = max_t(s64, 0, old - should_not_have_added);
535 		} while (!atomic64_try_cmpxchg(&c->sectors_available,
536 					       &old, new));
537 
538 		added -= should_not_have_added;
539 		warn = true;
540 	}
541 
542 	if (added > 0) {
543 		trans->disk_res->sectors -= added;
544 		this_cpu_sub(*c->online_reserved, added);
545 	}
546 
547 	preempt_disable();
548 	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
549 	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
550 	preempt_enable();
551 	percpu_up_read(&c->mark_lock);
552 
553 	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
554 		bch2_trans_inconsistent(trans,
555 					"disk usage increased %lli more than %llu sectors reserved",
556 					should_not_have_added, disk_res_sectors);
557 }
558 
559 /* KEY_TYPE_extent: */
560 
561 static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
562 			  struct bkey_s_c k,
563 			  const struct extent_ptr_decoded *p,
564 			  s64 sectors, enum bch_data_type ptr_data_type,
565 			  struct bch_alloc_v4 *a,
566 			  bool insert)
567 {
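	/*
	 * Note (editor's annotation): pick which per-bucket counter this
	 * pointer contributes to -- stripe_sectors for erasure coded
	 * pointers, cached_sectors for cached pointers, dirty_sectors
	 * otherwise.
	 */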
568 	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors :
569 		!p->ptr.cached		? &a->dirty_sectors :
570 					  &a->cached_sectors;
571 	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
572 					 a->gen, a->data_type, dst_sectors);
573 
574 	if (ret)
575 		return ret;
576 	if (insert)
577 		alloc_data_type_set(a, ptr_data_type);
578 	return 0;
579 }
580 
581 static int bch2_trigger_pointer(struct btree_trans *trans,
582 			enum btree_id btree_id, unsigned level,
583 			struct bkey_s_c k, struct extent_ptr_decoded p,
584 			const union bch_extent_entry *entry,
585 			s64 *sectors,
586 			enum btree_iter_update_trigger_flags flags)
587 {
588 	struct bch_fs *c = trans->c;
589 	bool insert = !(flags & BTREE_TRIGGER_overwrite);
590 	struct printbuf buf = PRINTBUF;
591 	int ret = 0;
592 
593 	struct bkey_i_backpointer bp;
594 	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);
595 
596 	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;
597 
598 	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
599 	if (unlikely(!ca)) {
600 		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
601 			ret = -BCH_ERR_trigger_pointer;
602 		goto err;
603 	}
604 
605 	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
606 
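	/*
	 * Note (editor's annotation): the transactional path below updates
	 * the alloc key and the backpointer through the btree; the gc path
	 * instead updates the in-memory gc bucket under bucket_lock().
	 */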
607 	if (flags & BTREE_TRIGGER_transactional) {
608 		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
609 		ret = PTR_ERR_OR_ZERO(a) ?:
610 			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
611 		if (ret)
612 			goto err;
613 
614 		ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
615 		if (ret)
616 			goto err;
617 	}
618 
619 	if (flags & BTREE_TRIGGER_gc) {
620 		struct bucket *g = gc_bucket(ca, bucket.offset);
621 		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
622 					    p.ptr.dev,
623 					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
624 			ret = -BCH_ERR_trigger_pointer;
625 			goto err;
626 		}
627 
628 		bucket_lock(g);
629 		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
630 		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
631 		alloc_to_bucket(g, new);
632 		bucket_unlock(g);
633 
634 		if (!ret)
635 			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
636 	}
637 err:
638 	bch2_dev_put(ca);
639 	printbuf_exit(&buf);
640 	return ret;
641 }
642 
643 static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
644 				struct bkey_s_c k,
645 				struct extent_ptr_decoded p,
646 				enum bch_data_type data_type,
647 				s64 sectors,
648 				enum btree_iter_update_trigger_flags flags)
649 {
650 	if (flags & BTREE_TRIGGER_transactional) {
651 		struct btree_iter iter;
652 		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
653 				BTREE_ID_stripes, POS(0, p.ec.idx),
654 				BTREE_ITER_with_updates, stripe);
655 		int ret = PTR_ERR_OR_ZERO(s);
656 		if (unlikely(ret)) {
657 			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
658 				"pointer to nonexistent stripe %llu",
659 				(u64) p.ec.idx);
660 			goto err;
661 		}
662 
663 		if (!bch2_ptr_matches_stripe(&s->v, p)) {
664 			bch2_trans_inconsistent(trans,
665 				"stripe pointer doesn't match stripe %llu",
666 				(u64) p.ec.idx);
667 			ret = -BCH_ERR_trigger_stripe_pointer;
668 			goto err;
669 		}
670 
671 		stripe_blockcount_set(&s->v, p.ec.block,
672 			stripe_blockcount_get(&s->v, p.ec.block) +
673 			sectors);
674 
675 		struct disk_accounting_pos acc;
676 		memset(&acc, 0, sizeof(acc));
677 		acc.type = BCH_DISK_ACCOUNTING_replicas;
678 		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
679 		acc.replicas.data_type = data_type;
680 		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
681 err:
682 		bch2_trans_iter_exit(trans, &iter);
683 		return ret;
684 	}
685 
686 	if (flags & BTREE_TRIGGER_gc) {
687 		struct bch_fs *c = trans->c;
688 
689 		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
690 		if (!m) {
691 			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
692 				(u64) p.ec.idx);
693 			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
694 		}
695 
696 		gc_stripe_lock(m);
697 
698 		if (!m || !m->alive) {
699 			gc_stripe_unlock(m);
700 			struct printbuf buf = PRINTBUF;
701 			bch2_log_msg_start(c, &buf);
702 			prt_printf(&buf, "pointer to nonexistent stripe %llu\n  while marking ",
703 				   (u64) p.ec.idx);
704 			bch2_bkey_val_to_text(&buf, c, k);
705 			__bch2_inconsistent_error(c, &buf);
706 			bch2_print_string_as_lines(KERN_ERR, buf.buf);
707 			printbuf_exit(&buf);
708 			return -BCH_ERR_trigger_stripe_pointer;
709 		}
710 
711 		m->block_sectors[p.ec.block] += sectors;
712 
713 		struct disk_accounting_pos acc;
714 		memset(&acc, 0, sizeof(acc));
715 		acc.type = BCH_DISK_ACCOUNTING_replicas;
716 		unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
717 		gc_stripe_unlock(m);
718 
719 		acc.replicas.data_type = data_type;
720 		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
721 		if (ret)
722 			return ret;
723 	}
724 
725 	return 0;
726 }
727 
728 static int __trigger_extent(struct btree_trans *trans,
729 			    enum btree_id btree_id, unsigned level,
730 			    struct bkey_s_c k,
731 			    enum btree_iter_update_trigger_flags flags,
732 			    s64 *replicas_sectors)
733 {
734 	bool gc = flags & BTREE_TRIGGER_gc;
735 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
736 	const union bch_extent_entry *entry;
737 	struct extent_ptr_decoded p;
738 	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
739 		? BCH_DATA_btree
740 		: BCH_DATA_user;
741 	int ret = 0;
742 
743 	struct disk_accounting_pos acc_replicas_key;
744 	memset(&acc_replicas_key, 0, sizeof(acc_replicas_key));
745 	acc_replicas_key.type = BCH_DISK_ACCOUNTING_replicas;
746 	acc_replicas_key.replicas.data_type	= data_type;
747 	acc_replicas_key.replicas.nr_devs	= 0;
748 	acc_replicas_key.replicas.nr_required	= 1;
749 
750 	unsigned cur_compression_type = 0;
751 	u64 compression_acct[3] = { 1, 0, 0 };
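	/*
	 * Note (editor's annotation): compression_acct is { nr_extents,
	 * uncompressed sectors, compressed sectors }, accumulated per run of
	 * pointers sharing a compression type and flushed whenever the type
	 * changes.
	 */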
752 
753 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
754 		s64 disk_sectors = 0;
755 		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
756 		if (ret < 0)
757 			return ret;
758 
759 		bool stale = ret > 0;
760 
761 		if (p.ptr.cached && stale)
762 			continue;
763 
764 		if (p.ptr.cached) {
765 			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
766 			if (ret)
767 				return ret;
768 		} else if (!p.has_ec) {
769 			*replicas_sectors       += disk_sectors;
770 			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
771 		} else {
772 			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
773 			if (ret)
774 				return ret;
775 
776 			/*
777 			 * There may be other dirty pointers in this extent; if
778 			 * so, they're not required for mounting, since we have
779 			 * an erasure coded pointer in this extent:
780 			 */
781 			acc_replicas_key.replicas.nr_required = 0;
782 		}
783 
784 		if (cur_compression_type &&
785 		    cur_compression_type != p.crc.compression_type) {
786 			if (flags & BTREE_TRIGGER_overwrite)
787 				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
788 
789 			ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
790 							compression, cur_compression_type);
791 			if (ret)
792 				return ret;
793 
794 			compression_acct[0] = 1;
795 			compression_acct[1] = 0;
796 			compression_acct[2] = 0;
797 		}
798 
799 		cur_compression_type = p.crc.compression_type;
800 		if (p.crc.compression_type) {
801 			compression_acct[1] += p.crc.uncompressed_size;
802 			compression_acct[2] += p.crc.compressed_size;
803 		}
804 	}
805 
806 	if (acc_replicas_key.replicas.nr_devs) {
807 		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
808 		if (ret)
809 			return ret;
810 	}
811 
812 	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
813 		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot);
814 		if (ret)
815 			return ret;
816 	}
817 
818 	if (cur_compression_type) {
819 		if (flags & BTREE_TRIGGER_overwrite)
820 			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
821 
822 		ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
823 						compression, cur_compression_type);
824 		if (ret)
825 			return ret;
826 	}
827 
828 	if (level) {
829 		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, btree, btree_id);
830 		if (ret)
831 			return ret;
832 	} else {
833 		bool insert = !(flags & BTREE_TRIGGER_overwrite);
834 
835 		s64 v[3] = {
836 			insert ? 1 : -1,
837 			insert ? k.k->size : -((s64) k.k->size),
838 			*replicas_sectors,
839 		};
840 		ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
841 		if (ret)
842 			return ret;
843 	}
844 
845 	return 0;
846 }
847 
848 int bch2_trigger_extent(struct btree_trans *trans,
849 			enum btree_id btree, unsigned level,
850 			struct bkey_s_c old, struct bkey_s new,
851 			enum btree_iter_update_trigger_flags flags)
852 {
853 	struct bch_fs *c = trans->c;
854 	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
855 	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
856 	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
857 	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;
858 
859 	if (unlikely(flags & BTREE_TRIGGER_check_repair))
860 		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);
861 
862 	/* if pointers aren't changing - nothing to do: */
863 	if (new_ptrs_bytes == old_ptrs_bytes &&
864 	    !memcmp(new_ptrs.start,
865 		    old_ptrs.start,
866 		    new_ptrs_bytes))
867 		return 0;
868 
869 	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
870 		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;
871 
872 		if (old.k->type) {
873 			int ret = __trigger_extent(trans, btree, level, old,
874 						   flags & ~BTREE_TRIGGER_insert,
875 						   &old_replicas_sectors);
876 			if (ret)
877 				return ret;
878 		}
879 
880 		if (new.k->type) {
881 			int ret = __trigger_extent(trans, btree, level, new.s_c,
882 						   flags & ~BTREE_TRIGGER_overwrite,
883 						   &new_replicas_sectors);
884 			if (ret)
885 				return ret;
886 		}
887 
888 		int need_rebalance_delta = 0;
889 		s64 need_rebalance_sectors_delta[1] = { 0 };
890 
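		/*
		 * Note (editor's annotation): need_rebalance_delta tracks
		 * whether this key enters or leaves the rebalance_work
		 * btree; the sectors delta tracks how much the
		 * rebalance_work accounting moves.
		 */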
891 		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
892 		need_rebalance_delta -= s != 0;
893 		need_rebalance_sectors_delta[0] -= s;
894 
895 		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
896 		need_rebalance_delta += s != 0;
897 		need_rebalance_sectors_delta[0] += s;
898 
899 		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
900 			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
901 							  new.k->p, need_rebalance_delta > 0);
902 			if (ret)
903 				return ret;
904 		}
905 
906 		if (need_rebalance_sectors_delta[0]) {
907 			int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
908 							    need_rebalance_sectors_delta, rebalance_work);
909 			if (ret)
910 				return ret;
911 		}
912 	}
913 
914 	return 0;
915 }
916 
917 /* KEY_TYPE_reservation */
918 
919 static int __trigger_reservation(struct btree_trans *trans,
920 			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
921 			enum btree_iter_update_trigger_flags flags)
922 {
923 	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
924 		s64 sectors[1] = { k.k->size };
925 
926 		if (flags & BTREE_TRIGGER_overwrite)
927 			sectors[0] = -sectors[0];
928 
929 		return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
930 				persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
931 	}
932 
933 	return 0;
934 }
935 
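/*
 * Note (editor's annotation): trigger_run_overwrite_then_insert() runs the
 * callback once for the old key (as an overwrite) and once for the new key
 * (as an insert), so the reservation is accounted as a net delta.
 */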
936 int bch2_trigger_reservation(struct btree_trans *trans,
937 			  enum btree_id btree_id, unsigned level,
938 			  struct bkey_s_c old, struct bkey_s new,
939 			  enum btree_iter_update_trigger_flags flags)
940 {
941 	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
942 }
943 
944 /* Mark superblocks: */
945 
946 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
947 				    struct bch_dev *ca, u64 b,
948 				    enum bch_data_type type,
949 				    unsigned sectors)
950 {
951 	struct bch_fs *c = trans->c;
952 	struct btree_iter iter;
953 	int ret = 0;
954 
955 	struct bkey_i_alloc_v4 *a =
956 		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
957 	if (IS_ERR(a))
958 		return PTR_ERR(a);
959 
960 	if (a->v.data_type && type && a->v.data_type != type) {
961 		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
962 		log_fsck_err(trans, bucket_metadata_type_mismatch,
963 			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
964 			"while marking %s",
965 			iter.pos.inode, iter.pos.offset, a->v.gen,
966 			bch2_data_type_str(a->v.data_type),
967 			bch2_data_type_str(type),
968 			bch2_data_type_str(type));
969 		ret = -BCH_ERR_metadata_bucket_inconsistency;
970 		goto err;
971 	}
972 
973 	if (a->v.data_type	!= type ||
974 	    a->v.dirty_sectors	!= sectors) {
975 		a->v.data_type		= type;
976 		a->v.dirty_sectors	= sectors;
977 		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
978 	}
979 err:
980 fsck_err:
981 	bch2_trans_iter_exit(trans, &iter);
982 	return ret;
983 }
984 
985 static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
986 			u64 b, enum bch_data_type data_type, unsigned sectors,
987 			enum btree_iter_update_trigger_flags flags)
988 {
989 	struct bch_fs *c = trans->c;
990 	int ret = 0;
991 
992 	struct bucket *g = gc_bucket(ca, b);
993 	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
994 				    ca->dev_idx, bch2_data_type_str(data_type)))
995 		goto err;
996 
997 	bucket_lock(g);
998 	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
999 
1000 	if (bch2_fs_inconsistent_on(g->data_type &&
1001 			g->data_type != data_type, c,
1002 			"different types of data in same bucket: %s, %s",
1003 			bch2_data_type_str(g->data_type),
1004 			bch2_data_type_str(data_type)))
1005 		goto err_unlock;
1006 
1007 	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
1008 			"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
1009 			ca->dev_idx, b, g->gen,
1010 			bch2_data_type_str(g->data_type ?: data_type),
1011 			g->dirty_sectors, sectors))
1012 		goto err_unlock;
1013 
1014 	g->data_type = data_type;
1015 	g->dirty_sectors += sectors;
1016 	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
1017 	bucket_unlock(g);
1018 	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
1019 	return ret;
1020 err_unlock:
1021 	bucket_unlock(g);
1022 err:
1023 	return -BCH_ERR_metadata_bucket_inconsistency;
1024 }
1025 
1026 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1027 			struct bch_dev *ca, u64 b,
1028 			enum bch_data_type type, unsigned sectors,
1029 			enum btree_iter_update_trigger_flags flags)
1030 {
1031 	BUG_ON(type != BCH_DATA_free &&
1032 	       type != BCH_DATA_sb &&
1033 	       type != BCH_DATA_journal);
1034 
1035 	/*
1036 	 * Backup superblock might be past the end of our normal usable space:
1037 	 */
1038 	if (b >= ca->mi.nbuckets)
1039 		return 0;
1040 
1041 	if (flags & BTREE_TRIGGER_gc)
1042 		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
1043 	else if (flags & BTREE_TRIGGER_transactional)
1044 		return commit_do(trans, NULL, NULL, 0,
1045 				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1046 	else
1047 		BUG();
1048 }
1049 
1050 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1051 			struct bch_dev *ca, u64 start, u64 end,
1052 			enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
1053 			enum btree_iter_update_trigger_flags flags)
1054 {
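	/*
	 * Note (editor's annotation): walk [start, end) bucket by bucket,
	 * accumulating the sector count for the current bucket and flushing
	 * it whenever the walk crosses into a new bucket.
	 */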
1055 	do {
1056 		u64 b = sector_to_bucket(ca, start);
1057 		unsigned sectors =
1058 			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1059 
1060 		if (b != *bucket && *bucket_sectors) {
1061 			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1062 							type, *bucket_sectors, flags);
1063 			if (ret)
1064 				return ret;
1065 
1066 			*bucket_sectors = 0;
1067 		}
1068 
1069 		*bucket		= b;
1070 		*bucket_sectors	+= sectors;
1071 		start += sectors;
1072 	} while (start < end);
1073 
1074 	return 0;
1075 }
1076 
1077 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
1078 			enum btree_iter_update_trigger_flags flags)
1079 {
1080 	struct bch_fs *c = trans->c;
1081 
1082 	mutex_lock(&c->sb_lock);
1083 	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
1084 	mutex_unlock(&c->sb_lock);
1085 
1086 	u64 bucket = 0;
1087 	unsigned i, bucket_sectors = 0;
1088 	int ret;
1089 
1090 	for (i = 0; i < layout.nr_superblocks; i++) {
1091 		u64 offset = le64_to_cpu(layout.sb_offset[i]);
1092 
1093 		if (offset == BCH_SB_SECTOR) {
1094 			ret = bch2_trans_mark_metadata_sectors(trans, ca,
1095 						0, BCH_SB_SECTOR,
1096 						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
1097 			if (ret)
1098 				return ret;
1099 		}
1100 
1101 		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1102 				      offset + (1 << layout.sb_max_size_bits),
1103 				      BCH_DATA_sb, &bucket, &bucket_sectors, flags);
1104 		if (ret)
1105 			return ret;
1106 	}
1107 
1108 	if (bucket_sectors) {
1109 		ret = bch2_trans_mark_metadata_bucket(trans, ca,
1110 				bucket, BCH_DATA_sb, bucket_sectors, flags);
1111 		if (ret)
1112 			return ret;
1113 	}
1114 
1115 	for (i = 0; i < ca->journal.nr; i++) {
1116 		ret = bch2_trans_mark_metadata_bucket(trans, ca,
1117 				ca->journal.buckets[i],
1118 				BCH_DATA_journal, ca->mi.bucket_size, flags);
1119 		if (ret)
1120 			return ret;
1121 	}
1122 
1123 	return 0;
1124 }
1125 
1126 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
1127 			enum btree_iter_update_trigger_flags flags)
1128 {
1129 	int ret = bch2_trans_run(c,
1130 		__bch2_trans_mark_dev_sb(trans, ca, flags));
1131 	bch_err_fn(c, ret);
1132 	return ret;
1133 }
1134 
1135 int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
1136 			enum btree_iter_update_trigger_flags flags)
1137 {
1138 	for_each_online_member(c, ca) {
1139 		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
1140 		if (ret) {
1141 			percpu_ref_put(&ca->io_ref[READ]);
1142 			return ret;
1143 		}
1144 	}
1145 
1146 	return 0;
1147 }
1148 
1149 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
1150 {
1151 	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
1152 }
1153 
1154 bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
1155 {
1156 	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1157 	u64 b_offset	= bucket_to_sector(ca, b);
1158 	u64 b_end	= bucket_to_sector(ca, b + 1);
1159 	unsigned i;
1160 
1161 	if (!b)
1162 		return true;
1163 
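	/*
	 * Note (editor's annotation): standard interval-overlap test -- a
	 * superblock copy at [offset, end) overlaps bucket b's sectors
	 * [b_offset, b_end) unless one interval ends before the other begins.
	 */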
1164 	for (i = 0; i < layout->nr_superblocks; i++) {
1165 		u64 offset = le64_to_cpu(layout->sb_offset[i]);
1166 		u64 end = offset + (1 << layout->sb_max_size_bits);
1167 
1168 		if (!(offset >= b_end || end <= b_offset))
1169 			return true;
1170 	}
1171 
1172 	for (i = 0; i < ca->journal.nr; i++)
1173 		if (b == ca->journal.buckets[i])
1174 			return true;
1175 
1176 	return false;
1177 }
1178 
1179 /* Disk reservations: */
1180 
1181 #define SECTORS_CACHE	1024
1182 
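/*
 * Note (editor's annotation): each CPU caches a small batch of reserved
 * sectors so the fast path never touches the shared atomic; when the cache
 * runs dry, we grab up to sectors + SECTORS_CACHE from c->sectors_available
 * in one cmpxchg.
 */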
1183 int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1184 				u64 sectors, enum bch_reservation_flags flags)
1185 {
1186 	struct bch_fs_pcpu *pcpu;
1187 	u64 old, get;
1188 	u64 sectors_available;
1189 	int ret;
1190 
1191 	percpu_down_read(&c->mark_lock);
1192 	preempt_disable();
1193 	pcpu = this_cpu_ptr(c->pcpu);
1194 
1195 	if (sectors <= pcpu->sectors_available)
1196 		goto out;
1197 
1198 	old = atomic64_read(&c->sectors_available);
1199 	do {
1200 		get = min((u64) sectors + SECTORS_CACHE, old);
1201 
1202 		if (get < sectors) {
1203 			preempt_enable();
1204 			goto recalculate;
1205 		}
1206 	} while (!atomic64_try_cmpxchg(&c->sectors_available,
1207 				       &old, old - get));
1208 
1209 	pcpu->sectors_available		+= get;
1210 
1211 out:
1212 	pcpu->sectors_available		-= sectors;
1213 	this_cpu_add(*c->online_reserved, sectors);
1214 	res->sectors			+= sectors;
1215 
1216 	preempt_enable();
1217 	percpu_up_read(&c->mark_lock);
1218 	return 0;
1219 
1220 recalculate:
1221 	mutex_lock(&c->sectors_available_lock);
1222 
1223 	percpu_u64_set(&c->pcpu->sectors_available, 0);
1224 	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
1225 
1226 	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
1227 		sectors = min(sectors, sectors_available);
1228 
1229 	if (sectors <= sectors_available ||
1230 	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1231 		atomic64_set(&c->sectors_available,
1232 			     max_t(s64, 0, sectors_available - sectors));
1233 		this_cpu_add(*c->online_reserved, sectors);
1234 		res->sectors			+= sectors;
1235 		ret = 0;
1236 	} else {
1237 		atomic64_set(&c->sectors_available, sectors_available);
1238 		ret = -BCH_ERR_ENOSPC_disk_reservation;
1239 	}
1240 
1241 	mutex_unlock(&c->sectors_available_lock);
1242 	percpu_up_read(&c->mark_lock);
1243 
1244 	return ret;
1245 }
1246 
1247 /* Startup/shutdown: */
1248 
1249 void bch2_buckets_nouse_free(struct bch_fs *c)
1250 {
1251 	for_each_member_device(c, ca) {
1252 		kvfree_rcu_mightsleep(ca->buckets_nouse);
1253 		ca->buckets_nouse = NULL;
1254 	}
1255 }
1256 
1257 int bch2_buckets_nouse_alloc(struct bch_fs *c)
1258 {
1259 	for_each_member_device(c, ca) {
1260 		BUG_ON(ca->buckets_nouse);
1261 
1262 		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
1263 					    sizeof(unsigned long),
1264 					    GFP_KERNEL|__GFP_ZERO);
1265 		if (!ca->buckets_nouse) {
1266 			bch2_dev_put(ca);
1267 			return -BCH_ERR_ENOMEM_buckets_nouse;
1268 		}
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 static void bucket_gens_free_rcu(struct rcu_head *rcu)
1275 {
1276 	struct bucket_gens *buckets =
1277 		container_of(rcu, struct bucket_gens, rcu);
1278 
1279 	kvfree(buckets);
1280 }
1281 
1282 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1283 {
1284 	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
1285 	bool resize = ca->bucket_gens != NULL;
1286 	int ret;
1287 
1288 	if (resize)
1289 		lockdep_assert_held(&c->state_lock);
1290 
1291 	if (resize && ca->buckets_nouse)
1292 		return -BCH_ERR_no_resize_with_buckets_nouse;
1293 
1294 	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
1295 				    GFP_KERNEL|__GFP_ZERO);
1296 	if (!bucket_gens) {
1297 		ret = -BCH_ERR_ENOMEM_bucket_gens;
1298 		goto err;
1299 	}
1300 
1301 	bucket_gens->first_bucket = ca->mi.first_bucket;
1302 	bucket_gens->nbuckets	= nbuckets;
1303 	bucket_gens->nbuckets_minus_first =
1304 		bucket_gens->nbuckets - bucket_gens->first_bucket;
1305 
1306 	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
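	/*
	 * Note (editor's annotation): classic RCU replacement -- copy the
	 * old generation array into the new one, publish the new array with
	 * rcu_assign_pointer(), then free whichever array is being discarded
	 * after a grace period via call_rcu() below.
	 */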
1307 
1308 	if (resize) {
1309 		bucket_gens->nbuckets = min(bucket_gens->nbuckets,
1310 					    old_bucket_gens->nbuckets);
1311 		bucket_gens->nbuckets_minus_first =
1312 			bucket_gens->nbuckets - bucket_gens->first_bucket;
1313 		memcpy(bucket_gens->b,
1314 		       old_bucket_gens->b,
1315 		       bucket_gens->nbuckets);
1316 	}
1317 
1318 	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
1319 	bucket_gens	= old_bucket_gens;
1320 
1321 	nbuckets = ca->mi.nbuckets;
1322 
1323 	ret = 0;
1324 err:
1325 	if (bucket_gens)
1326 		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
1327 
1328 	return ret;
1329 }
1330 
1331 void bch2_dev_buckets_free(struct bch_dev *ca)
1332 {
1333 	kvfree(ca->buckets_nouse);
1334 	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
1335 	free_percpu(ca->usage);
1336 }
1337 
1338 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1339 {
1340 	ca->usage = alloc_percpu(struct bch_dev_usage_full);
1341 	if (!ca->usage)
1342 		return -BCH_ERR_ENOMEM_usage_init;
1343 
1344 	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
1345 }
1346