// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

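/*
 * A worked example, assuming RESERVE_FACTOR is 6 (its value in buckets.h):
 * for r = 1000 reserved sectors,
 *
 *	round_up(1000, 1 << 6)	= 1024
 *	1024 >> 6		= 16
 *	reserve_factor(1000)	= 1016
 *
 * i.e. reservations are padded by roughly 1/2^RESERVE_FACTOR (~1.6%), so
 * "used" below errs on the side of overestimating.
 */
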
static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
{
	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			usage->d[i].buckets,
			usage->d[i].sectors,
			usage->d[i].fragmented);
	}
}

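/*
 * In the printbuf format above, '\t' advances to the caller's next tabstop
 * and '\r' right-justifies the field just printed, so the result is a
 * right-aligned table roughly like (values illustrative only):
 *
 *	          buckets  sectors  fragmented
 *	sb              6     3072           0
 *	journal         8     8192           0
 *	user         4096  2097152       16384
 */
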
static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to missing device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}

	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= 0;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (data_type == BCH_DATA_btree) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= data_type;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

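/*
 * To summarize the checks above: bch2_check_fix_ptr() verifies one decoded
 * pointer against in-memory GC state -- the device and bucket exist, the
 * bucket's alloc key was seen (gen_valid), the pointer generation is neither
 * ahead of nor more than BUCKET_GC_GEN_MAX behind the bucket generation,
 * dirty pointers aren't stale, bucket and pointer data types agree, and any
 * referenced stripe exists and matches.  Where the GC state can safely be
 * taken from the key (an uncached pointer, or a btree node pointer) it is
 * repaired in place; otherwise *do_update tells the caller to rewrite the key.
 */
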
int bch2_check_fix_ptrs(struct btree_trans *trans,
			enum btree_id btree, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	percpu_down_read(&c->mark_lock);

	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
		if (ret)
			goto err;
	}

	if (do_update) {
		if (flags & BTREE_TRIGGER_is_root) {
			bch_err(c, "cannot update btree roots yet");
			ret = -EINVAL;
			goto err;
		}

		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		rcu_read_lock();
		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_rcu(c, ptr->dev));
		rcu_read_unlock();

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			rcu_read_lock();
			bkey_for_each_ptr(ptrs, ptr) {
				struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

				ptr->gen = g->gen;
			}
			rcu_read_unlock();
		} else {
			struct bkey_ptrs ptrs;
			union bch_extent_entry *entry;

			rcu_read_lock();
restart_drop_ptrs:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

				if ((p.ptr.cached &&
				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
				    (!p.ptr.cached &&
				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
				    (g->data_type &&
				     g->data_type != data_type)) {
					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
					goto restart_drop_ptrs;
				}
			}
			rcu_read_unlock();
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		percpu_up_read(&c->mark_lock);
		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		ret =   bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, new,
					  BTREE_UPDATE_internal_snapshot_node|
					  BTREE_TRIGGER_norun);
		bch2_trans_iter_exit(trans, &iter);
		percpu_down_read(&c->mark_lock);

		if (ret)
			goto err;

		if (level)
			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
	}
err:
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}

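/*
 * A usage sketch: the extent trigger below routes repair passes here before
 * any marking happens --
 *
 *	if (unlikely(flags & BTREE_TRIGGER_check_repair))
 *		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);
 *
 * so by the time pointers are marked, keys referencing missing devices or
 * mismatched buckets have already been rewritten (with BTREE_TRIGGER_norun,
 * so the rewrite itself doesn't re-trigger).
 */
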
int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (gen_after(ptr->gen, b_gen)) {
		bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      ptr_too_stale,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (b_gen != ptr->gen) {
		bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      stale_dirty_ptr,
			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bucket_gen_get(ca, bucket_nr),
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
		bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type),
			bch2_data_type_str(ptr_data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if ((u64) *bucket_sectors + sectors > U32_MAX) {
		bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      bucket_sector_count_overflow,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			*bucket_sectors, sectors,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		sectors = -*bucket_sectors;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
err:
	bch2_dump_trans_updates(trans);
	ret = -EIO;
	goto out;
}

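/*
 * Bucket generations are 8 bits and wrap; gen_cmp()/gen_after() (buckets.h)
 * compare via signed 8 bit subtraction.  A worked example, assuming
 * BUCKET_GC_GEN_MAX is 96: with b_gen == 3 and ptr->gen == 254,
 *
 *	gen_cmp(3, 254) = (s8) (3 - 254) = 5
 *
 * so the bucket has been reused 5 times since the pointer was created: the
 * pointer is stale (dropped if cached, an error if dirty) but not yet "too
 * stale", since 5 <= BUCKET_GC_GEN_MAX.
 */
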
void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

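/*
 * Worked example for the function above: a transaction holding a 128 sector
 * disk reservation whose accounting deltas sum to added = 100 simply moves
 * those 100 sectors from the reservation (and c->online_reserved) into
 * c->usage.  If instead added were 150, should_not_have_added = 22: those 22
 * sectors are clawed back from c->sectors_available and the one-time
 * inconsistency warning fires, since usage may only grow by what was actually
 * reserved.
 */
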
/* KEY_TYPE_extent: */

static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a)
{
	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors :
		!p->ptr.cached		? &a->dirty_sectors :
					  &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;

	alloc_data_type_set(a, ptr_data_type);
	return 0;
}

static int bch2_trigger_pointer(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			const union bch_extent_entry *entry,
			s64 *sectors,
			enum btree_iter_update_trigger_flags flags)
{
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert)
			ret = -EIO;
		goto err;
	}

	struct bpos bucket;
	struct bch_backpointer bp;
	bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
	*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v);
		if (ret)
			goto err;

		if (!p.ptr.cached) {
			ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert);
			if (ret)
				goto err;
		}
	}

	if (flags & BTREE_TRIGGER_gc) {
		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = -EIO;
			goto err_unlock;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new);
		alloc_to_bucket(g, new);
		bucket_unlock(g);
err_unlock:
		percpu_up_read(&c->mark_lock);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

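/*
 * Note the two paths above: with BTREE_TRIGGER_transactional the bucket is
 * updated by queueing an alloc btree update (plus a backpointer update for
 * dirty pointers) within the transaction, while with BTREE_TRIGGER_gc the
 * in-memory gc bucket is modified directly under bucket_lock(), with
 * mark_lock held for reading.  Both paths funnel through __mark_pointer();
 * only the representation of the bucket state differs.
 */
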
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				struct bkey_s_c k,
				struct extent_ptr_decoded p,
				enum bch_data_type data_type,
				s64 sectors,
				enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
				"pointer to nonexistent stripe %llu",
				(u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
				"stripe pointer doesn't match stripe %llu",
				(u64) p.ec.idx);
			ret = -EIO;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
			stripe_blockcount_get(&s->v, p.ec.block) +
			sectors);

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		mutex_lock(&c->ec_stripes_heap_lock);

		if (!m || !m->alive) {
			mutex_unlock(&c->ec_stripes_heap_lock);
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);
			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n  while marking %s",
					    (u64) p.ec.idx, buf.buf);
			printbuf_exit(&buf);
			bch2_inconsistent_error(c);
			return -EIO;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
		mutex_unlock(&c->ec_stripes_heap_lock);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 replicas_sectors = 0;
	int ret = 0;

	struct disk_accounting_pos acc_replicas_key = {
		.type			= BCH_DISK_ACCOUNTING_replicas,
		.replicas.data_type	= data_type,
		.replicas.nr_devs	= 0,
		.replicas.nr_required	= 1,
	};

	struct disk_accounting_pos acct_compression_key = {
		.type			= BCH_DISK_ACCOUNTING_compression,
	};
	u64 compression_acct[3] = { 1, 0, 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			replicas_sectors       += disk_sectors;
			acc_replicas_key.replicas.devs[acc_replicas_key.replicas.nr_devs++] = p.ptr.dev;
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (acct_compression_key.compression.type &&
		    acct_compression_key.compression.type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
						       ARRAY_SIZE(compression_acct), gc);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		acct_compression_key.compression.type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, &replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		struct disk_accounting_pos acc_snapshot_key = {
			.type			= BCH_DISK_ACCOUNTING_snapshot,
			.snapshot.id		= k.k->p.snapshot,
		};
		ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, &replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acct_compression_key.compression.type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
					       ARRAY_SIZE(compression_acct), gc);
		if (ret)
			return ret;
	}

	if (level) {
		struct disk_accounting_pos acc_btree_key = {
			.type		= BCH_DISK_ACCOUNTING_btree,
			.btree.id	= btree_id,
		};
		ret = bch2_disk_accounting_mod(trans, &acc_btree_key, &replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (bch2_bkey_rebalance_opts(k)) {
		struct disk_accounting_pos acc = {
			.type		= BCH_DISK_ACCOUNTING_rebalance_work,
		};
		ret = bch2_disk_accounting_mod(trans, &acc, &replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	return 0;
}

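/*
 * The compression accounting above keys a three-element counter vector
 * { nr_extents, uncompressed sectors, compressed sectors } by compression
 * type, flushing whenever the type changes from one pointer to the next.
 * A worked example: an extent with two lz4-compressed pointers, each with
 * p.crc.uncompressed_size == 128 and p.crc.compressed_size == 32, accumulates
 * compression_acct = { 1, 256, 64 } against the lz4 counter (negated first
 * when this trigger runs as an overwrite).
 */
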
int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & BTREE_TRIGGER_transactional) {
		struct bch_fs *c = trans->c;
		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
			  (int) bch2_bkey_needs_rebalance(c, old);

		if (mod) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							      new.k->p, mod > 0);
			if (ret)
				return ret;
		}
	}

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc))
		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree, level, old, new, flags);

	return 0;
}

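/*
 * A sketch of trigger_run_overwrite_then_insert() semantics (see its
 * definition in the btree update headers); approximately:
 *
 *	if (old.k->type)
 *		ret = fn(trans, btree, level, old, flags & ~BTREE_TRIGGER_insert);
 *	if (!ret && new.k->type)
 *		ret = fn(trans, btree, level, new.s_c, flags & ~BTREE_TRIGGER_overwrite);
 *
 * so __trigger_extent() runs once for the old key as an overwrite (sectors
 * negated) and once for the new key as an insert, and the accounting deltas
 * net out to the actual change.
 */
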
/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors = k.k->size;

		if (flags & BTREE_TRIGGER_overwrite)
			sectors = -sectors;

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_persistent_reserved,
			.persistent_reserved.nr_replicas = bkey_s_c_to_reservation(k).v->nr_replicas,
		};

		return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, flags & BTREE_TRIGGER_gc);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_s new,
			  enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int ret = 0;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			      bucket_metadata_type_mismatch,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			iter.pos.inode, iter.pos.offset, a->v.gen,
			bch2_data_type_str(a->v.data_type),
			bch2_data_type_str(type),
			bch2_data_type_str(type));
		ret = -EIO;
		goto err;
	}

	if (a->v.data_type	!= type ||
	    a->v.dirty_sectors	!= sectors) {
		a->v.data_type		= type;
		a->v.dirty_sectors	= sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
			u64 b, enum bch_data_type data_type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	percpu_down_read(&c->mark_lock);
	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err_unlock;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type)))
		goto err;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_type_str(g->data_type ?: data_type),
			g->dirty_sectors, sectors))
		goto err;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	percpu_up_read(&c->mark_lock);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err:
	bucket_unlock(g);
err_unlock:
	percpu_up_read(&c->mark_lock);
	return -EIO;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
			struct bch_dev *ca, u64 b,
			enum bch_data_type type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
			struct bch_dev *ca, u64 start, u64 end,
			enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
			enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
							type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

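/*
 * A worked example, assuming 1024 sector buckets: marking sectors
 * [1000, 3000) walks
 *
 *	b = 0: sectors 1000..1023, 24 sectors accumulated
 *	b = 1: bucket changed, so bucket 0's 24 sectors are flushed,
 *	       then sectors 1024..2047 accumulate 1024
 *	b = 2: bucket 1 is flushed, sectors 2048..2999 accumulate 952
 *
 * leaving the final bucket's count for the caller below to flush once all
 * superblock ranges have been walked.
 */
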
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
		__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
			enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

/* Disk reservations: */

#define SECTORS_CACHE	1024

int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      u64 sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

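/*
 * The fast path above is purely percpu.  A worked example, with SECTORS_CACHE
 * of 1024: a caller asking for 8 sectors with an empty percpu cache grabs
 * 8 + 1024 sectors from c->sectors_available in one cmpxchg, satisfies the
 * request and leaves 1024 sectors cached, so subsequent small reservations on
 * this CPU touch no shared state.  Only when the global pool cannot cover a
 * request do we recalculate free space under sectors_available_lock.
 */
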
/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return -BCH_ERR_ENOMEM_buckets_nouse;
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	BUG_ON(resize && ca->buckets_nouse);

	if (!(bucket_gens	= kvmalloc(sizeof(struct bucket_gens) + nbuckets,
					   GFP_KERNEL|__GFP_ZERO))) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	if (resize) {
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       n);
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens	= old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&ca->bucket_lock);
	}

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

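/*
 * Reader side, as a hedged sketch (the real accessors live in buckets.h):
 * since the gens array is published with rcu_assign_pointer() and old arrays
 * are freed via call_rcu() -> bucket_gens_free_rcu(), a lockless reader can do
 *
 *	rcu_read_lock();
 *	struct bucket_gens *gens = rcu_dereference(ca->bucket_gens);
 *	u8 gen = b < gens->nbuckets ? gens->b[b] : 0;
 *	rcu_read_unlock();
 *
 * without taking bucket_lock or mark_lock.
 */
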
void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage);
	if (!ca->usage)
		return -BCH_ERR_ENOMEM_usage_init;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}