1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2012 Google, Inc.
4 *
5 * Foreground allocator code: allocate buckets from freelist, and allocate at
6 * sector granularity from writepoints.
7 *
8 * bch2_bucket_alloc() allocates a single bucket from a specific device.
9 *
10 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11 * in a given filesystem.
12 */
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "backpointers.h"
18 #include "btree_iter.h"
19 #include "btree_update.h"
20 #include "btree_gc.h"
21 #include "buckets.h"
22 #include "buckets_waiting_for_journal.h"
23 #include "clock.h"
24 #include "debug.h"
25 #include "disk_groups.h"
26 #include "ec.h"
27 #include "error.h"
28 #include "io_write.h"
29 #include "journal.h"
30 #include "movinggc.h"
31 #include "nocow_locking.h"
32 #include "trace.h"
33
34 #include <linux/math64.h>
35 #include <linux/rculist.h>
36 #include <linux/rcupdate.h>
37
38 static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
39 struct mutex *lock)
40 {
41 if (!mutex_trylock(lock)) {
42 bch2_trans_unlock(trans);
43 mutex_lock(lock);
44 }
45 }
46
47 const char * const bch2_watermarks[] = {
48 #define x(t) #t,
49 BCH_WATERMARKS()
50 #undef x
51 NULL
52 };
53
54 /*
55 * Open buckets represent a bucket that's currently being allocated from. They
56 * serve two purposes:
57 *
58 * - They track buckets that have been partially allocated, allowing for
59 * sub-bucket sized allocations - they're used by the sector allocator below
60 *
61 * - They provide a reference to the buckets they own that mark and sweep GC
62 * can find, until the new allocation has a pointer to it inserted into the
63 * btree
64 *
65 * When allocating some space with the sector allocator, the allocation comes
66 * with a reference to an open bucket - the caller is required to put that
67 * reference _after_ doing the index update that makes its allocation reachable.
68 */
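
/*
 * Illustrative sketch (not part of the build) of the caller-side discipline
 * described above; my_index_update() is a hypothetical placeholder for
 * whatever btree update makes the allocation reachable, while
 * bch2_bucket_alloc() and bch2_open_bucket_put() are the real interfaces:
 *
 *	struct open_bucket *ob = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *						    BCH_DATA_user, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *
 *	ret = my_index_update(...);	// insert the pointer into the btree
 *	bch2_open_bucket_put(c, ob);	// only now drop the open_bucket ref
 */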
69
70 void bch2_reset_alloc_cursors(struct bch_fs *c)
71 {
72 rcu_read_lock();
73 for_each_member_device_rcu(c, ca, NULL)
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
75 rcu_read_unlock();
76 }
77
78 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
79 {
80 open_bucket_idx_t idx = ob - c->open_buckets;
81 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
82
83 ob->hash = *slot;
84 *slot = idx;
85 }
86
87 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
88 {
89 open_bucket_idx_t idx = ob - c->open_buckets;
90 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
91
92 while (*slot != idx) {
93 BUG_ON(!*slot);
94 slot = &c->open_buckets[*slot].hash;
95 }
96
97 *slot = ob->hash;
98 ob->hash = 0;
99 }
100
101 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
102 {
103 struct bch_dev *ca = ob_dev(c, ob);
104
105 if (ob->ec) {
106 ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
107 return;
108 }
109
110 percpu_down_read(&c->mark_lock);
111 spin_lock(&ob->lock);
112
113 ob->valid = false;
114 ob->data_type = 0;
115
116 spin_unlock(&ob->lock);
117 percpu_up_read(&c->mark_lock);
118
119 spin_lock(&c->freelist_lock);
120 bch2_open_bucket_hash_remove(c, ob);
121
122 ob->freelist = c->open_buckets_freelist;
123 c->open_buckets_freelist = ob - c->open_buckets;
124
125 c->open_buckets_nr_free++;
126 ca->nr_open_buckets--;
127 spin_unlock(&c->freelist_lock);
128
129 closure_wake_up(&c->open_buckets_wait);
130 }
131
132 void bch2_open_bucket_write_error(struct bch_fs *c,
133 struct open_buckets *obs,
134 unsigned dev)
135 {
136 struct open_bucket *ob;
137 unsigned i;
138
139 open_bucket_for_each(c, obs, ob, i)
140 if (ob->dev == dev && ob->ec)
141 bch2_ec_bucket_cancel(c, ob);
142 }
143
144 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
145 {
146 struct open_bucket *ob;
147
148 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
149
150 ob = c->open_buckets + c->open_buckets_freelist;
151 c->open_buckets_freelist = ob->freelist;
152 atomic_set(&ob->pin, 1);
153 ob->data_type = 0;
154
155 c->open_buckets_nr_free--;
156 return ob;
157 }
158
159 static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
160 {
161 BUG_ON(c->open_buckets_partial_nr >=
162 ARRAY_SIZE(c->open_buckets_partial));
163
164 spin_lock(&c->freelist_lock);
165 ob->on_partial_list = true;
166 c->open_buckets_partial[c->open_buckets_partial_nr++] =
167 ob - c->open_buckets;
168 spin_unlock(&c->freelist_lock);
169
170 closure_wake_up(&c->open_buckets_wait);
171 closure_wake_up(&c->freelist_wait);
172 }
173
174 /* _only_ for allocating the journal on a new device: */
175 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
176 {
177 while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
178 u64 b = ca->new_fs_bucket_idx++;
179
180 if (!is_superblock_bucket(ca, b) &&
181 (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
182 return b;
183 }
184
185 return -1;
186 }
187
188 static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
189 {
190 switch (watermark) {
191 case BCH_WATERMARK_interior_updates:
192 return 0;
193 case BCH_WATERMARK_reclaim:
194 return OPEN_BUCKETS_COUNT / 6;
195 case BCH_WATERMARK_btree:
196 case BCH_WATERMARK_btree_copygc:
197 return OPEN_BUCKETS_COUNT / 4;
198 case BCH_WATERMARK_copygc:
199 return OPEN_BUCKETS_COUNT / 3;
200 default:
201 return OPEN_BUCKETS_COUNT / 2;
202 }
203 }
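
/*
 * Worked example (assuming OPEN_BUCKETS_COUNT is 1024; see alloc_types.h for
 * the real value): a normal-watermark allocation fails with
 * -BCH_ERR_open_buckets_empty once no more than 512 open buckets remain free,
 * copygc once no more than 341 remain, btree allocations at 256, reclaim at
 * 170, and only interior btree updates may consume the very last open bucket.
 */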
204
205 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
206 u64 bucket,
207 enum bch_watermark watermark,
208 const struct bch_alloc_v4 *a,
209 struct bucket_alloc_state *s,
210 struct closure *cl)
211 {
212 struct open_bucket *ob;
213
214 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
215 s->skipped_nouse++;
216 return NULL;
217 }
218
219 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
220 s->skipped_open++;
221 return NULL;
222 }
223
224 if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
225 c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
226 s->skipped_need_journal_commit++;
227 return NULL;
228 }
229
230 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
231 s->skipped_nocow++;
232 return NULL;
233 }
234
235 spin_lock(&c->freelist_lock);
236
237 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
238 if (cl)
239 closure_wait(&c->open_buckets_wait, cl);
240
241 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
242 spin_unlock(&c->freelist_lock);
243 return ERR_PTR(-BCH_ERR_open_buckets_empty);
244 }
245
246 /* Recheck under lock: */
247 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
248 spin_unlock(&c->freelist_lock);
249 s->skipped_open++;
250 return NULL;
251 }
252
253 ob = bch2_open_bucket_alloc(c);
254
255 spin_lock(&ob->lock);
256
257 ob->valid = true;
258 ob->sectors_free = ca->mi.bucket_size;
259 ob->dev = ca->dev_idx;
260 ob->gen = a->gen;
261 ob->bucket = bucket;
262 spin_unlock(&ob->lock);
263
264 ca->nr_open_buckets++;
265 bch2_open_bucket_hash_add(c, ob);
266
267 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
268 track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
269
270 spin_unlock(&c->freelist_lock);
271 return ob;
272 }
273
274 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
275 enum bch_watermark watermark, u64 free_entry,
276 struct bucket_alloc_state *s,
277 struct bkey_s_c freespace_k,
278 struct closure *cl)
279 {
280 struct bch_fs *c = trans->c;
281 struct btree_iter iter = { NULL };
282 struct bkey_s_c k;
283 struct open_bucket *ob;
284 struct bch_alloc_v4 a_convert;
285 const struct bch_alloc_v4 *a;
286 u64 b = free_entry & ~(~0ULL << 56);
287 unsigned genbits = free_entry >> 56;
288 struct printbuf buf = PRINTBUF;
289 int ret;
290
291 if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
292 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
293 " freespace key ",
294 ca->mi.first_bucket, ca->mi.nbuckets);
295 bch2_bkey_val_to_text(&buf, c, freespace_k);
296 bch2_trans_inconsistent(trans, "%s", buf.buf);
297 ob = ERR_PTR(-EIO);
298 goto err;
299 }
300
301 k = bch2_bkey_get_iter(trans, &iter,
302 BTREE_ID_alloc, POS(ca->dev_idx, b),
303 BTREE_ITER_cached);
304 ret = bkey_err(k);
305 if (ret) {
306 ob = ERR_PTR(ret);
307 goto err;
308 }
309
310 a = bch2_alloc_to_v4(k, &a_convert);
311
312 if (a->data_type != BCH_DATA_free) {
313 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
314 ob = NULL;
315 goto err;
316 }
317
318 prt_printf(&buf, "non free bucket in freespace btree\n"
319 " freespace key ");
320 bch2_bkey_val_to_text(&buf, c, freespace_k);
321 prt_printf(&buf, "\n ");
322 bch2_bkey_val_to_text(&buf, c, k);
323 bch2_trans_inconsistent(trans, "%s", buf.buf);
324 ob = ERR_PTR(-EIO);
325 goto err;
326 }
327
328 if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
329 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
330 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
331 " freespace key ",
332 genbits, alloc_freespace_genbits(*a) >> 56);
333 bch2_bkey_val_to_text(&buf, c, freespace_k);
334 prt_printf(&buf, "\n ");
335 bch2_bkey_val_to_text(&buf, c, k);
336 bch2_trans_inconsistent(trans, "%s", buf.buf);
337 ob = ERR_PTR(-EIO);
338 goto err;
339 }
340
341 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
342 struct bch_backpointer bp;
343 struct bpos bp_pos = POS_MIN;
344
345 ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
346 &bp_pos, &bp,
347 BTREE_ITER_nopreserve);
348 if (ret) {
349 ob = ERR_PTR(ret);
350 goto err;
351 }
352
353 if (!bkey_eq(bp_pos, POS_MAX)) {
354 /*
355 * Bucket may have data in it - we don't call
356 * bch2_trans_inconsistent() because fsck hasn't
357 * finished yet
358 */
359 ob = NULL;
360 goto err;
361 }
362 }
363
364 ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
365 if (!ob)
366 bch2_set_btree_iter_dontneed(&iter);
367 err:
368 if (iter.path)
369 bch2_set_btree_iter_dontneed(&iter);
370 bch2_trans_iter_exit(trans, &iter);
371 printbuf_exit(&buf);
372 return ob;
373 }
374
375 /*
376 * This path is for before the freespace btree is initialized:
377 *
378 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
379 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
380 */
381 static noinline struct open_bucket *
382 bch2_bucket_alloc_early(struct btree_trans *trans,
383 struct bch_dev *ca,
384 enum bch_watermark watermark,
385 struct bucket_alloc_state *s,
386 struct closure *cl)
387 {
388 struct btree_iter iter, citer;
389 struct bkey_s_c k, ck;
390 struct open_bucket *ob = NULL;
391 u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
392 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
393 u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
394 u64 alloc_cursor = alloc_start;
395 int ret;
396
397 /*
398 * Scan with an uncached iterator to avoid polluting the key cache. An
399 * uncached iter will return a cached key if one exists, but if not
400 * there is no other underlying protection for the associated key cache
401 * slot. To avoid racing bucket allocations, look up the cached key slot
402 * of any likely allocation candidate before attempting to proceed with
403 * the allocation. This provides proper exclusion on the associated
404 * bucket.
405 */
406 again:
407 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
408 BTREE_ITER_slots, k, ret) {
409 u64 bucket = k.k->p.offset;
410
411 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
412 break;
413
414 if (ca->new_fs_bucket_idx &&
415 is_superblock_bucket(ca, k.k->p.offset))
416 continue;
417
418 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
419 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
420 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
421 if (s->btree_bitmap == BTREE_BITMAP_YES &&
422 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
423 break;
424
425 bucket = sector_to_bucket(ca,
426 round_up(bucket_to_sector(ca, bucket) + 1,
427 1ULL << ca->mi.btree_bitmap_shift));
428 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
429 s->buckets_seen++;
430 s->skipped_mi_btree_bitmap++;
431 continue;
432 }
433
434 struct bch_alloc_v4 a_convert;
435 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
436 if (a->data_type != BCH_DATA_free)
437 continue;
438
439 /* now check the cached key to serialize concurrent allocs of the bucket */
440 ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
441 ret = bkey_err(ck);
442 if (ret)
443 break;
444
445 a = bch2_alloc_to_v4(ck, &a_convert);
446 if (a->data_type != BCH_DATA_free)
447 goto next;
448
449 s->buckets_seen++;
450
451 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
452 next:
453 bch2_set_btree_iter_dontneed(&citer);
454 bch2_trans_iter_exit(trans, &citer);
455 if (ob)
456 break;
457 }
458 bch2_trans_iter_exit(trans, &iter);
459
460 alloc_cursor = iter.pos.offset;
461
462 if (!ob && ret)
463 ob = ERR_PTR(ret);
464
465 if (!ob && alloc_start > first_bucket) {
466 alloc_cursor = alloc_start = first_bucket;
467 goto again;
468 }
469
470 *dev_alloc_cursor = alloc_cursor;
471
472 return ob;
473 }
474
475 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
476 struct bch_dev *ca,
477 enum bch_watermark watermark,
478 struct bucket_alloc_state *s,
479 struct closure *cl)
480 {
481 struct btree_iter iter;
482 struct bkey_s_c k;
483 struct open_bucket *ob = NULL;
484 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
485 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
486 u64 alloc_cursor = alloc_start;
487 int ret;
488
489 BUG_ON(ca->new_fs_bucket_idx);
490 again:
491 for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
492 POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
493 if (k.k->p.inode != ca->dev_idx)
494 break;
495
496 for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
497 alloc_cursor < k.k->p.offset;
498 alloc_cursor++) {
499 s->buckets_seen++;
500
501 u64 bucket = alloc_cursor & ~(~0ULL << 56);
502 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
503 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
504 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
505 if (s->btree_bitmap == BTREE_BITMAP_YES &&
506 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
507 goto fail;
508
509 bucket = sector_to_bucket(ca,
510 round_up(bucket_to_sector(ca, bucket) + 1,
511 1ULL << ca->mi.btree_bitmap_shift));
512 u64 genbits = alloc_cursor >> 56;
513 alloc_cursor = bucket | (genbits << 56);
514
515 if (alloc_cursor > k.k->p.offset)
516 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
517 s->skipped_mi_btree_bitmap++;
518 continue;
519 }
520
521 ob = try_alloc_bucket(trans, ca, watermark,
522 alloc_cursor, s, k, cl);
523 if (ob) {
524 bch2_set_btree_iter_dontneed(&iter);
525 break;
526 }
527 }
528
529 if (ob || ret)
530 break;
531 }
532 fail:
533 bch2_trans_iter_exit(trans, &iter);
534
535 if (!ob && ret)
536 ob = ERR_PTR(ret);
537
538 if (!ob && alloc_start > ca->mi.first_bucket) {
539 alloc_cursor = alloc_start = ca->mi.first_bucket;
540 goto again;
541 }
542
543 *dev_alloc_cursor = alloc_cursor;
544
545 return ob;
546 }
547
548 static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
549 enum bch_watermark watermark,
550 enum bch_data_type data_type,
551 struct closure *cl,
552 struct bch_dev_usage *usage,
553 struct bucket_alloc_state *s,
554 struct open_bucket *ob)
555 {
556 struct printbuf buf = PRINTBUF;
557
558 printbuf_tabstop_push(&buf, 24);
559
560 prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
561 prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
562 prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
563 prt_printf(&buf, "blocking\t%u\n", cl != NULL);
564 prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
565 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
566 prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
567 bch2_copygc_wait_amount(c),
568 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
569 prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
570 prt_printf(&buf, "open\t%llu\n", s->skipped_open);
571 prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
572 prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
573 prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
574 prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
575
576 if (!IS_ERR(ob)) {
577 prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
578 trace_bucket_alloc(c, buf.buf);
579 } else {
580 prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
581 trace_bucket_alloc_fail(c, buf.buf);
582 }
583
584 printbuf_exit(&buf);
585 }
586
587 /**
588 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
589 * @trans: transaction object
590 * @ca: device to allocate from
591 * @watermark: how important is this allocation?
592 * @data_type: BCH_DATA_journal, btree, user...
593 * @cl: if not NULL, closure to be used to wait if buckets not available
594 * @usage: also filled in with the current device usage
595 *
596 * Returns: an open_bucket on success, or an ERR_PTR() on failure.
597 */
598 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
599 struct bch_dev *ca,
600 enum bch_watermark watermark,
601 enum bch_data_type data_type,
602 struct closure *cl,
603 struct bch_dev_usage *usage)
604 {
605 struct bch_fs *c = trans->c;
606 struct open_bucket *ob = NULL;
607 bool freespace = READ_ONCE(ca->mi.freespace_initialized);
608 u64 avail;
609 struct bucket_alloc_state s = {
610 .btree_bitmap = data_type == BCH_DATA_btree,
611 };
612 bool waiting = false;
613 again:
614 bch2_dev_usage_read_fast(ca, usage);
615 avail = dev_buckets_free(ca, *usage, watermark);
616
617 if (usage->d[BCH_DATA_need_discard].buckets > avail)
618 bch2_dev_do_discards(ca);
619
620 if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
621 bch2_gc_gens_async(c);
622
623 if (should_invalidate_buckets(ca, *usage))
624 bch2_dev_do_invalidates(ca);
625
626 if (!avail) {
627 if (cl && !waiting) {
628 closure_wait(&c->freelist_wait, cl);
629 waiting = true;
630 goto again;
631 }
632
633 track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
634
635 ob = ERR_PTR(-BCH_ERR_freelist_empty);
636 goto err;
637 }
638
639 if (waiting)
640 closure_wake_up(&c->freelist_wait);
641 alloc:
642 ob = likely(freespace)
643 ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
644 : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
645
646 if (s.skipped_need_journal_commit * 2 > avail)
647 bch2_journal_flush_async(&c->journal, NULL);
648
649 if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
650 s.btree_bitmap = BTREE_BITMAP_ANY;
651 goto alloc;
652 }
653
654 if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
655 freespace = false;
656 goto alloc;
657 }
658 err:
659 if (!ob)
660 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
661
662 if (!IS_ERR(ob))
663 ob->data_type = data_type;
664
665 if (!IS_ERR(ob))
666 count_event(c, bucket_alloc);
667 else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
668 count_event(c, bucket_alloc_fail);
669
670 if (!IS_ERR(ob)
671 ? trace_bucket_alloc_enabled()
672 : trace_bucket_alloc_fail_enabled())
673 trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);
674
675 return ob;
676 }
677
678 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
679 enum bch_watermark watermark,
680 enum bch_data_type data_type,
681 struct closure *cl)
682 {
683 struct bch_dev_usage usage;
684 struct open_bucket *ob;
685
686 bch2_trans_do(c, NULL, NULL, 0,
687 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
688 data_type, cl, &usage)));
689 return ob;
690 }
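/*
 * Illustrative sketch (an assumption about callers, not code from this file)
 * of using the closure argument to wait for buckets instead of failing:
 *
 *	struct open_bucket *ob;
 *	struct closure cl;
 *	closure_init_stack(&cl);
 *
 *	while (1) {
 *		ob = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *				       BCH_DATA_journal, &cl);
 *		if (!IS_ERR(ob) ||
 *		    (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_freelist_empty) &&
 *		     !bch2_err_matches(PTR_ERR(ob), BCH_ERR_open_buckets_empty)))
 *			break;
 *
 *		closure_sync(&cl);	// we were put on a waitlist; wait here
 *	}
 */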
691
692 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
693 unsigned l, unsigned r)
694 {
695 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
696 (stripe->next_alloc[l] < stripe->next_alloc[r]));
697 }
698
699 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
700
701 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
702 struct dev_stripe_state *stripe,
703 struct bch_devs_mask *devs)
704 {
705 struct dev_alloc_list ret = { .nr = 0 };
706 unsigned i;
707
708 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
709 ret.devs[ret.nr++] = i;
710
711 bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
712 return ret;
713 }
714
715 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
716 struct dev_stripe_state *stripe,
717 struct bch_dev_usage *usage)
718 {
719 u64 *v = stripe->next_alloc + ca->dev_idx;
720 u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
721 u64 free_space_inv = free_space
722 ? div64_u64(1ULL << 48, free_space)
723 : 1ULL << 48;
724 u64 scale = *v / 4;
725
726 if (*v + free_space_inv >= *v)
727 *v += free_space_inv;
728 else
729 *v = U64_MAX;
730
731 for (v = stripe->next_alloc;
732 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
733 *v = *v < scale ? 0 : *v - scale;
734 }
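
/*
 * Worked example of the weighting above: a device with ~1000 free buckets has
 * its next_alloc bumped by 2^48/1000 ≈ 2.8e11 per allocation, while one with
 * only ~100 free buckets is bumped by ≈ 2.8e12; since bch2_dev_alloc_list()
 * sorts devices by smallest next_alloc first, the fuller device is chosen
 * roughly ten times less often.  The final loop subtracting *v / 4 from every
 * counter keeps them from all saturating at U64_MAX over time.
 */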
735
736 void bch2_dev_stripe_increment(struct bch_dev *ca,
737 struct dev_stripe_state *stripe)
738 {
739 struct bch_dev_usage usage;
740
741 bch2_dev_usage_read_fast(ca, &usage);
742 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
743 }
744
745 static int add_new_bucket(struct bch_fs *c,
746 struct open_buckets *ptrs,
747 struct bch_devs_mask *devs_may_alloc,
748 unsigned nr_replicas,
749 unsigned *nr_effective,
750 bool *have_cache,
751 unsigned flags,
752 struct open_bucket *ob)
753 {
754 unsigned durability = ob_dev(c, ob)->mi.durability;
755
756 BUG_ON(*nr_effective >= nr_replicas);
757
758 __clear_bit(ob->dev, devs_may_alloc->d);
759 *nr_effective += durability;
760 *have_cache |= !durability;
761
762 ob_push(c, ptrs, ob);
763
764 if (*nr_effective >= nr_replicas)
765 return 1;
766 if (ob->ec)
767 return 1;
768 return 0;
769 }
770
771 int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
772 struct open_buckets *ptrs,
773 struct dev_stripe_state *stripe,
774 struct bch_devs_mask *devs_may_alloc,
775 unsigned nr_replicas,
776 unsigned *nr_effective,
777 bool *have_cache,
778 unsigned flags,
779 enum bch_data_type data_type,
780 enum bch_watermark watermark,
781 struct closure *cl)
782 {
783 struct bch_fs *c = trans->c;
784 struct dev_alloc_list devs_sorted =
785 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
786 int ret = -BCH_ERR_insufficient_devices;
787
788 BUG_ON(*nr_effective >= nr_replicas);
789
790 for (unsigned i = 0; i < devs_sorted.nr; i++) {
791 struct bch_dev_usage usage;
792 struct open_bucket *ob;
793
794 unsigned dev = devs_sorted.devs[i];
795 struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
796 if (!ca)
797 continue;
798
799 if (!ca->mi.durability && *have_cache) {
800 bch2_dev_put(ca);
801 continue;
802 }
803
804 ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
805 if (!IS_ERR(ob))
806 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
807 bch2_dev_put(ca);
808
809 if (IS_ERR(ob)) {
810 ret = PTR_ERR(ob);
811 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
812 break;
813 continue;
814 }
815
816 if (add_new_bucket(c, ptrs, devs_may_alloc,
817 nr_replicas, nr_effective,
818 have_cache, flags, ob)) {
819 ret = 0;
820 break;
821 }
822 }
823
824 return ret;
825 }
826
827 /* Allocate from stripes: */
828
829 /*
830 * if we can't allocate a new stripe because there are already too many
831 * partially filled stripes, force allocating from an existing stripe even when
832 * it's to a device we don't want:
833 */
834
835 static int bucket_alloc_from_stripe(struct btree_trans *trans,
836 struct open_buckets *ptrs,
837 struct write_point *wp,
838 struct bch_devs_mask *devs_may_alloc,
839 u16 target,
840 unsigned nr_replicas,
841 unsigned *nr_effective,
842 bool *have_cache,
843 enum bch_watermark watermark,
844 unsigned flags,
845 struct closure *cl)
846 {
847 struct bch_fs *c = trans->c;
848 struct dev_alloc_list devs_sorted;
849 struct ec_stripe_head *h;
850 struct open_bucket *ob;
851 unsigned i, ec_idx;
852 int ret = 0;
853
854 if (nr_replicas < 2)
855 return 0;
856
857 if (ec_open_bucket(c, ptrs))
858 return 0;
859
860 h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
861 if (IS_ERR(h))
862 return PTR_ERR(h);
863 if (!h)
864 return 0;
865
866 devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
867
868 for (i = 0; i < devs_sorted.nr; i++)
869 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
870 if (!h->s->blocks[ec_idx])
871 continue;
872
873 ob = c->open_buckets + h->s->blocks[ec_idx];
874 if (ob->dev == devs_sorted.devs[i] &&
875 !test_and_set_bit(ec_idx, h->s->blocks_allocated))
876 goto got_bucket;
877 }
878 goto out_put_head;
879 got_bucket:
880 ob->ec_idx = ec_idx;
881 ob->ec = h->s;
882 ec_stripe_new_get(h->s, STRIPE_REF_io);
883
884 ret = add_new_bucket(c, ptrs, devs_may_alloc,
885 nr_replicas, nr_effective,
886 have_cache, flags, ob);
887 out_put_head:
888 bch2_ec_stripe_head_put(c, h);
889 return ret;
890 }
891
892 /* Sector allocator */
893
894 static bool want_bucket(struct bch_fs *c,
895 struct write_point *wp,
896 struct bch_devs_mask *devs_may_alloc,
897 bool *have_cache, bool ec,
898 struct open_bucket *ob)
899 {
900 struct bch_dev *ca = ob_dev(c, ob);
901
902 if (!test_bit(ob->dev, devs_may_alloc->d))
903 return false;
904
905 if (ob->data_type != wp->data_type)
906 return false;
907
908 if (!ca->mi.durability &&
909 (wp->data_type == BCH_DATA_btree || ec || *have_cache))
910 return false;
911
912 if (ec != (ob->ec != NULL))
913 return false;
914
915 return true;
916 }
917
918 static int bucket_alloc_set_writepoint(struct bch_fs *c,
919 struct open_buckets *ptrs,
920 struct write_point *wp,
921 struct bch_devs_mask *devs_may_alloc,
922 unsigned nr_replicas,
923 unsigned *nr_effective,
924 bool *have_cache,
925 bool ec, unsigned flags)
926 {
927 struct open_buckets ptrs_skip = { .nr = 0 };
928 struct open_bucket *ob;
929 unsigned i;
930 int ret = 0;
931
932 open_bucket_for_each(c, &wp->ptrs, ob, i) {
933 if (!ret && want_bucket(c, wp, devs_may_alloc,
934 have_cache, ec, ob))
935 ret = add_new_bucket(c, ptrs, devs_may_alloc,
936 nr_replicas, nr_effective,
937 have_cache, flags, ob);
938 else
939 ob_push(c, &ptrs_skip, ob);
940 }
941 wp->ptrs = ptrs_skip;
942
943 return ret;
944 }
945
946 static int bucket_alloc_set_partial(struct bch_fs *c,
947 struct open_buckets *ptrs,
948 struct write_point *wp,
949 struct bch_devs_mask *devs_may_alloc,
950 unsigned nr_replicas,
951 unsigned *nr_effective,
952 bool *have_cache, bool ec,
953 enum bch_watermark watermark,
954 unsigned flags)
955 {
956 int i, ret = 0;
957
958 if (!c->open_buckets_partial_nr)
959 return 0;
960
961 spin_lock(&c->freelist_lock);
962
963 if (!c->open_buckets_partial_nr)
964 goto unlock;
965
966 for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
967 struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
968
969 if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
970 struct bch_dev *ca = ob_dev(c, ob);
971 struct bch_dev_usage usage;
972 u64 avail;
973
974 bch2_dev_usage_read_fast(ca, &usage);
975 avail = dev_buckets_free(ca, usage, watermark);
976 if (!avail)
977 continue;
978
979 array_remove_item(c->open_buckets_partial,
980 c->open_buckets_partial_nr,
981 i);
982 ob->on_partial_list = false;
983
984 ret = add_new_bucket(c, ptrs, devs_may_alloc,
985 nr_replicas, nr_effective,
986 have_cache, flags, ob);
987 if (ret)
988 break;
989 }
990 }
991 unlock:
992 spin_unlock(&c->freelist_lock);
993 return ret;
994 }
995
996 static int __open_bucket_add_buckets(struct btree_trans *trans,
997 struct open_buckets *ptrs,
998 struct write_point *wp,
999 struct bch_devs_list *devs_have,
1000 u16 target,
1001 bool erasure_code,
1002 unsigned nr_replicas,
1003 unsigned *nr_effective,
1004 bool *have_cache,
1005 enum bch_watermark watermark,
1006 unsigned flags,
1007 struct closure *_cl)
1008 {
1009 struct bch_fs *c = trans->c;
1010 struct bch_devs_mask devs;
1011 struct open_bucket *ob;
1012 struct closure *cl = NULL;
1013 unsigned i;
1014 int ret;
1015
1016 devs = target_rw_devs(c, wp->data_type, target);
1017
1018 /* Don't allocate from devices we already have pointers to: */
1019 darray_for_each(*devs_have, i)
1020 __clear_bit(*i, devs.d);
1021
1022 open_bucket_for_each(c, ptrs, ob, i)
1023 __clear_bit(ob->dev, devs.d);
1024
1025 if (erasure_code && ec_open_bucket(c, ptrs))
1026 return 0;
1027
1028 ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
1029 nr_replicas, nr_effective,
1030 have_cache, erasure_code, flags);
1031 if (ret)
1032 return ret;
1033
1034 ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
1035 nr_replicas, nr_effective,
1036 have_cache, erasure_code, watermark, flags);
1037 if (ret)
1038 return ret;
1039
1040 if (erasure_code) {
1041 ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
1042 target,
1043 nr_replicas, nr_effective,
1044 have_cache,
1045 watermark, flags, _cl);
1046 } else {
1047 retry_blocking:
1048 /*
1049 * Try nonblocking first, so that if one device is full we'll try from
1050 * other devices:
1051 */
1052 ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
1053 nr_replicas, nr_effective, have_cache,
1054 flags, wp->data_type, watermark, cl);
1055 if (ret &&
1056 !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
1057 !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
1058 !cl && _cl) {
1059 cl = _cl;
1060 goto retry_blocking;
1061 }
1062 }
1063
1064 return ret;
1065 }
1066
1067 static int open_bucket_add_buckets(struct btree_trans *trans,
1068 struct open_buckets *ptrs,
1069 struct write_point *wp,
1070 struct bch_devs_list *devs_have,
1071 u16 target,
1072 unsigned erasure_code,
1073 unsigned nr_replicas,
1074 unsigned *nr_effective,
1075 bool *have_cache,
1076 enum bch_watermark watermark,
1077 unsigned flags,
1078 struct closure *cl)
1079 {
1080 int ret;
1081
1082 if (erasure_code) {
1083 ret = __open_bucket_add_buckets(trans, ptrs, wp,
1084 devs_have, target, erasure_code,
1085 nr_replicas, nr_effective, have_cache,
1086 watermark, flags, cl);
1087 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1088 bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
1089 bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
1090 bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
1091 return ret;
1092 if (*nr_effective >= nr_replicas)
1093 return 0;
1094 }
1095
1096 ret = __open_bucket_add_buckets(trans, ptrs, wp,
1097 devs_have, target, false,
1098 nr_replicas, nr_effective, have_cache,
1099 watermark, flags, cl);
1100 return ret < 0 ? ret : 0;
1101 }
1102
1103 /**
1104 * should_drop_bucket - check if this open_bucket should go away
1105 * @ob: open_bucket to predicate on
1106 * @c: filesystem handle
1107 * @ca: if set, we're killing buckets for a particular device
1108 * @ec: if true, we're shutting down erasure coding and killing all ec
1109 * open_buckets
1110 * (if neither @ca nor @ec is given, every open_bucket should be dropped)
1111 * Returns: true if we should kill this open_bucket
1112 *
1113 * We're killing open_buckets because we're shutting down a device, erasure
1114 * coding, or the entire filesystem - check if this open_bucket matches:
1115 */
1116 static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
1117 struct bch_dev *ca, bool ec)
1118 {
1119 if (ec) {
1120 return ob->ec != NULL;
1121 } else if (ca) {
1122 bool drop = ob->dev == ca->dev_idx;
1123 struct open_bucket *ob2;
1124 unsigned i;
1125
1126 if (!drop && ob->ec) {
1127 unsigned nr_blocks;
1128
1129 mutex_lock(&ob->ec->lock);
1130 nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
1131
1132 for (i = 0; i < nr_blocks; i++) {
1133 if (!ob->ec->blocks[i])
1134 continue;
1135
1136 ob2 = c->open_buckets + ob->ec->blocks[i];
1137 drop |= ob2->dev == ca->dev_idx;
1138 }
1139 mutex_unlock(&ob->ec->lock);
1140 }
1141
1142 return drop;
1143 } else {
1144 return true;
1145 }
1146 }
1147
1148 static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
1149 bool ec, struct write_point *wp)
1150 {
1151 struct open_buckets ptrs = { .nr = 0 };
1152 struct open_bucket *ob;
1153 unsigned i;
1154
1155 mutex_lock(&wp->lock);
1156 open_bucket_for_each(c, &wp->ptrs, ob, i)
1157 if (should_drop_bucket(ob, c, ca, ec))
1158 bch2_open_bucket_put(c, ob);
1159 else
1160 ob_push(c, &ptrs, ob);
1161 wp->ptrs = ptrs;
1162 mutex_unlock(&wp->lock);
1163 }
1164
1165 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
1166 bool ec)
1167 {
1168 unsigned i;
1169
1170 /* Next, close write points that point to this device... */
1171 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1172 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
1173
1174 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
1175 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
1176 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
1177
1178 mutex_lock(&c->btree_reserve_cache_lock);
1179 while (c->btree_reserve_cache_nr) {
1180 struct btree_alloc *a =
1181 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1182
1183 bch2_open_buckets_put(c, &a->ob);
1184 }
1185 mutex_unlock(&c->btree_reserve_cache_lock);
1186
1187 spin_lock(&c->freelist_lock);
1188 i = 0;
1189 while (i < c->open_buckets_partial_nr) {
1190 struct open_bucket *ob =
1191 c->open_buckets + c->open_buckets_partial[i];
1192
1193 if (should_drop_bucket(ob, c, ca, ec)) {
1194 --c->open_buckets_partial_nr;
1195 swap(c->open_buckets_partial[i],
1196 c->open_buckets_partial[c->open_buckets_partial_nr]);
1197 ob->on_partial_list = false;
1198 spin_unlock(&c->freelist_lock);
1199 bch2_open_bucket_put(c, ob);
1200 spin_lock(&c->freelist_lock);
1201 } else {
1202 i++;
1203 }
1204 }
1205 spin_unlock(&c->freelist_lock);
1206
1207 bch2_ec_stop_dev(c, ca);
1208 }
1209
1210 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
1211 unsigned long write_point)
1212 {
1213 unsigned hash =
1214 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1215
1216 return &c->write_points_hash[hash];
1217 }
1218
1219 static struct write_point *__writepoint_find(struct hlist_head *head,
1220 unsigned long write_point)
1221 {
1222 struct write_point *wp;
1223
1224 rcu_read_lock();
1225 hlist_for_each_entry_rcu(wp, head, node)
1226 if (wp->write_point == write_point)
1227 goto out;
1228 wp = NULL;
1229 out:
1230 rcu_read_unlock();
1231 return wp;
1232 }
1233
1234 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
1235 {
1236 u64 stranded = c->write_points_nr * c->bucket_size_max;
1237 u64 free = bch2_fs_usage_read_short(c).free;
1238
1239 return stranded * factor > free;
1240 }
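
/*
 * Worked example: each write point can strand up to bucket_size_max worth of
 * partially written buckets, e.g. 32 write points with 1MB buckets can leave
 * up to 32MB sitting unusable in open buckets.  try_increase_writepoints()
 * only adds a write point while that stranded bound is below 1/32nd of free
 * space, and try_decrease_writepoints() starts reclaiming write points once
 * it exceeds 1/8th.
 */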
1241
1242 static bool try_increase_writepoints(struct bch_fs *c)
1243 {
1244 struct write_point *wp;
1245
1246 if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
1247 too_many_writepoints(c, 32))
1248 return false;
1249
1250 wp = c->write_points + c->write_points_nr++;
1251 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1252 return true;
1253 }
1254
1255 static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
1256 {
1257 struct bch_fs *c = trans->c;
1258 struct write_point *wp;
1259 struct open_bucket *ob;
1260 unsigned i;
1261
1262 mutex_lock(&c->write_points_hash_lock);
1263 if (c->write_points_nr < old_nr) {
1264 mutex_unlock(&c->write_points_hash_lock);
1265 return true;
1266 }
1267
1268 if (c->write_points_nr == 1 ||
1269 !too_many_writepoints(c, 8)) {
1270 mutex_unlock(&c->write_points_hash_lock);
1271 return false;
1272 }
1273
1274 wp = c->write_points + --c->write_points_nr;
1275
1276 hlist_del_rcu(&wp->node);
1277 mutex_unlock(&c->write_points_hash_lock);
1278
1279 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1280 open_bucket_for_each(c, &wp->ptrs, ob, i)
1281 open_bucket_free_unused(c, ob);
1282 wp->ptrs.nr = 0;
1283 mutex_unlock(&wp->lock);
1284 return true;
1285 }
1286
1287 static struct write_point *writepoint_find(struct btree_trans *trans,
1288 unsigned long write_point)
1289 {
1290 struct bch_fs *c = trans->c;
1291 struct write_point *wp, *oldest;
1292 struct hlist_head *head;
1293
1294 if (!(write_point & 1UL)) {
1295 wp = (struct write_point *) write_point;
1296 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1297 return wp;
1298 }
1299
1300 head = writepoint_hash(c, write_point);
1301 restart_find:
1302 wp = __writepoint_find(head, write_point);
1303 if (wp) {
1304 lock_wp:
1305 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1306 if (wp->write_point == write_point)
1307 goto out;
1308 mutex_unlock(&wp->lock);
1309 goto restart_find;
1310 }
1311 restart_find_oldest:
1312 oldest = NULL;
1313 for (wp = c->write_points;
1314 wp < c->write_points + c->write_points_nr; wp++)
1315 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1316 oldest = wp;
1317
1318 bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
1319 bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
1320 if (oldest >= c->write_points + c->write_points_nr ||
1321 try_increase_writepoints(c)) {
1322 mutex_unlock(&c->write_points_hash_lock);
1323 mutex_unlock(&oldest->lock);
1324 goto restart_find_oldest;
1325 }
1326
1327 wp = __writepoint_find(head, write_point);
1328 if (wp && wp != oldest) {
1329 mutex_unlock(&c->write_points_hash_lock);
1330 mutex_unlock(&oldest->lock);
1331 goto lock_wp;
1332 }
1333
1334 wp = oldest;
1335 hlist_del_rcu(&wp->node);
1336 wp->write_point = write_point;
1337 hlist_add_head_rcu(&wp->node, head);
1338 mutex_unlock(&c->write_points_hash_lock);
1339 out:
1340 wp->last_used = local_clock();
1341 return wp;
1342 }
1343
1344 static noinline void
1345 deallocate_extra_replicas(struct bch_fs *c,
1346 struct open_buckets *ptrs,
1347 struct open_buckets *ptrs_no_use,
1348 unsigned extra_replicas)
1349 {
1350 struct open_buckets ptrs2 = { 0 };
1351 struct open_bucket *ob;
1352 unsigned i;
1353
1354 open_bucket_for_each(c, ptrs, ob, i) {
1355 unsigned d = ob_dev(c, ob)->mi.durability;
1356
1357 if (d && d <= extra_replicas) {
1358 extra_replicas -= d;
1359 ob_push(c, ptrs_no_use, ob);
1360 } else {
1361 ob_push(c, &ptrs2, ob);
1362 }
1363 }
1364
1365 *ptrs = ptrs2;
1366 }
1367
1368 /*
1369 * Get us an open_bucket we can allocate from, return with it locked:
1370 */
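/*
 * Typical calling sequence (illustrative sketch; "k" and "sectors" are
 * assumptions about the caller, while the bch2_alloc_sectors_*() calls are
 * the real interfaces defined in this file and in alloc_foreground.h):
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, false, write_point,
 *					     &devs_have, nr_replicas, nr_replicas,
 *					     watermark, 0, cl, &wp);
 *	if (ret)
 *		return ret;
 *
 *	sectors = min(sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);		// drops the write point lock
 */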
1371 int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
1372 unsigned target,
1373 unsigned erasure_code,
1374 struct write_point_specifier write_point,
1375 struct bch_devs_list *devs_have,
1376 unsigned nr_replicas,
1377 unsigned nr_replicas_required,
1378 enum bch_watermark watermark,
1379 unsigned flags,
1380 struct closure *cl,
1381 struct write_point **wp_ret)
1382 {
1383 struct bch_fs *c = trans->c;
1384 struct write_point *wp;
1385 struct open_bucket *ob;
1386 struct open_buckets ptrs;
1387 unsigned nr_effective, write_points_nr;
1388 bool have_cache;
1389 int ret;
1390 int i;
1391
1392 if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
1393 erasure_code = false;
1394
1395 BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
1396
1397 BUG_ON(!nr_replicas || !nr_replicas_required);
1398 retry:
1399 ptrs.nr = 0;
1400 nr_effective = 0;
1401 write_points_nr = c->write_points_nr;
1402 have_cache = false;
1403
1404 *wp_ret = wp = writepoint_find(trans, write_point.v);
1405
1406 ret = bch2_trans_relock(trans);
1407 if (ret)
1408 goto err;
1409
1410 /* metadata may not allocate on cache devices: */
1411 if (wp->data_type != BCH_DATA_user)
1412 have_cache = true;
1413
1414 if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1415 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1416 target, erasure_code,
1417 nr_replicas, &nr_effective,
1418 &have_cache, watermark,
1419 flags, NULL);
1420 if (!ret ||
1421 bch2_err_matches(ret, BCH_ERR_transaction_restart))
1422 goto alloc_done;
1423
1424 /* Don't retry from all devices if we're out of open buckets: */
1425 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
1426 int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1427 target, erasure_code,
1428 nr_replicas, &nr_effective,
1429 &have_cache, watermark,
1430 flags, cl);
1431 if (!ret2 ||
1432 bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
1433 bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
1434 ret = ret2;
1435 goto alloc_done;
1436 }
1437 }
1438
1439 /*
1440 * Only try to allocate cache (durability = 0 devices) from the
1441 * specified target:
1442 */
1443 have_cache = true;
1444
1445 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1446 0, erasure_code,
1447 nr_replicas, &nr_effective,
1448 &have_cache, watermark,
1449 flags, cl);
1450 } else {
1451 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1452 target, erasure_code,
1453 nr_replicas, &nr_effective,
1454 &have_cache, watermark,
1455 flags, cl);
1456 }
1457 alloc_done:
1458 BUG_ON(!ret && nr_effective < nr_replicas);
1459
1460 if (erasure_code && !ec_open_bucket(c, &ptrs))
1461 pr_debug("failed to get ec bucket: ret %u", ret);
1462
1463 if (ret == -BCH_ERR_insufficient_devices &&
1464 nr_effective >= nr_replicas_required)
1465 ret = 0;
1466
1467 if (ret)
1468 goto err;
1469
1470 if (nr_effective > nr_replicas)
1471 deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
1472
1473 /* Free buckets we didn't use: */
1474 open_bucket_for_each(c, &wp->ptrs, ob, i)
1475 open_bucket_free_unused(c, ob);
1476
1477 wp->ptrs = ptrs;
1478
1479 wp->sectors_free = UINT_MAX;
1480
1481 open_bucket_for_each(c, &wp->ptrs, ob, i)
1482 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1483
1484 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1485
1486 return 0;
1487 err:
1488 open_bucket_for_each(c, &wp->ptrs, ob, i)
1489 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1490 ob_push(c, &ptrs, ob);
1491 else
1492 open_bucket_free_unused(c, ob);
1493 wp->ptrs = ptrs;
1494
1495 mutex_unlock(&wp->lock);
1496
1497 if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1498 try_decrease_writepoints(trans, write_points_nr))
1499 goto retry;
1500
1501 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
1502 bch2_err_matches(ret, BCH_ERR_freelist_empty))
1503 return cl
1504 ? -BCH_ERR_bucket_alloc_blocked
1505 : -BCH_ERR_ENOSPC_bucket_alloc;
1506
1507 return ret;
1508 }
1509
1510 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1511 {
1512 struct bch_dev *ca = ob_dev(c, ob);
1513
1514 return (struct bch_extent_ptr) {
1515 .type = 1 << BCH_EXTENT_ENTRY_ptr,
1516 .gen = ob->gen,
1517 .dev = ob->dev,
1518 .offset = bucket_to_sector(ca, ob->bucket) +
1519 ca->mi.bucket_size -
1520 ob->sectors_free,
1521 };
1522 }
1523
1524 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1525 struct bkey_i *k, unsigned sectors,
1526 bool cached)
1527 {
1528 bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
1529 }
1530
1531 /*
1532 * Append pointers to the space we just allocated to @k, and mark @sectors space
1533 * as allocated out of @ob
1534 */
1535 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1536 {
1537 bch2_alloc_sectors_done_inlined(c, wp);
1538 }
1539
1540 static inline void writepoint_init(struct write_point *wp,
1541 enum bch_data_type type)
1542 {
1543 mutex_init(&wp->lock);
1544 wp->data_type = type;
1545
1546 INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1547 INIT_LIST_HEAD(&wp->writes);
1548 spin_lock_init(&wp->writes_lock);
1549 }
1550
1551 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1552 {
1553 struct open_bucket *ob;
1554 struct write_point *wp;
1555
1556 mutex_init(&c->write_points_hash_lock);
1557 c->write_points_nr = ARRAY_SIZE(c->write_points);
1558
1559 /* open bucket 0 is a sentinel NULL: */
1560 spin_lock_init(&c->open_buckets[0].lock);
1561
1562 for (ob = c->open_buckets + 1;
1563 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1564 spin_lock_init(&ob->lock);
1565 c->open_buckets_nr_free++;
1566
1567 ob->freelist = c->open_buckets_freelist;
1568 c->open_buckets_freelist = ob - c->open_buckets;
1569 }
1570
1571 writepoint_init(&c->btree_write_point, BCH_DATA_btree);
1572 writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
1573 writepoint_init(&c->copygc_write_point, BCH_DATA_user);
1574
1575 for (wp = c->write_points;
1576 wp < c->write_points + c->write_points_nr; wp++) {
1577 writepoint_init(wp, BCH_DATA_user);
1578
1579 wp->last_used = local_clock();
1580 wp->write_point = (unsigned long) wp;
1581 hlist_add_head_rcu(&wp->node,
1582 writepoint_hash(c, wp->write_point));
1583 }
1584 }
1585
1586 void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
1587 {
1588 struct bch_dev *ca = ob_dev(c, ob);
1589 unsigned data_type = ob->data_type;
1590 barrier(); /* READ_ONCE() doesn't work on bitfields */
1591
1592 prt_printf(out, "%zu ref %u ",
1593 ob - c->open_buckets,
1594 atomic_read(&ob->pin));
1595 bch2_prt_data_type(out, data_type);
1596 prt_printf(out, " %u:%llu gen %u allocated %u/%u",
1597 ob->dev, ob->bucket, ob->gen,
1598 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
1599 if (ob->ec)
1600 prt_printf(out, " ec idx %llu", ob->ec->idx);
1601 if (ob->on_partial_list)
1602 prt_str(out, " partial");
1603 prt_newline(out);
1604 }
1605
1606 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
1607 struct bch_dev *ca)
1608 {
1609 struct open_bucket *ob;
1610
1611 out->atomic++;
1612
1613 for (ob = c->open_buckets;
1614 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1615 ob++) {
1616 spin_lock(&ob->lock);
1617 if (ob->valid && !ob->on_partial_list &&
1618 (!ca || ob->dev == ca->dev_idx))
1619 bch2_open_bucket_to_text(out, c, ob);
1620 spin_unlock(&ob->lock);
1621 }
1622
1623 --out->atomic;
1624 }
1625
1626 void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
1627 {
1628 unsigned i;
1629
1630 out->atomic++;
1631 spin_lock(&c->freelist_lock);
1632
1633 for (i = 0; i < c->open_buckets_partial_nr; i++)
1634 bch2_open_bucket_to_text(out, c,
1635 c->open_buckets + c->open_buckets_partial[i]);
1636
1637 spin_unlock(&c->freelist_lock);
1638 --out->atomic;
1639 }
1640
1641 static const char * const bch2_write_point_states[] = {
1642 #define x(n) #n,
1643 WRITE_POINT_STATES()
1644 #undef x
1645 NULL
1646 };
1647
1648 static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
1649 struct write_point *wp)
1650 {
1651 struct open_bucket *ob;
1652 unsigned i;
1653
1654 prt_printf(out, "%lu: ", wp->write_point);
1655 prt_human_readable_u64(out, wp->sectors_allocated);
1656
1657 prt_printf(out, " last wrote: ");
1658 bch2_pr_time_units(out, sched_clock() - wp->last_used);
1659
1660 for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
1661 prt_printf(out, " %s: ", bch2_write_point_states[i]);
1662 bch2_pr_time_units(out, wp->time[i]);
1663 }
1664
1665 prt_newline(out);
1666
1667 printbuf_indent_add(out, 2);
1668 open_bucket_for_each(c, &wp->ptrs, ob, i)
1669 bch2_open_bucket_to_text(out, c, ob);
1670 printbuf_indent_sub(out, 2);
1671 }
1672
1673 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
1674 {
1675 struct write_point *wp;
1676
1677 prt_str(out, "Foreground write points\n");
1678 for (wp = c->write_points;
1679 wp < c->write_points + ARRAY_SIZE(c->write_points);
1680 wp++)
1681 bch2_write_point_to_text(out, c, wp);
1682
1683 prt_str(out, "Copygc write point\n");
1684 bch2_write_point_to_text(out, c, &c->copygc_write_point);
1685
1686 prt_str(out, "Rebalance write point\n");
1687 bch2_write_point_to_text(out, c, &c->rebalance_write_point);
1688
1689 prt_str(out, "Btree write point\n");
1690 bch2_write_point_to_text(out, c, &c->btree_write_point);
1691 }
1692
1693 void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
1694 {
1695 unsigned nr[BCH_DATA_NR];
1696
1697 memset(nr, 0, sizeof(nr));
1698
1699 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1700 nr[c->open_buckets[i].data_type]++;
1701
1702 printbuf_tabstops_reset(out);
1703 printbuf_tabstop_push(out, 24);
1704
1705 prt_printf(out, "capacity\t%llu\n", c->capacity);
1706 prt_printf(out, "reserved\t%llu\n", c->reserved);
1707 prt_printf(out, "hidden\t%llu\n", percpu_u64_get(&c->usage->hidden));
1708 prt_printf(out, "btree\t%llu\n", percpu_u64_get(&c->usage->btree));
1709 prt_printf(out, "data\t%llu\n", percpu_u64_get(&c->usage->data));
1710 prt_printf(out, "cached\t%llu\n", percpu_u64_get(&c->usage->cached));
1711 prt_printf(out, "reserved\t%llu\n", percpu_u64_get(&c->usage->reserved));
1712 prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
1713 prt_printf(out, "nr_inodes\t%llu\n", percpu_u64_get(&c->usage->nr_inodes));
1714
1715 prt_newline(out);
1716 prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
1717 prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
1718 prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
1719 prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
1720 prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
1721 prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
1722 prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
1723 }
1724
1725 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
1726 {
1727 struct bch_fs *c = ca->fs;
1728 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
1729 unsigned nr[BCH_DATA_NR];
1730
1731 memset(nr, 0, sizeof(nr));
1732
1733 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1734 nr[c->open_buckets[i].data_type]++;
1735
1736 printbuf_tabstops_reset(out);
1737 printbuf_tabstop_push(out, 12);
1738 printbuf_tabstop_push(out, 16);
1739 printbuf_tabstop_push(out, 16);
1740 printbuf_tabstop_push(out, 16);
1741 printbuf_tabstop_push(out, 16);
1742
1743 bch2_dev_usage_to_text(out, ca, &stats);
1744
1745 prt_newline(out);
1746
1747 prt_printf(out, "reserves:\n");
1748 for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
1749 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
1750
1751 prt_newline(out);
1752
1753 printbuf_tabstops_reset(out);
1754 printbuf_tabstop_push(out, 12);
1755 printbuf_tabstop_push(out, 16);
1756
1757 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
1758 prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
1759 }
1760
1761 static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
1762 {
1763 struct printbuf buf = PRINTBUF;
1764
1765 prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
1766 c->opts.allocator_stuck_timeout);
1767
1768 prt_printf(&buf, "Allocator debug:\n");
1769 printbuf_indent_add(&buf, 2);
1770 bch2_fs_alloc_debug_to_text(&buf, c);
1771 printbuf_indent_sub(&buf, 2);
1772 prt_newline(&buf);
1773
1774 for_each_online_member(c, ca) {
1775 prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
1776 printbuf_indent_add(&buf, 2);
1777 bch2_dev_alloc_debug_to_text(&buf, ca);
1778 printbuf_indent_sub(&buf, 2);
1779 prt_newline(&buf);
1780 }
1781
1782 prt_printf(&buf, "Copygc debug:\n");
1783 printbuf_indent_add(&buf, 2);
1784 bch2_copygc_wait_to_text(&buf, c);
1785 printbuf_indent_sub(&buf, 2);
1786 prt_newline(&buf);
1787
1788 prt_printf(&buf, "Journal debug:\n");
1789 printbuf_indent_add(&buf, 2);
1790 bch2_journal_debug_to_text(&buf, &c->journal);
1791 printbuf_indent_sub(&buf, 2);
1792
1793 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1794 printbuf_exit(&buf);
1795 }
1796
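/*
 * Rate limit the "allocator stuck" warning: if we already reported a stuck
 * allocator within the last two minutes, return 0 so that
 * __bch2_wait_on_allocator() skips the timed wait (and the report) and just
 * waits normally.
 */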
1797 static inline unsigned allocator_wait_timeout(struct bch_fs *c)
1798 {
1799 if (c->allocator_last_stuck &&
1800 time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
1801 return 0;
1802
1803 return c->opts.allocator_stuck_timeout * HZ;
1804 }
1805
1806 void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
1807 {
1808 unsigned t = allocator_wait_timeout(c);
1809
1810 if (t && closure_sync_timeout(cl, t)) {
1811 c->allocator_last_stuck = jiffies;
1812 bch2_print_allocator_stuck(c);
1813 }
1814
1815 closure_sync(cl);
1816 }
1817