1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2012 Google, Inc.
4 *
5 * Foreground allocator code: allocate buckets from freelist, and allocate in
6 * sector granularity from writepoints.
7 *
8 * bch2_bucket_alloc() allocates a single bucket from a specific device.
9 *
10 * bch2_bucket_alloc_set_trans() allocates one or more buckets from different devices
11 * in a given filesystem.
12 */
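/*
 * Illustrative sketch only (hypothetical caller, error handling elided) of
 * allocating and releasing a single bucket with the entry points below:
 *
 *	struct open_bucket *ob = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *						    BCH_DATA_user, cl);
 *	if (!IS_ERR(ob)) {
 *		... write to the bucket, make it reachable in the btree ...
 *		bch2_open_bucket_put(c, ob);
 *	}
 */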
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "backpointers.h"
18 #include "btree_iter.h"
19 #include "btree_update.h"
20 #include "btree_gc.h"
21 #include "buckets.h"
22 #include "buckets_waiting_for_journal.h"
23 #include "clock.h"
24 #include "debug.h"
25 #include "disk_groups.h"
26 #include "ec.h"
27 #include "error.h"
28 #include "io_write.h"
29 #include "journal.h"
30 #include "movinggc.h"
31 #include "nocow_locking.h"
32 #include "trace.h"
33
34 #include <linux/math64.h>
35 #include <linux/rculist.h>
36 #include <linux/rcupdate.h>
37
38 static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
39 struct mutex *lock)
40 {
41 if (!mutex_trylock(lock)) {
42 bch2_trans_unlock(trans);
43 mutex_lock(lock);
44 }
45 }
46
47 const char * const bch2_watermarks[] = {
48 #define x(t) #t,
49 BCH_WATERMARKS()
50 #undef x
51 NULL
52 };
53
54 /*
55 * Open buckets represent a bucket that's currently being allocated from. They
56 * serve two purposes:
57 *
58 * - They track buckets that have been partially allocated, allowing for
59 * sub-bucket sized allocations - they're used by the sector allocator below
60 *
61 * - They provide a reference to the buckets they own that mark and sweep GC
62 * can find, until the new allocation has a pointer to it inserted into the
63 * btree
64 *
65 * When allocating some space with the sector allocator, the allocation comes
66 * with a reference to an open bucket - the caller is required to put that
67 * reference _after_ doing the index update that makes its allocation reachable.
68 */
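/*
 * A minimal sketch of that rule (hypothetical write-path caller, details
 * elided) - the open_buckets references are dropped only once the index
 * update that makes the new extent reachable has been done:
 *
 *	bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);
 *	... btree update inserting the new extent ...
 *	bch2_open_buckets_put(c, &obs_we_took_refs_to);
 */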
69
70 void bch2_reset_alloc_cursors(struct bch_fs *c)
71 {
72 rcu_read_lock();
73 for_each_member_device_rcu(c, ca, NULL)
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
75 rcu_read_unlock();
76 }
77
78 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
79 {
80 open_bucket_idx_t idx = ob - c->open_buckets;
81 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
82
83 ob->hash = *slot;
84 *slot = idx;
85 }
86
87 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
88 {
89 open_bucket_idx_t idx = ob - c->open_buckets;
90 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
91
92 while (*slot != idx) {
93 BUG_ON(!*slot);
94 slot = &c->open_buckets[*slot].hash;
95 }
96
97 *slot = ob->hash;
98 ob->hash = 0;
99 }
100
101 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
102 {
103 struct bch_dev *ca = ob_dev(c, ob);
104
105 if (ob->ec) {
106 ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
107 return;
108 }
109
110 spin_lock(&ob->lock);
111 ob->valid = false;
112 ob->data_type = 0;
113 spin_unlock(&ob->lock);
114
115 spin_lock(&c->freelist_lock);
116 bch2_open_bucket_hash_remove(c, ob);
117
118 ob->freelist = c->open_buckets_freelist;
119 c->open_buckets_freelist = ob - c->open_buckets;
120
121 c->open_buckets_nr_free++;
122 ca->nr_open_buckets--;
123 spin_unlock(&c->freelist_lock);
124
125 closure_wake_up(&c->open_buckets_wait);
126 }
127
128 void bch2_open_bucket_write_error(struct bch_fs *c,
129 struct open_buckets *obs,
130 unsigned dev, int err)
131 {
132 struct open_bucket *ob;
133 unsigned i;
134
135 open_bucket_for_each(c, obs, ob, i)
136 if (ob->dev == dev && ob->ec)
137 bch2_ec_bucket_cancel(c, ob, err);
138 }
139
140 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
141 {
142 struct open_bucket *ob;
143
144 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
145
146 ob = c->open_buckets + c->open_buckets_freelist;
147 c->open_buckets_freelist = ob->freelist;
148 atomic_set(&ob->pin, 1);
149 ob->data_type = 0;
150
151 c->open_buckets_nr_free--;
152 return ob;
153 }
154
155 static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
156 {
157 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
158 return false;
159
160 return bch2_is_superblock_bucket(ca, b);
161 }
162
163 static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
164 {
165 BUG_ON(c->open_buckets_partial_nr >=
166 ARRAY_SIZE(c->open_buckets_partial));
167
168 spin_lock(&c->freelist_lock);
169 rcu_read_lock();
170 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
171 rcu_read_unlock();
172
173 ob->on_partial_list = true;
174 c->open_buckets_partial[c->open_buckets_partial_nr++] =
175 ob - c->open_buckets;
176 spin_unlock(&c->freelist_lock);
177
178 closure_wake_up(&c->open_buckets_wait);
179 closure_wake_up(&c->freelist_wait);
180 }
181
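/*
 * Common filter for allocation candidates: skip buckets that are currently
 * open (being written to), buckets whose previous contents may still need a
 * journal flush before they can safely be reused, and buckets held by a
 * nocow lock.
 */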
182 static inline bool may_alloc_bucket(struct bch_fs *c,
183 struct bpos bucket,
184 struct bucket_alloc_state *s)
185 {
186 if (bch2_bucket_is_open(c, bucket.inode, bucket.offset)) {
187 s->skipped_open++;
188 return false;
189 }
190
191 u64 journal_seq_ready =
192 bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
193 bucket.inode, bucket.offset);
194 if (journal_seq_ready > c->journal.flushed_seq_ondisk) {
195 if (journal_seq_ready > c->journal.flushing_seq)
196 s->need_journal_commit++;
197 s->skipped_need_journal_commit++;
198 return false;
199 }
200
201 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) {
202 s->skipped_nocow++;
203 return false;
204 }
205
206 return true;
207 }
208
209 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
210 u64 bucket, u8 gen,
211 enum bch_watermark watermark,
212 struct bucket_alloc_state *s,
213 struct closure *cl)
214 {
215 if (unlikely(is_superblock_bucket(c, ca, bucket)))
216 return NULL;
217
218 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
219 s->skipped_nouse++;
220 return NULL;
221 }
222
223 spin_lock(&c->freelist_lock);
224
225 if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(watermark))) {
226 if (cl)
227 closure_wait(&c->open_buckets_wait, cl);
228
229 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
230 spin_unlock(&c->freelist_lock);
231 return ERR_PTR(-BCH_ERR_open_buckets_empty);
232 }
233
234 /* Recheck under lock: */
235 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
236 spin_unlock(&c->freelist_lock);
237 s->skipped_open++;
238 return NULL;
239 }
240
241 struct open_bucket *ob = bch2_open_bucket_alloc(c);
242
243 spin_lock(&ob->lock);
244 ob->valid = true;
245 ob->sectors_free = ca->mi.bucket_size;
246 ob->dev = ca->dev_idx;
247 ob->gen = gen;
248 ob->bucket = bucket;
249 spin_unlock(&ob->lock);
250
251 ca->nr_open_buckets++;
252 bch2_open_bucket_hash_add(c, ob);
253
254 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
255 track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
256
257 spin_unlock(&c->freelist_lock);
258 return ob;
259 }
260
261 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
262 enum bch_watermark watermark,
263 struct bucket_alloc_state *s,
264 struct btree_iter *freespace_iter,
265 struct closure *cl)
266 {
267 struct bch_fs *c = trans->c;
268 u64 b = freespace_iter->pos.offset & ~(~0ULL << 56);
269
270 if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s))
271 return NULL;
272
273 u8 gen;
274 int ret = bch2_check_discard_freespace_key(trans, freespace_iter, &gen, true);
275 if (ret < 0)
276 return ERR_PTR(ret);
277 if (ret)
278 return NULL;
279
280 return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl);
281 }
282
283 /*
284 * This path is used before the freespace btree is initialized:
285 */
286 static noinline struct open_bucket *
287 bch2_bucket_alloc_early(struct btree_trans *trans,
288 struct bch_dev *ca,
289 enum bch_watermark watermark,
290 struct bucket_alloc_state *s,
291 struct closure *cl)
292 {
293 struct bch_fs *c = trans->c;
294 struct btree_iter iter, citer;
295 struct bkey_s_c k, ck;
296 struct open_bucket *ob = NULL;
297 u64 first_bucket = ca->mi.first_bucket;
298 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
299 u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
300 u64 alloc_cursor = alloc_start;
301 int ret;
302
303 /*
304 * Scan with an uncached iterator to avoid polluting the key cache. An
305 * uncached iter will return a cached key if one exists, but if not
306 * there is no other underlying protection for the associated key cache
307 * slot. To avoid racing bucket allocations, look up the cached key slot
308 * of any likely allocation candidate before attempting to proceed with
309 * the allocation. This provides proper exclusion on the associated
310 * bucket.
311 */
312 again:
313 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
314 BTREE_ITER_slots, k, ret) {
315 u64 bucket = k.k->p.offset;
316
317 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
318 break;
319
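/*
 * Btree allocations prefer buckets in regions already marked in the member
 * btree bitmap (and non-btree allocations prefer unmarked regions); if this
 * bucket doesn't match, skip ahead to the next bitmap granule.
 */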
320 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
321 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
322 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
323 if (s->btree_bitmap == BTREE_BITMAP_YES &&
324 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
325 break;
326
327 bucket = sector_to_bucket(ca,
328 round_up(bucket_to_sector(ca, bucket) + 1,
329 1ULL << ca->mi.btree_bitmap_shift));
330 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
331 s->buckets_seen++;
332 s->skipped_mi_btree_bitmap++;
333 continue;
334 }
335
336 struct bch_alloc_v4 a_convert;
337 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
338 if (a->data_type != BCH_DATA_free)
339 continue;
340
341 /* now check the cached key to serialize concurrent allocs of the bucket */
342 ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
343 ret = bkey_err(ck);
344 if (ret)
345 break;
346
347 a = bch2_alloc_to_v4(ck, &a_convert);
348 if (a->data_type != BCH_DATA_free)
349 goto next;
350
351 s->buckets_seen++;
352
353 ob = may_alloc_bucket(c, k.k->p, s)
354 ? __try_alloc_bucket(c, ca, k.k->p.offset, a->gen,
355 watermark, s, cl)
356 : NULL;
357 next:
358 bch2_set_btree_iter_dontneed(&citer);
359 bch2_trans_iter_exit(trans, &citer);
360 if (ob)
361 break;
362 }
363 bch2_trans_iter_exit(trans, &iter);
364
365 alloc_cursor = iter.pos.offset;
366
367 if (!ob && ret)
368 ob = ERR_PTR(ret);
369
370 if (!ob && alloc_start > first_bucket) {
371 alloc_cursor = alloc_start = first_bucket;
372 goto again;
373 }
374
375 *dev_alloc_cursor = alloc_cursor;
376
377 return ob;
378 }
379
380 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
381 struct bch_dev *ca,
382 enum bch_watermark watermark,
383 struct bucket_alloc_state *s,
384 struct closure *cl)
385 {
386 struct btree_iter iter;
387 struct bkey_s_c k;
388 struct open_bucket *ob = NULL;
389 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
390 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
391 u64 alloc_cursor = alloc_start;
392 int ret;
393 again:
394 for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace,
395 POS(ca->dev_idx, alloc_cursor),
396 POS(ca->dev_idx, U64_MAX),
397 0, k, ret) {
398 /*
399 * peek normally doesn't trim extents - they can span iter.pos,
400 * which is not what we want here:
401 */
402 iter.k.size = iter.k.p.offset - iter.pos.offset;
403
404 while (iter.k.size) {
405 s->buckets_seen++;
406
407 u64 bucket = iter.pos.offset & ~(~0ULL << 56);
408 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
409 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
410 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
411 if (s->btree_bitmap == BTREE_BITMAP_YES &&
412 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
413 goto fail;
414
415 bucket = sector_to_bucket(ca,
416 round_up(bucket_to_sector(ca, bucket + 1),
417 1ULL << ca->mi.btree_bitmap_shift));
418 alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
419
420 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
421 s->skipped_mi_btree_bitmap++;
422 goto next;
423 }
424
425 ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl);
426 if (ob) {
427 if (!IS_ERR(ob))
428 *dev_alloc_cursor = iter.pos.offset;
429 bch2_set_btree_iter_dontneed(&iter);
430 break;
431 }
432
433 iter.k.size--;
434 iter.pos.offset++;
435 }
436 next:
437 if (ob || ret)
438 break;
439 }
440 fail:
441 bch2_trans_iter_exit(trans, &iter);
442
443 BUG_ON(ob && ret);
444
445 if (ret)
446 ob = ERR_PTR(ret);
447
448 if (!ob && alloc_start > ca->mi.first_bucket) {
449 alloc_cursor = alloc_start = ca->mi.first_bucket;
450 goto again;
451 }
452
453 return ob;
454 }
455
456 static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
457 enum bch_watermark watermark,
458 enum bch_data_type data_type,
459 struct closure *cl,
460 struct bch_dev_usage *usage,
461 struct bucket_alloc_state *s,
462 struct open_bucket *ob)
463 {
464 struct printbuf buf = PRINTBUF;
465
466 printbuf_tabstop_push(&buf, 24);
467
468 prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
469 prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
470 prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
471 prt_printf(&buf, "blocking\t%u\n", cl != NULL);
472 prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
473 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
474 prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
475 bch2_copygc_wait_amount(c),
476 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
477 prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
478 prt_printf(&buf, "open\t%llu\n", s->skipped_open);
479 prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
480 prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
481 prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
482 prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
483
484 if (!IS_ERR(ob)) {
485 prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
486 trace_bucket_alloc(c, buf.buf);
487 } else {
488 prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
489 trace_bucket_alloc_fail(c, buf.buf);
490 }
491
492 printbuf_exit(&buf);
493 }
494
495 /**
496 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
497 * @trans: transaction object
498 * @ca: device to allocate from
499 * @watermark: how important is this allocation?
500 * @data_type: BCH_DATA_journal, btree, user...
501 * @cl: if not NULL, closure to be used to wait if buckets not available
502 * @nowait: if true, do not wait for buckets to become available
503 * @usage: also returns the current device usage
504 *
505 * Returns: an open_bucket on success, or an ERR_PTR() on failure.
506 */
507 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
508 struct bch_dev *ca,
509 enum bch_watermark watermark,
510 enum bch_data_type data_type,
511 struct closure *cl,
512 bool nowait,
513 struct bch_dev_usage *usage)
514 {
515 struct bch_fs *c = trans->c;
516 struct open_bucket *ob = NULL;
517 bool freespace = READ_ONCE(ca->mi.freespace_initialized);
518 u64 avail;
519 struct bucket_alloc_state s = {
520 .btree_bitmap = data_type == BCH_DATA_btree,
521 };
522 bool waiting = nowait;
523 again:
524 bch2_dev_usage_read_fast(ca, usage);
525 avail = dev_buckets_free(ca, *usage, watermark);
526
527 if (usage->d[BCH_DATA_need_discard].buckets > avail)
528 bch2_dev_do_discards(ca);
529
530 if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
531 bch2_gc_gens_async(c);
532
533 if (should_invalidate_buckets(ca, *usage))
534 bch2_dev_do_invalidates(ca);
535
536 if (!avail) {
537 if (watermark > BCH_WATERMARK_normal &&
538 c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
539 goto alloc;
540
541 if (cl && !waiting) {
542 closure_wait(&c->freelist_wait, cl);
543 waiting = true;
544 goto again;
545 }
546
547 track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
548
549 ob = ERR_PTR(-BCH_ERR_freelist_empty);
550 goto err;
551 }
552
553 if (waiting)
554 closure_wake_up(&c->freelist_wait);
555 alloc:
556 ob = likely(freespace)
557 ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
558 : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
559
560 if (s.need_journal_commit * 2 > avail)
561 bch2_journal_flush_async(&c->journal, NULL);
562
563 if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
564 s.btree_bitmap = BTREE_BITMAP_ANY;
565 goto alloc;
566 }
567
568 if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
569 freespace = false;
570 goto alloc;
571 }
572 err:
573 if (!ob)
574 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
575
576 if (!IS_ERR(ob))
577 ob->data_type = data_type;
578
579 if (!IS_ERR(ob))
580 count_event(c, bucket_alloc);
581 else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
582 count_event(c, bucket_alloc_fail);
583
584 if (!IS_ERR(ob)
585 ? trace_bucket_alloc_enabled()
586 : trace_bucket_alloc_fail_enabled())
587 trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);
588
589 return ob;
590 }
591
592 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
593 enum bch_watermark watermark,
594 enum bch_data_type data_type,
595 struct closure *cl)
596 {
597 struct bch_dev_usage usage;
598 struct open_bucket *ob;
599
600 bch2_trans_do(c,
601 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
602 data_type, cl, false, &usage)));
603 return ob;
604 }
605
606 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
607 unsigned l, unsigned r)
608 {
609 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
610 (stripe->next_alloc[l] < stripe->next_alloc[r]));
611 }
612
613 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
614
615 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
616 struct dev_stripe_state *stripe,
617 struct bch_devs_mask *devs)
618 {
619 struct dev_alloc_list ret = { .nr = 0 };
620 unsigned i;
621
622 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
623 ret.data[ret.nr++] = i;
624
625 bubble_sort(ret.data, ret.nr, dev_stripe_cmp);
626 return ret;
627 }
628
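/*
 * Device stripe weighting: next_alloc[] accumulates, per device, a cost that
 * grows by the reciprocal of the device's free space (scaled by 2^48) each
 * time we allocate from it, so emptier devices fill more slowly;
 * bch2_dev_alloc_list() sorts devices by this cost before allocating. The
 * trailing loop rescales all entries so the counters stay bounded.
 *
 * Worked example with made-up numbers: a device with 2^20 free buckets adds
 * 2^28 per allocation, one with only 2^10 free buckets adds 2^38, so the
 * nearly full device ends up being picked roughly 1000x less often.
 */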
629 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
630 struct dev_stripe_state *stripe,
631 struct bch_dev_usage *usage)
632 {
633 u64 *v = stripe->next_alloc + ca->dev_idx;
634 u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
635 u64 free_space_inv = free_space
636 ? div64_u64(1ULL << 48, free_space)
637 : 1ULL << 48;
638 u64 scale = *v / 4;
639
640 if (*v + free_space_inv >= *v)
641 *v += free_space_inv;
642 else
643 *v = U64_MAX;
644
645 for (v = stripe->next_alloc;
646 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
647 *v = *v < scale ? 0 : *v - scale;
648 }
649
650 void bch2_dev_stripe_increment(struct bch_dev *ca,
651 struct dev_stripe_state *stripe)
652 {
653 struct bch_dev_usage usage;
654
655 bch2_dev_usage_read_fast(ca, &usage);
656 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
657 }
658
659 static int add_new_bucket(struct bch_fs *c,
660 struct open_buckets *ptrs,
661 struct bch_devs_mask *devs_may_alloc,
662 unsigned nr_replicas,
663 unsigned *nr_effective,
664 bool *have_cache,
665 struct open_bucket *ob)
666 {
667 unsigned durability = ob_dev(c, ob)->mi.durability;
668
669 BUG_ON(*nr_effective >= nr_replicas);
670
671 __clear_bit(ob->dev, devs_may_alloc->d);
672 *nr_effective += durability;
673 *have_cache |= !durability;
674
675 ob_push(c, ptrs, ob);
676
677 if (*nr_effective >= nr_replicas)
678 return 1;
679 if (ob->ec)
680 return 1;
681 return 0;
682 }
683
684 int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
685 struct open_buckets *ptrs,
686 struct dev_stripe_state *stripe,
687 struct bch_devs_mask *devs_may_alloc,
688 unsigned nr_replicas,
689 unsigned *nr_effective,
690 bool *have_cache,
691 enum bch_write_flags flags,
692 enum bch_data_type data_type,
693 enum bch_watermark watermark,
694 struct closure *cl)
695 {
696 struct bch_fs *c = trans->c;
697 int ret = -BCH_ERR_insufficient_devices;
698
699 BUG_ON(*nr_effective >= nr_replicas);
700
701 struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc);
702 darray_for_each(devs_sorted, i) {
703 struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i);
704 if (!ca)
705 continue;
706
707 if (!ca->mi.durability && *have_cache) {
708 bch2_dev_put(ca);
709 continue;
710 }
711
712 struct bch_dev_usage usage;
713 struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
714 cl, flags & BCH_WRITE_alloc_nowait, &usage);
715 if (!IS_ERR(ob))
716 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
717 bch2_dev_put(ca);
718
719 if (IS_ERR(ob)) {
720 ret = PTR_ERR(ob);
721 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
722 break;
723 continue;
724 }
725
726 if (add_new_bucket(c, ptrs, devs_may_alloc,
727 nr_replicas, nr_effective,
728 have_cache, ob)) {
729 ret = 0;
730 break;
731 }
732 }
733
734 return ret;
735 }
736
737 /* Allocate from stripes: */
738
739 /*
740 * if we can't allocate a new stripe because there are already too many
741 * partially filled stripes, force allocating from an existing stripe even when
742 * it's to a device we don't want:
743 */
744
745 static int bucket_alloc_from_stripe(struct btree_trans *trans,
746 struct open_buckets *ptrs,
747 struct write_point *wp,
748 struct bch_devs_mask *devs_may_alloc,
749 u16 target,
750 unsigned nr_replicas,
751 unsigned *nr_effective,
752 bool *have_cache,
753 enum bch_watermark watermark,
754 enum bch_write_flags flags,
755 struct closure *cl)
756 {
757 struct bch_fs *c = trans->c;
758 int ret = 0;
759
760 if (nr_replicas < 2)
761 return 0;
762
763 if (ec_open_bucket(c, ptrs))
764 return 0;
765
766 struct ec_stripe_head *h =
767 bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
768 if (IS_ERR(h))
769 return PTR_ERR(h);
770 if (!h)
771 return 0;
772
773 struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
774 darray_for_each(devs_sorted, i)
775 for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
776 if (!h->s->blocks[ec_idx])
777 continue;
778
779 struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx];
780 if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) {
781 ob->ec_idx = ec_idx;
782 ob->ec = h->s;
783 ec_stripe_new_get(h->s, STRIPE_REF_io);
784
785 ret = add_new_bucket(c, ptrs, devs_may_alloc,
786 nr_replicas, nr_effective,
787 have_cache, ob);
788 goto out;
789 }
790 }
791 out:
792 bch2_ec_stripe_head_put(c, h);
793 return ret;
794 }
795
796 /* Sector allocator */
797
798 static bool want_bucket(struct bch_fs *c,
799 struct write_point *wp,
800 struct bch_devs_mask *devs_may_alloc,
801 bool *have_cache, bool ec,
802 struct open_bucket *ob)
803 {
804 struct bch_dev *ca = ob_dev(c, ob);
805
806 if (!test_bit(ob->dev, devs_may_alloc->d))
807 return false;
808
809 if (ob->data_type != wp->data_type)
810 return false;
811
812 if (!ca->mi.durability &&
813 (wp->data_type == BCH_DATA_btree || ec || *have_cache))
814 return false;
815
816 if (ec != (ob->ec != NULL))
817 return false;
818
819 return true;
820 }
821
822 static int bucket_alloc_set_writepoint(struct bch_fs *c,
823 struct open_buckets *ptrs,
824 struct write_point *wp,
825 struct bch_devs_mask *devs_may_alloc,
826 unsigned nr_replicas,
827 unsigned *nr_effective,
828 bool *have_cache,
829 bool ec)
830 {
831 struct open_buckets ptrs_skip = { .nr = 0 };
832 struct open_bucket *ob;
833 unsigned i;
834 int ret = 0;
835
836 open_bucket_for_each(c, &wp->ptrs, ob, i) {
837 if (!ret && want_bucket(c, wp, devs_may_alloc,
838 have_cache, ec, ob))
839 ret = add_new_bucket(c, ptrs, devs_may_alloc,
840 nr_replicas, nr_effective,
841 have_cache, ob);
842 else
843 ob_push(c, &ptrs_skip, ob);
844 }
845 wp->ptrs = ptrs_skip;
846
847 return ret;
848 }
849
850 static int bucket_alloc_set_partial(struct bch_fs *c,
851 struct open_buckets *ptrs,
852 struct write_point *wp,
853 struct bch_devs_mask *devs_may_alloc,
854 unsigned nr_replicas,
855 unsigned *nr_effective,
856 bool *have_cache, bool ec,
857 enum bch_watermark watermark)
858 {
859 int i, ret = 0;
860
861 if (!c->open_buckets_partial_nr)
862 return 0;
863
864 spin_lock(&c->freelist_lock);
865
866 if (!c->open_buckets_partial_nr)
867 goto unlock;
868
869 for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
870 struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
871
872 if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
873 struct bch_dev *ca = ob_dev(c, ob);
874 struct bch_dev_usage usage;
875 u64 avail;
876
877 bch2_dev_usage_read_fast(ca, &usage);
878 avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
879 if (!avail)
880 continue;
881
882 array_remove_item(c->open_buckets_partial,
883 c->open_buckets_partial_nr,
884 i);
885 ob->on_partial_list = false;
886
887 rcu_read_lock();
888 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
889 rcu_read_unlock();
890
891 ret = add_new_bucket(c, ptrs, devs_may_alloc,
892 nr_replicas, nr_effective,
893 have_cache, ob);
894 if (ret)
895 break;
896 }
897 }
898 unlock:
899 spin_unlock(&c->freelist_lock);
900 return ret;
901 }
902
903 static int __open_bucket_add_buckets(struct btree_trans *trans,
904 struct open_buckets *ptrs,
905 struct write_point *wp,
906 struct bch_devs_list *devs_have,
907 u16 target,
908 bool erasure_code,
909 unsigned nr_replicas,
910 unsigned *nr_effective,
911 bool *have_cache,
912 enum bch_watermark watermark,
913 enum bch_write_flags flags,
914 struct closure *_cl)
915 {
916 struct bch_fs *c = trans->c;
917 struct bch_devs_mask devs;
918 struct open_bucket *ob;
919 struct closure *cl = NULL;
920 unsigned i;
921 int ret;
922
923 devs = target_rw_devs(c, wp->data_type, target);
924
925 /* Don't allocate from devices we already have pointers to: */
926 darray_for_each(*devs_have, i)
927 __clear_bit(*i, devs.d);
928
929 open_bucket_for_each(c, ptrs, ob, i)
930 __clear_bit(ob->dev, devs.d);
931
932 ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
933 nr_replicas, nr_effective,
934 have_cache, erasure_code);
935 if (ret)
936 return ret;
937
938 ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
939 nr_replicas, nr_effective,
940 have_cache, erasure_code, watermark);
941 if (ret)
942 return ret;
943
944 if (erasure_code) {
945 ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
946 target,
947 nr_replicas, nr_effective,
948 have_cache,
949 watermark, flags, _cl);
950 } else {
951 retry_blocking:
952 /*
953 * Try nonblocking first, so that if one device is full we'll try from
954 * other devices:
955 */
956 ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
957 nr_replicas, nr_effective, have_cache,
958 flags, wp->data_type, watermark, cl);
959 if (ret &&
960 !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
961 !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
962 !cl && _cl) {
963 cl = _cl;
964 goto retry_blocking;
965 }
966 }
967
968 return ret;
969 }
970
971 static int open_bucket_add_buckets(struct btree_trans *trans,
972 struct open_buckets *ptrs,
973 struct write_point *wp,
974 struct bch_devs_list *devs_have,
975 u16 target,
976 unsigned erasure_code,
977 unsigned nr_replicas,
978 unsigned *nr_effective,
979 bool *have_cache,
980 enum bch_watermark watermark,
981 enum bch_write_flags flags,
982 struct closure *cl)
983 {
984 int ret;
985
986 if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
987 ret = __open_bucket_add_buckets(trans, ptrs, wp,
988 devs_have, target, erasure_code,
989 nr_replicas, nr_effective, have_cache,
990 watermark, flags, cl);
991 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
992 bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
993 bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
994 bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
995 return ret;
996 if (*nr_effective >= nr_replicas)
997 return 0;
998 }
999
1000 ret = __open_bucket_add_buckets(trans, ptrs, wp,
1001 devs_have, target, false,
1002 nr_replicas, nr_effective, have_cache,
1003 watermark, flags, cl);
1004 return ret < 0 ? ret : 0;
1005 }
1006
1007 /**
1008 * should_drop_bucket - check if this open_bucket should go away
1009 * @ob: open_bucket to predicate on
1010 * @c: filesystem handle
1011 * @ca: if set, we're killing buckets for a particular device
1012 * @ec: if true, we're shutting down erasure coding and killing all ec
1013 * open_buckets
1014 * if neither @ca nor @ec is given, we're shutting down the whole filesystem
1015 * Returns: true if we should kill this open_bucket
1016 *
1017 * We're killing open_buckets because we're shutting down a device, erasure
1018 * coding, or the entire filesystem - check if this open_bucket matches:
1019 */
1020 static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
1021 struct bch_dev *ca, bool ec)
1022 {
1023 if (ec) {
1024 return ob->ec != NULL;
1025 } else if (ca) {
1026 bool drop = ob->dev == ca->dev_idx;
1027 struct open_bucket *ob2;
1028 unsigned i;
1029
1030 if (!drop && ob->ec) {
1031 unsigned nr_blocks;
1032
1033 mutex_lock(&ob->ec->lock);
1034 nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
1035
1036 for (i = 0; i < nr_blocks; i++) {
1037 if (!ob->ec->blocks[i])
1038 continue;
1039
1040 ob2 = c->open_buckets + ob->ec->blocks[i];
1041 drop |= ob2->dev == ca->dev_idx;
1042 }
1043 mutex_unlock(&ob->ec->lock);
1044 }
1045
1046 return drop;
1047 } else {
1048 return true;
1049 }
1050 }
1051
1052 static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
1053 bool ec, struct write_point *wp)
1054 {
1055 struct open_buckets ptrs = { .nr = 0 };
1056 struct open_bucket *ob;
1057 unsigned i;
1058
1059 mutex_lock(&wp->lock);
1060 open_bucket_for_each(c, &wp->ptrs, ob, i)
1061 if (should_drop_bucket(ob, c, ca, ec))
1062 bch2_open_bucket_put(c, ob);
1063 else
1064 ob_push(c, &ptrs, ob);
1065 wp->ptrs = ptrs;
1066 mutex_unlock(&wp->lock);
1067 }
1068
1069 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
1070 bool ec)
1071 {
1072 unsigned i;
1073
1074 /* Next, close write points that point to this device... */
1075 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1076 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
1077
1078 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
1079 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
1080 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
1081
1082 mutex_lock(&c->btree_reserve_cache_lock);
1083 while (c->btree_reserve_cache_nr) {
1084 struct btree_alloc *a =
1085 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1086
1087 bch2_open_buckets_put(c, &a->ob);
1088 }
1089 mutex_unlock(&c->btree_reserve_cache_lock);
1090
1091 spin_lock(&c->freelist_lock);
1092 i = 0;
1093 while (i < c->open_buckets_partial_nr) {
1094 struct open_bucket *ob =
1095 c->open_buckets + c->open_buckets_partial[i];
1096
1097 if (should_drop_bucket(ob, c, ca, ec)) {
1098 --c->open_buckets_partial_nr;
1099 swap(c->open_buckets_partial[i],
1100 c->open_buckets_partial[c->open_buckets_partial_nr]);
1101
1102 ob->on_partial_list = false;
1103
1104 rcu_read_lock();
1105 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
1106 rcu_read_unlock();
1107
1108 spin_unlock(&c->freelist_lock);
1109 bch2_open_bucket_put(c, ob);
1110 spin_lock(&c->freelist_lock);
1111 } else {
1112 i++;
1113 }
1114 }
1115 spin_unlock(&c->freelist_lock);
1116
1117 bch2_ec_stop_dev(c, ca);
1118 }
1119
1120 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
1121 unsigned long write_point)
1122 {
1123 unsigned hash =
1124 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1125
1126 return &c->write_points_hash[hash];
1127 }
1128
1129 static struct write_point *__writepoint_find(struct hlist_head *head,
1130 unsigned long write_point)
1131 {
1132 struct write_point *wp;
1133
1134 rcu_read_lock();
1135 hlist_for_each_entry_rcu(wp, head, node)
1136 if (wp->write_point == write_point)
1137 goto out;
1138 wp = NULL;
1139 out:
1140 rcu_read_unlock();
1141 return wp;
1142 }
1143
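/*
 * Each write point can strand partially written open buckets; grow the write
 * point pool only while (and shrink it when) the worst-case stranded space,
 * write_points_nr * bucket_size_max, stays under 1/factor of free space.
 */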
1144 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
1145 {
1146 u64 stranded = c->write_points_nr * c->bucket_size_max;
1147 u64 free = bch2_fs_usage_read_short(c).free;
1148
1149 return stranded * factor > free;
1150 }
1151
1152 static bool try_increase_writepoints(struct bch_fs *c)
1153 {
1154 struct write_point *wp;
1155
1156 if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
1157 too_many_writepoints(c, 32))
1158 return false;
1159
1160 wp = c->write_points + c->write_points_nr++;
1161 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1162 return true;
1163 }
1164
1165 static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
1166 {
1167 struct bch_fs *c = trans->c;
1168 struct write_point *wp;
1169 struct open_bucket *ob;
1170 unsigned i;
1171
1172 mutex_lock(&c->write_points_hash_lock);
1173 if (c->write_points_nr < old_nr) {
1174 mutex_unlock(&c->write_points_hash_lock);
1175 return true;
1176 }
1177
1178 if (c->write_points_nr == 1 ||
1179 !too_many_writepoints(c, 8)) {
1180 mutex_unlock(&c->write_points_hash_lock);
1181 return false;
1182 }
1183
1184 wp = c->write_points + --c->write_points_nr;
1185
1186 hlist_del_rcu(&wp->node);
1187 mutex_unlock(&c->write_points_hash_lock);
1188
1189 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1190 open_bucket_for_each(c, &wp->ptrs, ob, i)
1191 open_bucket_free_unused(c, ob);
1192 wp->ptrs.nr = 0;
1193 mutex_unlock(&wp->lock);
1194 return true;
1195 }
1196
1197 static struct write_point *writepoint_find(struct btree_trans *trans,
1198 unsigned long write_point)
1199 {
1200 struct bch_fs *c = trans->c;
1201 struct write_point *wp, *oldest;
1202 struct hlist_head *head;
1203
1204 if (!(write_point & 1UL)) {
1205 wp = (struct write_point *) write_point;
1206 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1207 return wp;
1208 }
1209
1210 head = writepoint_hash(c, write_point);
1211 restart_find:
1212 wp = __writepoint_find(head, write_point);
1213 if (wp) {
1214 lock_wp:
1215 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1216 if (wp->write_point == write_point)
1217 goto out;
1218 mutex_unlock(&wp->lock);
1219 goto restart_find;
1220 }
1221 restart_find_oldest:
1222 oldest = NULL;
1223 for (wp = c->write_points;
1224 wp < c->write_points + c->write_points_nr; wp++)
1225 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1226 oldest = wp;
1227
1228 bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
1229 bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
1230 if (oldest >= c->write_points + c->write_points_nr ||
1231 try_increase_writepoints(c)) {
1232 mutex_unlock(&c->write_points_hash_lock);
1233 mutex_unlock(&oldest->lock);
1234 goto restart_find_oldest;
1235 }
1236
1237 wp = __writepoint_find(head, write_point);
1238 if (wp && wp != oldest) {
1239 mutex_unlock(&c->write_points_hash_lock);
1240 mutex_unlock(&oldest->lock);
1241 goto lock_wp;
1242 }
1243
1244 wp = oldest;
1245 hlist_del_rcu(&wp->node);
1246 wp->write_point = write_point;
1247 hlist_add_head_rcu(&wp->node, head);
1248 mutex_unlock(&c->write_points_hash_lock);
1249 out:
1250 wp->last_used = local_clock();
1251 return wp;
1252 }
1253
1254 static noinline void
1255 deallocate_extra_replicas(struct bch_fs *c,
1256 struct open_buckets *ptrs,
1257 struct open_buckets *ptrs_no_use,
1258 unsigned extra_replicas)
1259 {
1260 struct open_buckets ptrs2 = { 0 };
1261 struct open_bucket *ob;
1262 unsigned i;
1263
1264 open_bucket_for_each(c, ptrs, ob, i) {
1265 unsigned d = ob_dev(c, ob)->mi.durability;
1266
1267 if (d && d <= extra_replicas) {
1268 extra_replicas -= d;
1269 ob_push(c, ptrs_no_use, ob);
1270 } else {
1271 ob_push(c, &ptrs2, ob);
1272 }
1273 }
1274
1275 *ptrs = ptrs2;
1276 }
1277
1278 /*
1279 * Get us an open_bucket we can allocate from, return with it locked:
1280 */
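/*
 * A minimal sketch of the expected calling sequence (hypothetical caller,
 * real arguments and error handling elided):
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, 0, write_point,
 *				devs_have, nr_replicas, nr_replicas_required,
 *				watermark, flags, cl, &wp);
 *	if (!ret) {
 *		bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, cached);
 *		bch2_alloc_sectors_done(c, wp);		(drops wp->lock)
 *	}
 */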
1281 int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
1282 unsigned target,
1283 unsigned erasure_code,
1284 struct write_point_specifier write_point,
1285 struct bch_devs_list *devs_have,
1286 unsigned nr_replicas,
1287 unsigned nr_replicas_required,
1288 enum bch_watermark watermark,
1289 enum bch_write_flags flags,
1290 struct closure *cl,
1291 struct write_point **wp_ret)
1292 {
1293 struct bch_fs *c = trans->c;
1294 struct write_point *wp;
1295 struct open_bucket *ob;
1296 struct open_buckets ptrs;
1297 unsigned nr_effective, write_points_nr;
1298 bool have_cache;
1299 int ret;
1300 int i;
1301
1302 if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
1303 erasure_code = false;
1304
1305 BUG_ON(!nr_replicas || !nr_replicas_required);
1306 retry:
1307 ptrs.nr = 0;
1308 nr_effective = 0;
1309 write_points_nr = c->write_points_nr;
1310 have_cache = false;
1311
1312 *wp_ret = wp = writepoint_find(trans, write_point.v);
1313
1314 ret = bch2_trans_relock(trans);
1315 if (ret)
1316 goto err;
1317
1318 /* metadata may not allocate on cache devices: */
1319 if (wp->data_type != BCH_DATA_user)
1320 have_cache = true;
1321
1322 if (target && !(flags & BCH_WRITE_only_specified_devs)) {
1323 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1324 target, erasure_code,
1325 nr_replicas, &nr_effective,
1326 &have_cache, watermark,
1327 flags, NULL);
1328 if (!ret ||
1329 bch2_err_matches(ret, BCH_ERR_transaction_restart))
1330 goto alloc_done;
1331
1332 /* Don't retry from all devices if we're out of open buckets: */
1333 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
1334 int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1335 target, erasure_code,
1336 nr_replicas, &nr_effective,
1337 &have_cache, watermark,
1338 flags, cl);
1339 if (!ret2 ||
1340 bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
1341 bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
1342 ret = ret2;
1343 goto alloc_done;
1344 }
1345 }
1346
1347 /*
1348 * Only try to allocate cache (durability = 0 devices) from the
1349 * specified target:
1350 */
1351 have_cache = true;
1352
1353 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1354 0, erasure_code,
1355 nr_replicas, &nr_effective,
1356 &have_cache, watermark,
1357 flags, cl);
1358 } else {
1359 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1360 target, erasure_code,
1361 nr_replicas, &nr_effective,
1362 &have_cache, watermark,
1363 flags, cl);
1364 }
1365 alloc_done:
1366 BUG_ON(!ret && nr_effective < nr_replicas);
1367
1368 if (erasure_code && !ec_open_bucket(c, &ptrs))
1369 pr_debug("failed to get ec bucket: ret %u", ret);
1370
1371 if (ret == -BCH_ERR_insufficient_devices &&
1372 nr_effective >= nr_replicas_required)
1373 ret = 0;
1374
1375 if (ret)
1376 goto err;
1377
1378 if (nr_effective > nr_replicas)
1379 deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
1380
1381 /* Free buckets we didn't use: */
1382 open_bucket_for_each(c, &wp->ptrs, ob, i)
1383 open_bucket_free_unused(c, ob);
1384
1385 wp->ptrs = ptrs;
1386
1387 wp->sectors_free = UINT_MAX;
1388
1389 open_bucket_for_each(c, &wp->ptrs, ob, i)
1390 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1391
1392 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1393
1394 return 0;
1395 err:
1396 open_bucket_for_each(c, &wp->ptrs, ob, i)
1397 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1398 ob_push(c, &ptrs, ob);
1399 else
1400 open_bucket_free_unused(c, ob);
1401 wp->ptrs = ptrs;
1402
1403 mutex_unlock(&wp->lock);
1404
1405 if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1406 try_decrease_writepoints(trans, write_points_nr))
1407 goto retry;
1408
1409 if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
1410 ret = -BCH_ERR_bucket_alloc_blocked;
1411
1412 if (cl && !(flags & BCH_WRITE_alloc_nowait) &&
1413 bch2_err_matches(ret, BCH_ERR_freelist_empty))
1414 ret = -BCH_ERR_bucket_alloc_blocked;
1415
1416 return ret;
1417 }
1418
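/*
 * Build an extent pointer for the next unwritten sector of an open bucket:
 * the offset is the bucket's first sector plus what has already been
 * consumed (bucket_size - sectors_free).
 */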
1419 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1420 {
1421 struct bch_dev *ca = ob_dev(c, ob);
1422
1423 return (struct bch_extent_ptr) {
1424 .type = 1 << BCH_EXTENT_ENTRY_ptr,
1425 .gen = ob->gen,
1426 .dev = ob->dev,
1427 .offset = bucket_to_sector(ca, ob->bucket) +
1428 ca->mi.bucket_size -
1429 ob->sectors_free,
1430 };
1431 }
1432
1433 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1434 struct bkey_i *k, unsigned sectors,
1435 bool cached)
1436 {
1437 bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
1438 }
1439
1440 /*
1441 * Append pointers to the space we just allocated to @k, and mark @sectors space
1442 * as allocated out of @ob
1443 */
1444 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1445 {
1446 bch2_alloc_sectors_done_inlined(c, wp);
1447 }
1448
1449 static inline void writepoint_init(struct write_point *wp,
1450 enum bch_data_type type)
1451 {
1452 mutex_init(&wp->lock);
1453 wp->data_type = type;
1454
1455 INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1456 INIT_LIST_HEAD(&wp->writes);
1457 spin_lock_init(&wp->writes_lock);
1458 }
1459
1460 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1461 {
1462 struct open_bucket *ob;
1463 struct write_point *wp;
1464
1465 mutex_init(&c->write_points_hash_lock);
1466 c->write_points_nr = ARRAY_SIZE(c->write_points);
1467
1468 /* open bucket 0 is a sentinel NULL: */
1469 spin_lock_init(&c->open_buckets[0].lock);
1470
1471 for (ob = c->open_buckets + 1;
1472 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1473 spin_lock_init(&ob->lock);
1474 c->open_buckets_nr_free++;
1475
1476 ob->freelist = c->open_buckets_freelist;
1477 c->open_buckets_freelist = ob - c->open_buckets;
1478 }
1479
1480 writepoint_init(&c->btree_write_point, BCH_DATA_btree);
1481 writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
1482 writepoint_init(&c->copygc_write_point, BCH_DATA_user);
1483
1484 for (wp = c->write_points;
1485 wp < c->write_points + c->write_points_nr; wp++) {
1486 writepoint_init(wp, BCH_DATA_user);
1487
1488 wp->last_used = local_clock();
1489 wp->write_point = (unsigned long) wp;
1490 hlist_add_head_rcu(&wp->node,
1491 writepoint_hash(c, wp->write_point));
1492 }
1493 }
1494
1495 void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
1496 {
1497 struct bch_dev *ca = ob_dev(c, ob);
1498 unsigned data_type = ob->data_type;
1499 barrier(); /* READ_ONCE() doesn't work on bitfields */
1500
1501 prt_printf(out, "%zu ref %u ",
1502 ob - c->open_buckets,
1503 atomic_read(&ob->pin));
1504 bch2_prt_data_type(out, data_type);
1505 prt_printf(out, " %u:%llu gen %u allocated %u/%u",
1506 ob->dev, ob->bucket, ob->gen,
1507 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
1508 if (ob->ec)
1509 prt_printf(out, " ec idx %llu", ob->ec->idx);
1510 if (ob->on_partial_list)
1511 prt_str(out, " partial");
1512 prt_newline(out);
1513 }
1514
1515 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
1516 struct bch_dev *ca)
1517 {
1518 struct open_bucket *ob;
1519
1520 out->atomic++;
1521
1522 for (ob = c->open_buckets;
1523 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1524 ob++) {
1525 spin_lock(&ob->lock);
1526 if (ob->valid && (!ca || ob->dev == ca->dev_idx))
1527 bch2_open_bucket_to_text(out, c, ob);
1528 spin_unlock(&ob->lock);
1529 }
1530
1531 --out->atomic;
1532 }
1533
1534 void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
1535 {
1536 unsigned i;
1537
1538 out->atomic++;
1539 spin_lock(&c->freelist_lock);
1540
1541 for (i = 0; i < c->open_buckets_partial_nr; i++)
1542 bch2_open_bucket_to_text(out, c,
1543 c->open_buckets + c->open_buckets_partial[i]);
1544
1545 spin_unlock(&c->freelist_lock);
1546 --out->atomic;
1547 }
1548
1549 static const char * const bch2_write_point_states[] = {
1550 #define x(n) #n,
1551 WRITE_POINT_STATES()
1552 #undef x
1553 NULL
1554 };
1555
1556 static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
1557 struct write_point *wp)
1558 {
1559 struct open_bucket *ob;
1560 unsigned i;
1561
1562 prt_printf(out, "%lu: ", wp->write_point);
1563 prt_human_readable_u64(out, wp->sectors_allocated);
1564
1565 prt_printf(out, " last wrote: ");
1566 bch2_pr_time_units(out, sched_clock() - wp->last_used);
1567
1568 for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
1569 prt_printf(out, " %s: ", bch2_write_point_states[i]);
1570 bch2_pr_time_units(out, wp->time[i]);
1571 }
1572
1573 prt_newline(out);
1574
1575 printbuf_indent_add(out, 2);
1576 open_bucket_for_each(c, &wp->ptrs, ob, i)
1577 bch2_open_bucket_to_text(out, c, ob);
1578 printbuf_indent_sub(out, 2);
1579 }
1580
1581 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
1582 {
1583 struct write_point *wp;
1584
1585 prt_str(out, "Foreground write points\n");
1586 for (wp = c->write_points;
1587 wp < c->write_points + ARRAY_SIZE(c->write_points);
1588 wp++)
1589 bch2_write_point_to_text(out, c, wp);
1590
1591 prt_str(out, "Copygc write point\n");
1592 bch2_write_point_to_text(out, c, &c->copygc_write_point);
1593
1594 prt_str(out, "Rebalance write point\n");
1595 bch2_write_point_to_text(out, c, &c->rebalance_write_point);
1596
1597 prt_str(out, "Btree write point\n");
1598 bch2_write_point_to_text(out, c, &c->btree_write_point);
1599 }
1600
1601 void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
1602 {
1603 unsigned nr[BCH_DATA_NR];
1604
1605 memset(nr, 0, sizeof(nr));
1606
1607 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1608 nr[c->open_buckets[i].data_type]++;
1609
1610 printbuf_tabstops_reset(out);
1611 printbuf_tabstop_push(out, 24);
1612
1613 prt_printf(out, "capacity\t%llu\n", c->capacity);
1614 prt_printf(out, "reserved\t%llu\n", c->reserved);
1615 prt_printf(out, "hidden\t%llu\n", percpu_u64_get(&c->usage->hidden));
1616 prt_printf(out, "btree\t%llu\n", percpu_u64_get(&c->usage->btree));
1617 prt_printf(out, "data\t%llu\n", percpu_u64_get(&c->usage->data));
1618 prt_printf(out, "cached\t%llu\n", percpu_u64_get(&c->usage->cached));
1619 prt_printf(out, "reserved\t%llu\n", percpu_u64_get(&c->usage->reserved));
1620 prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
1621 prt_printf(out, "nr_inodes\t%llu\n", percpu_u64_get(&c->usage->nr_inodes));
1622
1623 prt_newline(out);
1624 prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
1625 prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
1626 prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
1627 prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
1628 prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
1629 prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
1630 prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
1631 }
1632
1633 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
1634 {
1635 struct bch_fs *c = ca->fs;
1636 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
1637 unsigned nr[BCH_DATA_NR];
1638
1639 memset(nr, 0, sizeof(nr));
1640
1641 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1642 nr[c->open_buckets[i].data_type]++;
1643
1644 bch2_dev_usage_to_text(out, ca, &stats);
1645
1646 prt_newline(out);
1647
1648 prt_printf(out, "reserves:\n");
1649 for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
1650 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
1651
1652 prt_newline(out);
1653
1654 printbuf_tabstops_reset(out);
1655 printbuf_tabstop_push(out, 12);
1656 printbuf_tabstop_push(out, 16);
1657
1658 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
1659 prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
1660 }
1661
1662 static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
1663 {
1664 struct printbuf buf = PRINTBUF;
1665
1666 prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
1667 c->opts.allocator_stuck_timeout);
1668
1669 prt_printf(&buf, "Allocator debug:\n");
1670 printbuf_indent_add(&buf, 2);
1671 bch2_fs_alloc_debug_to_text(&buf, c);
1672 printbuf_indent_sub(&buf, 2);
1673 prt_newline(&buf);
1674
1675 for_each_online_member(c, ca) {
1676 prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
1677 printbuf_indent_add(&buf, 2);
1678 bch2_dev_alloc_debug_to_text(&buf, ca);
1679 printbuf_indent_sub(&buf, 2);
1680 prt_newline(&buf);
1681 }
1682
1683 prt_printf(&buf, "Copygc debug:\n");
1684 printbuf_indent_add(&buf, 2);
1685 bch2_copygc_wait_to_text(&buf, c);
1686 printbuf_indent_sub(&buf, 2);
1687 prt_newline(&buf);
1688
1689 prt_printf(&buf, "Journal debug:\n");
1690 printbuf_indent_add(&buf, 2);
1691 bch2_journal_debug_to_text(&buf, &c->journal);
1692 printbuf_indent_sub(&buf, 2);
1693
1694 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1695 printbuf_exit(&buf);
1696 }
1697
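/*
 * Rate limit "allocator stuck" reports: once one fires, return a zero
 * timeout (suppressing further reports) for the next two minutes.
 */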
1698 static inline unsigned allocator_wait_timeout(struct bch_fs *c)
1699 {
1700 if (c->allocator_last_stuck &&
1701 time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
1702 return 0;
1703
1704 return c->opts.allocator_stuck_timeout * HZ;
1705 }
1706
1707 void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
1708 {
1709 unsigned t = allocator_wait_timeout(c);
1710
1711 if (t && closure_sync_timeout(cl, t)) {
1712 c->allocator_last_stuck = jiffies;
1713 bch2_print_allocator_stuck(c);
1714 }
1715
1716 closure_sync(cl);
1717 }
1718