// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate at
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

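/*
 * Helper for taking a mutex while holding btree locks: trylock first, and if
 * that fails drop our btree locks before blocking, so we can't deadlock
 * against another thread that holds the mutex and wants btree locks we hold.
 * "norelock" because we don't relock the transaction afterwards; callers do
 * that themselves when they need to.
 */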
static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}

const char * const bch2_watermarks[] = {
#define x(t) #t,
	BCH_WATERMARKS()
#undef x
	NULL
};

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
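
/*
 * A sketch of the calling pattern this implies (simplified - in the real
 * write path the open_bucket references are threaded through
 * struct bch_write_op rather than held directly):
 *
 *	bch2_alloc_sectors_start_trans(trans, ..., &wp);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);
 *	...submit the write, do the btree index update...
 *	bch2_open_buckets_put(c, &open_buckets);	// drop refs only after
 *							// the update is visible
 */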

void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
	rcu_read_unlock();
}

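/*
 * Open buckets are tracked in a small hash table, keyed by (device, bucket)
 * and chained intrusively through ob->hash: each entry holds the index (into
 * c->open_buckets) of the next bucket in its chain, with 0 terminating the
 * chain since open bucket 0 is reserved as a sentinel. bch2_bucket_is_open()
 * walks these chains to test whether a bucket is currently being written to.
 */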
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
		return;
	}

	spin_lock(&ob->lock);
	ob->valid = false;
	ob->data_type = 0;
	spin_unlock(&ob->lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}

static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
		return false;

	return bch2_is_superblock_bucket(ca, b);
}

static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));

	spin_lock(&c->freelist_lock);
	rcu_read_lock();
	bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
	rcu_read_unlock();

	ob->on_partial_list = true;
	c->open_buckets_partial[c->open_buckets_partial_nr++] =
		ob - c->open_buckets;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
	closure_wake_up(&c->freelist_wait);
}

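/*
 * Reserve tiers: higher-priority watermarks may dip further into the shared
 * pool of open buckets. As a worked example (OPEN_BUCKETS_COUNT is 1024 at
 * the time of writing): a normal-watermark allocation fails once no more
 * than 512 open buckets remain free, copygc once no more than ~341 remain,
 * btree allocations can go down to 256, reclaim down to ~170, and interior
 * btree node updates may take the very last one.
 */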
static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
	switch (watermark) {
	case BCH_WATERMARK_interior_updates:
		return 0;
	case BCH_WATERMARK_reclaim:
		return OPEN_BUCKETS_COUNT / 6;
	case BCH_WATERMARK_btree:
	case BCH_WATERMARK_btree_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	case BCH_WATERMARK_copygc:
		return OPEN_BUCKETS_COUNT / 3;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}

static inline bool may_alloc_bucket(struct bch_fs *c,
				    struct bpos bucket,
				    struct bucket_alloc_state *s)
{
	if (bch2_bucket_is_open(c, bucket.inode, bucket.offset)) {
		s->skipped_open++;
		return false;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, bucket.inode, bucket.offset)) {
		s->skipped_need_journal_commit++;
		return false;
	}

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) {
		s->skipped_nocow++;
		return false;
	}

	return true;
}

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket, u8 gen,
					      enum bch_watermark watermark,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{
	if (unlikely(is_superblock_bucket(c, ca, bucket)))
		return NULL;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		s->skipped_nouse++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		s->skipped_open++;
		return NULL;
	}

	struct open_bucket *ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->dev		= ca->dev_idx;
	ob->gen		= gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
	track_event_change(&c->times[BCH_TIME_blocked_allocate], false);

	spin_unlock(&c->freelist_lock);
	return ob;
}

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum bch_watermark watermark,
					    struct bucket_alloc_state *s,
					    struct btree_iter *freespace_iter,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
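	/*
	 * Freespace btree keys pack extra state into the high bits of the key
	 * offset (see alloc_freespace_pos() in alloc_background.h); the low
	 * 56 bits are the bucket number, which is all we want here:
	 */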
	u64 b = freespace_iter->pos.offset & ~(~0ULL << 56);

	if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s))
		return NULL;

	u8 gen;
	int ret = bch2_check_discard_freespace_key(trans, freespace_iter, &gen, true);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret)
		return NULL;

	return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl);
}

/*
 * This path is used before the freespace btree is initialized:
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum bch_watermark watermark,
			struct bucket_alloc_state *s,
			struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, citer;
	struct bkey_s_c k, ck;
	struct open_bucket *ob = NULL;
	u64 first_bucket = ca->mi.first_bucket;
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
	u64 alloc_cursor = alloc_start;
	int ret;

	/*
	 * Scan with an uncached iterator to avoid polluting the key cache. An
	 * uncached iter will return a cached key if one exists, but if not
	 * there is no other underlying protection for the associated key cache
	 * slot. To avoid racing bucket allocations, look up the cached key slot
	 * of any likely allocation candidate before attempting to proceed with
	 * the allocation. This provides proper exclusion on the associated
	 * bucket.
	 */
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_slots, k, ret) {
		u64 bucket = k.k->p.offset;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
				bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
			if (s->btree_bitmap == BTREE_BITMAP_YES &&
			    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
				break;

			bucket = sector_to_bucket(ca,
					round_up(bucket_to_sector(ca, bucket) + 1,
						 1ULL << ca->mi.btree_bitmap_shift));
			bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
			s->buckets_seen++;
			s->skipped_mi_btree_bitmap++;
			continue;
		}

		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
		if (a->data_type != BCH_DATA_free)
			continue;

		/* now check the cached key to serialize concurrent allocs of the bucket */
		ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
		ret = bkey_err(ck);
		if (ret)
			break;

		a = bch2_alloc_to_v4(ck, &a_convert);
		if (a->data_type != BCH_DATA_free)
			goto next;

		s->buckets_seen++;

		ob = may_alloc_bucket(c, k.k->p, s)
			? __try_alloc_bucket(c, ca, k.k->p.offset, a->gen,
					     watermark, s, cl)
			: NULL;
next:
		bch2_set_btree_iter_dontneed(&citer);
		bch2_trans_iter_exit(trans, &citer);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	alloc_cursor = iter.pos.offset;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > first_bucket) {
		alloc_cursor = alloc_start = first_bucket;
		goto again;
	}

	*dev_alloc_cursor = alloc_cursor;

	return ob;
}

static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum bch_watermark watermark,
						   struct bucket_alloc_state *s,
						   struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
	u64 alloc_cursor = alloc_start;
	int ret;
again:
	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace,
					 POS(ca->dev_idx, alloc_cursor),
					 POS(ca->dev_idx, U64_MAX),
					 0, k, ret) {
		/*
		 * peek normally doesn't trim extents - they can span iter.pos,
		 * which is not what we want here:
		 */
		iter.k.size = iter.k.p.offset - iter.pos.offset;

		while (iter.k.size) {
			s->buckets_seen++;

			u64 bucket = iter.pos.offset & ~(~0ULL << 56);
			if (s->btree_bitmap != BTREE_BITMAP_ANY &&
			    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
				if (s->btree_bitmap == BTREE_BITMAP_YES &&
				    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
					goto fail;

				bucket = sector_to_bucket(ca,
						round_up(bucket_to_sector(ca, bucket) + 1,
							 1ULL << ca->mi.btree_bitmap_shift));
				alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));

				bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
				s->skipped_mi_btree_bitmap++;
				goto next;
			}

			ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl);
			if (ob) {
				if (!IS_ERR(ob))
					*dev_alloc_cursor = iter.pos.offset;
				bch2_set_btree_iter_dontneed(&iter);
				break;
			}

			iter.k.size--;
			iter.pos.offset++;
		}
next:
		if (ob || ret)
			break;
	}
fail:
	bch2_trans_iter_exit(trans, &iter);

	BUG_ON(ob && ret);

	if (ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
		goto again;
	}

	return ob;
}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
					 enum bch_watermark watermark,
					 enum bch_data_type data_type,
					 struct closure *cl,
					 struct bch_dev_usage *usage,
					 struct bucket_alloc_state *s,
					 struct open_bucket *ob)
{
	struct printbuf buf = PRINTBUF;

	printbuf_tabstop_push(&buf, 24);

	prt_printf(&buf, "dev\t%s (%u)\n",	ca->name, ca->dev_idx);
	prt_printf(&buf, "watermark\t%s\n",	bch2_watermarks[watermark]);
	prt_printf(&buf, "data type\t%s\n",	__bch2_data_types[data_type]);
	prt_printf(&buf, "blocking\t%u\n",	cl != NULL);
	prt_printf(&buf, "free\t%llu\n",	usage->d[BCH_DATA_free].buckets);
	prt_printf(&buf, "avail\t%llu\n",	dev_buckets_free(ca, *usage, watermark));
	prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
		   bch2_copygc_wait_amount(c),
		   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
	prt_printf(&buf, "seen\t%llu\n",	s->buckets_seen);
	prt_printf(&buf, "open\t%llu\n",	s->skipped_open);
	prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
	prt_printf(&buf, "nocow\t%llu\n",	s->skipped_nocow);
	prt_printf(&buf, "nouse\t%llu\n",	s->skipped_nouse);
	prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);

	if (!IS_ERR(ob)) {
		prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
		trace_bucket_alloc(c, buf.buf);
	} else {
		prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
		trace_bucket_alloc_fail(c, buf.buf);
	}

	printbuf_exit(&buf);
}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
 * @ca:		device to allocate from
 * @watermark:	how important is this allocation?
 * @data_type:	BCH_DATA_journal, btree, user...
 * @cl:		if not NULL, closure to be used to wait if buckets not available
 * @nowait:	if true, do not wait for buckets to become available
 * @usage:	for also returning the current device usage
 *
 * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
				      struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl,
				      bool nowait,
				      struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	u64 avail;
	struct bucket_alloc_state s = {
		.btree_bitmap = data_type == BCH_DATA_btree,
	};
	bool waiting = nowait;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, watermark);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)
		bch2_dev_do_discards(ca);

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
		bch2_gc_gens_async(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_dev_do_invalidates(ca);

	if (!avail) {
		if (watermark > BCH_WATERMARK_normal &&
		    c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
			goto alloc;

		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		track_event_change(&c->times[BCH_TIME_blocked_allocate], true);

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc:
	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

	if (s.skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
		s.btree_bitmap = BTREE_BITMAP_ANY;
		goto alloc;
	}

	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
		freespace = false;
		goto alloc;
	}
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

	if (!IS_ERR(ob))
		ob->data_type = data_type;

	if (!IS_ERR(ob))
		count_event(c, bucket_alloc);
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		count_event(c, bucket_alloc_fail);

	if (!IS_ERR(ob)
	    ? trace_bucket_alloc_enabled()
	    : trace_bucket_alloc_fail_enabled())
		trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);

	return ob;
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
							data_type, cl, false, &usage)));
	return ob;
}

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.data[ret.nr++] = i;

	bubble_sort(ret.data, ret.nr, dev_stripe_cmp);
	return ret;
}

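/*
 * The stripe state spreads allocations across devices in proportion to their
 * free space: each allocation from a device bumps that device's next_alloc
 * counter by roughly 2^48 / free_space, and bch2_dev_alloc_list() sorts
 * devices by counter, lowest (i.e. most free space) first. The rescale loop
 * below keeps the counters from saturating at U64_MAX while preserving their
 * relative order.
 */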
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}

static int add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned nr_replicas,
			   unsigned *nr_effective,
			   bool *have_cache,
			   struct open_bucket *ob)
{
	unsigned durability = ob_dev(c, ob)->mi.durability;

	BUG_ON(*nr_effective >= nr_replicas);

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective	+= durability;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);

	if (*nr_effective >= nr_replicas)
		return 1;
	if (ob->ec)
		return 1;
	return 0;
}

int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      enum bch_write_flags flags,
		      enum bch_data_type data_type,
		      enum bch_watermark watermark,
		      struct closure *cl)
{
	struct bch_fs *c = trans->c;
	int ret = -BCH_ERR_insufficient_devices;

	BUG_ON(*nr_effective >= nr_replicas);

	struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	darray_for_each(devs_sorted, i) {
		struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			bch2_dev_put(ca);
			continue;
		}

		struct bch_dev_usage usage;
		struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
						     cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		bch2_dev_put(ca);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
				   have_cache, ob)) {
			ret = 0;
			break;
		}
	}

	return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct btree_trans *trans,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 enum bch_watermark watermark,
			 enum bch_write_flags flags,
			 struct closure *cl)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	struct ec_stripe_head *h =
		bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
	darray_for_each(devs_sorted, i)
		for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) {
				ob->ec_idx	= ec_idx;
				ob->ec		= h->s;
				ec_stripe_new_get(h->s, STRIPE_REF_io);

				ret = add_new_bucket(c, ptrs, devs_may_alloc,
						     nr_replicas, nr_effective,
						     have_cache, ob);
				goto out;
			}
		}
out:
	bch2_ec_stripe_head_put(c, h);
	return ret;
}

/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;

	if (ob->data_type != wp->data_type)
		return false;

	if (!ca->mi.durability &&
	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
		return false;

	if (ec != (ob->ec != NULL))
		return false;

	return true;
}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;
	int ret = 0;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		if (!ret && want_bucket(c, wp, devs_may_alloc,
					have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_replicas, nr_effective,
				       have_cache, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}
	wp->ptrs = ptrs_skip;

	return ret;
}

static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum bch_watermark watermark)
{
	int i, ret = 0;

	if (!c->open_buckets_partial_nr)
		return 0;

	spin_lock(&c->freelist_lock);

	if (!c->open_buckets_partial_nr)
		goto unlock;

	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
			struct bch_dev *ca = ob_dev(c, ob);
			struct bch_dev_usage usage;
			u64 avail;

			bch2_dev_usage_read_fast(ca, &usage);
			avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
			if (!avail)
				continue;

			array_remove_item(c->open_buckets_partial,
					  c->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, ob);
			if (ret)
				break;
		}
	}
unlock:
	spin_unlock(&c->freelist_lock);
	return ret;
}

static int __open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			bool erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	darray_for_each(*devs_have, i)
		__clear_bit(*i, devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code, watermark);
	if (ret)
		return ret;

	if (erasure_code) {
		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
					 target,
					 nr_replicas, nr_effective,
					 have_cache,
					 watermark, flags, _cl);
	} else {
retry_blocking:
		/*
		 * Try nonblocking first, so that if one device is full we'll try from
		 * other devices:
		 */
		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					nr_replicas, nr_effective, have_cache,
					flags, wp->data_type, watermark, cl);
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
		    !cl && _cl) {
			cl = _cl;
			goto retry_blocking;
		}
	}

	return ret;
}

static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *cl)
{
	int ret;

	if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,
				watermark, flags, cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			return ret;
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	ret = __open_bucket_add_buckets(trans, ptrs, wp,
			devs_have, target, false,
			nr_replicas, nr_effective, have_cache,
			watermark, flags, cl);
	return ret < 0 ? ret : 0;
}

/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:		open_bucket to predicate on
 * @c:		filesystem handle
 * @ca:		if set, we're killing buckets for a particular device
 * @ec:		if true, we're shutting down erasure coding and killing all ec
 *		open_buckets
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{
	if (ec) {
		return ob->ec != NULL;
	} else if (ca) {
		bool drop = ob->dev == ca->dev_idx;
		struct open_bucket *ob2;
		unsigned i;

		if (!drop && ob->ec) {
			unsigned nr_blocks;

			mutex_lock(&ob->ec->lock);
			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;

			for (i = 0; i < nr_blocks; i++) {
				if (!ob->ec->blocks[i])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[i];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		return drop;
	} else {
		return true;
	}
}

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (should_drop_bucket(ob, c, ca, ec))
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{
	unsigned i;

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	spin_lock(&c->freelist_lock);
	i = 0;
	while (i < c->open_buckets_partial_nr) {
		struct open_bucket *ob =
			c->open_buckets + c->open_buckets_partial[i];

		if (should_drop_bucket(ob, c, ca, ec)) {
			--c->open_buckets_partial_nr;
			swap(c->open_buckets_partial[i],
			     c->open_buckets_partial[c->open_buckets_partial_nr]);

			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			spin_unlock(&c->freelist_lock);
			bch2_open_bucket_put(c, ob);
			spin_lock(&c->freelist_lock);
		} else {
			i++;
		}
	}
	spin_unlock(&c->freelist_lock);

	bch2_ec_stop_dev(c, ca);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}

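/*
 * Each write point can strand up to a bucket's worth of unwritten space per
 * device, so cap the number of write points relative to free space. With
 * illustrative numbers: at 64 write points and 1MB buckets, ~64MB may sit in
 * partially filled buckets; try_increase_writepoints() uses factor 32, so it
 * wants at least ~2GB free before adding another write point.
 */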
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}

static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);
	wp->ptrs.nr = 0;
	mutex_unlock(&wp->lock);
	return true;
}

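/*
 * A write point specifier is either a pointer to a dedicated write point
 * (pointers are even) or a hashed identifier with the low bit set - see
 * writepoint_hashed() in alloc_foreground.h. For hashed identifiers we look
 * up the write point in the hash table, or steal and rehash the least
 * recently used one:
 */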
static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}

static noinline void
deallocate_extra_replicas(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct open_buckets *ptrs_no_use,
			  unsigned extra_replicas)
{
	struct open_buckets ptrs2 = { 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i) {
		unsigned d = ob_dev(c, ob)->mi.durability;

		if (d && d <= extra_replicas) {
			extra_replicas -= d;
			ob_push(c, ptrs_no_use, ob);
		} else {
			ob_push(c, &ptrs2, ob);
		}
	}

	*ptrs = ptrs2;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			     unsigned target,
			     unsigned erasure_code,
			     struct write_point_specifier write_point,
			     struct bch_devs_list *devs_have,
			     unsigned nr_replicas,
			     unsigned nr_replicas_required,
			     enum bch_watermark watermark,
			     enum bch_write_flags flags,
			     struct closure *cl,
			     struct write_point **wp_ret)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	bool have_cache;
	int ret;
	int i;

	if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
		erasure_code = false;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	ret = bch2_trans_relock(trans);
	if (ret)
		goto err;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		/* Don't retry from all devices if we're out of open buckets: */
		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
			int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
			if (!ret2 ||
			    bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
				ret = ret2;
				goto alloc_done;
			}
		}

		/*
		 * Only try to allocate cache (durability = 0 devices) from the
		 * specified target:
		 */
		have_cache = true;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	} else {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_effective > nr_replicas)
		deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return 0;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(trans, write_points_nr))
		goto retry;

	if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	return ret;
}

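/*
 * Construct an extent pointer to the space this open_bucket will allocate
 * next: the offset is the start of the bucket plus however much has already
 * been consumed (bucket_size - sectors_free).
 */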
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @wp
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	bch2_alloc_sectors_done_inlined(c, wp);
}

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point,		BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point,	BCH_DATA_user);
	writepoint_init(&c->copygc_write_point,		BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= local_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}

void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */

	prt_printf(out, "%zu ref %u ",
		   ob - c->open_buckets,
		   atomic_read(&ob->pin));
	bch2_prt_data_type(out, data_type);
	prt_printf(out, " %u:%llu gen %u allocated %u/%u",
		   ob->dev, ob->bucket, ob->gen,
		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
	if (ob->ec)
		prt_printf(out, " ec idx %llu", ob->ec->idx);
	if (ob->on_partial_list)
		prt_str(out, " partial");
	prt_newline(out);
}

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bch_dev *ca)
{
	struct open_bucket *ob;

	out->atomic++;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && (!ca || ob->dev == ca->dev_idx))
			bch2_open_bucket_to_text(out, c, ob);
		spin_unlock(&ob->lock);
	}

	--out->atomic;
}

void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned i;

	out->atomic++;
	spin_lock(&c->freelist_lock);

	for (i = 0; i < c->open_buckets_partial_nr; i++)
		bch2_open_bucket_to_text(out, c,
				c->open_buckets + c->open_buckets_partial[i]);

	spin_unlock(&c->freelist_lock);
	--out->atomic;
}

static const char * const bch2_write_point_states[] = {
#define x(n)	#n,
	WRITE_POINT_STATES()
#undef x
	NULL
};

static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
				     struct write_point *wp)
{
	struct open_bucket *ob;
	unsigned i;

	prt_printf(out, "%lu: ", wp->write_point);
	prt_human_readable_u64(out, wp->sectors_allocated);

	prt_printf(out, " last wrote: ");
	bch2_pr_time_units(out, sched_clock() - wp->last_used);

	for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
		prt_printf(out, " %s: ", bch2_write_point_states[i]);
		bch2_pr_time_units(out, wp->time[i]);
	}

	prt_newline(out);

	printbuf_indent_add(out, 2);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		bch2_open_bucket_to_text(out, c, ob);
	printbuf_indent_sub(out, 2);
}

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct write_point *wp;

	prt_str(out, "Foreground write points\n");
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		bch2_write_point_to_text(out, c, wp);

	prt_str(out, "Copygc write point\n");
	bch2_write_point_to_text(out, c, &c->copygc_write_point);

	prt_str(out, "Rebalance write point\n");
	bch2_write_point_to_text(out, c, &c->rebalance_write_point);

	prt_str(out, "Btree write point\n");
	bch2_write_point_to_text(out, c, &c->btree_write_point);
}

void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_printf(out, "capacity\t%llu\n",		c->capacity);
	prt_printf(out, "reserved\t%llu\n",		c->reserved);
	prt_printf(out, "hidden\t%llu\n",		percpu_u64_get(&c->usage->hidden));
	prt_printf(out, "btree\t%llu\n",		percpu_u64_get(&c->usage->btree));
	prt_printf(out, "data\t%llu\n",			percpu_u64_get(&c->usage->data));
	prt_printf(out, "cached\t%llu\n",		percpu_u64_get(&c->usage->cached));
	prt_printf(out, "reserved\t%llu\n",		percpu_u64_get(&c->usage->reserved));
	prt_printf(out, "online_reserved\t%llu\n",	percpu_u64_get(c->online_reserved));
	prt_printf(out, "nr_inodes\t%llu\n",		percpu_u64_get(&c->usage->nr_inodes));

	prt_newline(out);
	prt_printf(out, "freelist_wait\t%s\n",			c->freelist_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open buckets allocated\t%i\n",		OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_printf(out, "open buckets total\t%u\n",		OPEN_BUCKETS_COUNT);
	prt_printf(out, "open_buckets_wait\t%s\n",		c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open_buckets_btree\t%u\n",		nr[BCH_DATA_btree]);
	prt_printf(out, "open_buckets_user\t%u\n",		nr[BCH_DATA_user]);
	prt_printf(out, "btree reserve cache\t%u\n",		c->btree_reserve_cache_nr);
}

void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	bch2_dev_usage_to_text(out, ca, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:\n");
	for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
		prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 12);
	printbuf_tabstop_push(out, 16);

	prt_printf(out, "open buckets\t%i\r\n",	ca->nr_open_buckets);
	prt_printf(out, "buckets to invalidate\t%llu\r\n",	should_invalidate_buckets(ca, stats));
}

static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
{
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
		   c->opts.allocator_stuck_timeout);

	prt_printf(&buf, "Allocator debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_fs_alloc_debug_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	for_each_online_member(c, ca) {
		prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
		printbuf_indent_add(&buf, 2);
		bch2_dev_alloc_debug_to_text(&buf, ca);
		printbuf_indent_sub(&buf, 2);
		prt_newline(&buf);
	}

	prt_printf(&buf, "Copygc debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_copygc_wait_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	prt_printf(&buf, "Journal debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_journal_debug_to_text(&buf, &c->journal);
	printbuf_indent_sub(&buf, 2);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
}

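/*
 * Rate limit the "allocator stuck" warning: after it fires once, a timeout of
 * 0 is returned, which suppresses the warning for the next two minutes.
 */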
static inline unsigned allocator_wait_timeout(struct bch_fs *c)
{
	if (c->allocator_last_stuck &&
	    time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
		return 0;

	return c->opts.allocator_stuck_timeout * HZ;
}

void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	unsigned t = allocator_wait_timeout(c);

	if (t && closure_sync_timeout(cl, t)) {
		c->allocator_last_stuck = jiffies;
		bch2_print_allocator_stuck(c);
	}

	closure_sync(cl);
}
1731