xref: /linux/fs/bcachefs/movinggc.c (revision 50a0844bf8c4d38be540e423672ef9408d029252)
// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

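/*
 * Buckets currently being evacuated: a singly linked FIFO (first/last)
 * with an rhashtable index so we can cheaply check whether a bucket is
 * already in flight; nr and sectors track the list's total size.
 */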
struct buckets_in_flight {
	struct rhashtable		table;
	struct move_bucket_in_flight	*first;
	struct move_bucket_in_flight	*last;
	size_t				nr;
	size_t				sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len	= sizeof(struct move_bucket_key),
};

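/*
 * Allocate a tracking entry for @b and add it to @list: the hash table
 * insert rejects duplicate buckets with -EEXIST; otherwise the entry is
 * appended to the tail of the FIFO.
 */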
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

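/*
 * Decide whether a bucket is a copygc candidate: buckets with open
 * write points are skipped; otherwise read the alloc key (through the
 * key cache) and require movable data whose fragmentation LRU time is
 * nonzero and <= @time. Returns 1 if movable, 0 if not, negative on
 * error.
 */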
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       b->k.bucket, BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= bch2_bucket_sectors_dirty(*a);

	ret = data_type_movable(a->data_type) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

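/*
 * Reap completed evacuations from the head of the FIFO; if @flush, wait
 * for each entry's outstanding IO so the whole list drains. Drops btree
 * locks, since we may block.
 */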
static void move_buckets_wait(struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock_long(ctxt->trans);
}

static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

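/*
 * Scan the fragmentation LRU for the next batch of buckets to evacuate,
 * sized relative to what's already in flight; the btree write buffer is
 * flushed first so the LRU we walk is current.
 */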
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
			struct buckets_in_flight *buckets_in_flight,
			move_buckets *buckets)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (bch2_err_matches(ret, EROFS))
		return ret;

	if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
		return ret;

	ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				  0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret2 = 0;

		saw++;

		ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
		if (ret2 < 0)
			goto err;

		if (!ret2)
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret2 = darray_push(buckets, b);
			if (ret2)
				goto err;
			sectors += b.sectors;
		}

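		/* a positive return value stops the iteration - we have enough */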
		ret2 = buckets->nr >= nr_to_get;
err:
		ret2;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

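/*
 * One copygc pass: take a batch of buckets from the fragmentation LRU,
 * mark each as in flight and start evacuating it. -EEXIST (the LRU
 * returned the same bucket twice) is skipped; -ENOMEM ends the batch
 * early so in-flight IO can drain.
 */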
noinline
static int bch2_copygc(struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight,
		       bool *did_work)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (kthread_should_stop() || freezing(current))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
					     f->bucket.k.gen, data_opts);
		if (ret)
			goto err;

		*did_work = true;
	}
err:
	darray_exit(&buckets);

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err_msg(c, ret, "from bch2_move_data()");

	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
	trace_and_count(c, copygc, c, moved, 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of
 * space we reserved in bch2_recalc_capacity: if more than that amount of
 * disk space were stranded due to fragmentation, we couldn't store
 * everything we've promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
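/*
 * Worked example (illustrative numbers): a device with 1000 free
 * buckets of 256 sectors each, at the stripe watermark, gives
 * fragmented_allowed = (1000 * 256) / 2 = 128000 sectors. With 28000
 * sectors of fragmented movable data, copygc won't run until another
 * 100000 sectors have been written - the wait below is measured
 * against the write io_clock, which advances as sectors are written.
 */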
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(c, ca) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (unsigned i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

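/*
 * Print copygc wait state; io_clock positions are in sectors, hence
 * the << 9 conversions to bytes.
 */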
void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "Currently waiting for:     ");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since:   ");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait: ");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

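/*
 * Main copygc loop: compute the wait threshold, sleep on the write
 * io_clock until enough has been written that fragmentation may exceed
 * it, then run bch2_copygc(). If there was no wait and no work, sleep
 * until roughly 1/64th of the smallest member device's capacity has
 * been written before checking again.
 */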
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight *buckets;
	u64 last, wait;
	int ret = 0;

	buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
	if (!buckets)
		return -ENOMEM;
	ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
	bch_err_msg(c, ret, "allocating copygc buckets in flight");
	if (ret) {
		kfree(buckets);
		return ret;
	}

	set_freezable();

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bool did_work = false;

		bch2_trans_unlock_long(ctxt.trans);
		cond_resched();

		if (!c->copy_gc_enabled) {
			move_buckets_wait(&ctxt, buckets, true);
			kthread_wait_freezable(c->copy_gc_enabled ||
					       kthread_should_stop());
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(&ctxt, buckets, true);
			__refrigerator(false);
			continue;
		}

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(&ctxt, buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(&ctxt, buckets, &did_work);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);

		if (!wait && !did_work) {
			u64 min_member_capacity = bch2_min_rw_member_capacity(c);

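			/* no rw devices: fall back to 128MB, in 512-byte sectors */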
			if (min_member_capacity == U64_MAX)
				min_member_capacity = 128 * 2048;

			bch2_trans_unlock_long(ctxt.trans);
			bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
					MAX_SCHEDULE_TIMEOUT);
		}
	}

	move_buckets_wait(&ctxt, buckets, true);

	rhashtable_destroy(&buckets->table);
	kfree(buckets);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_move_stats_exit(&move_stats, c);

	return 0;
}

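/* Shut down the copygc thread, if it's running. */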
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

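/*
 * Start the copygc thread; a no-op if it's already running, or if the
 * filesystem was mounted with nochanges.
 */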
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	bch_err_msg(c, ret, "creating copygc thread");
	if (ret)
		return ret;

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

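/*
 * Early init of copygc state; the thread itself is created later, by
 * bch2_copygc_start().
 */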
void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}