// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

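/*
 * Buckets currently being evacuated: a singly linked FIFO (first/last) that
 * preserves submission order, plus an rhashtable so we can cheaply check
 * whether a given bucket is already in flight.
 */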
struct buckets_in_flight {
	struct rhashtable	table;
	struct move_bucket_in_flight *first;
	struct move_bucket_in_flight *last;
	size_t			nr;
	size_t			sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset		= offsetof(struct move_bucket_in_flight, hash),
	.key_offset		= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len		= sizeof(struct move_bucket_key),
	.automatic_shrinking	= true,
};

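/*
 * Allocate a tracking entry for @b and append it to @list; returns an ERR_PTR
 * on allocation failure, or -EEXIST if the bucket is already in the table.
 */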
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

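/*
 * Is this bucket a copygc candidate? It must not be open for writes, must
 * hold movable data, and its fragmentation LRU position must be at or before
 * @time. Also fills in @b's generation and dirty sector count as a side
 * effect.
 */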
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       b->k.bucket, BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= bch2_bucket_sectors_dirty(*a);

	ret = data_type_movable(a->data_type) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

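/*
 * Reap completed entries from the head of the in-flight list; if @flush is
 * set, wait for outstanding moves to complete instead of stopping at the
 * first still-busy entry.
 */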
static void move_buckets_wait(struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock_long(ctxt->trans);
}

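/* Is this bucket already being evacuated? */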
static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

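/*
 * Scan the fragmentation LRU for candidate buckets, skipping buckets that are
 * open, not movable, or already in flight. Collects up to a quarter of the
 * number of buckets currently in flight (at least 16) per call.
 */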
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
				   struct buckets_in_flight *buckets_in_flight,
				   move_buckets *buckets)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (bch2_err_matches(ret, EROFS))
		return ret;

	if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
		return ret;

	bch2_trans_begin(trans);

	ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				      lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				      lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				      0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret2 = 0;

		saw++;

		ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
		if (ret2 < 0)
			goto err;

		if (!ret2)
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret2 = darray_push(buckets, b);
			if (ret2)
				goto err;
			sectors += b.sectors;
		}

		ret2 = buckets->nr >= nr_to_get;
err:
		ret2;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

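/*
 * One copygc pass: grab a batch of fragmented buckets and evacuate their live
 * data, allocating from the copygc watermark.
 */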
noinline
static int bch2_copygc(struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight,
		       bool *did_work)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (kthread_should_stop() || freezing(current))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
					   f->bucket.k.gen, data_opts);
		if (ret)
			goto err;

		*did_work = true;
	}
err:
	darray_exit(&buckets);

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err_msg(c, ret, "from bch2_move_data()");

	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
	trace_and_count(c, copygc, c, moved, 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
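/*
 * For example: a device with roughly 8 GiB available at the stripe watermark
 * gets a threshold of ~4 GiB, so copygc stays idle until at least that much
 * movable data is stranded in fragmented buckets; the threshold shrinks as
 * the device fills and free space dwindles.
 */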
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(c, ca) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (unsigned i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 32);
	prt_printf(out, "running:\t%u\n", c->copygc_running);
	prt_printf(out, "copygc_wait:\t%llu\n", c->copygc_wait);
	prt_printf(out, "copygc_wait_at:\t%llu\n", c->copygc_wait_at);

	prt_printf(out, "Currently waiting for:\t");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since:\t");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait:\t");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

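/*
 * Main copygc loop: sleep on the write io_clock until enough data has been
 * written that fragmentation may have crossed the threshold (see
 * bch2_copygc_wait_amount()), then evacuate buckets in batches.
 */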
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight *buckets;
	u64 last, wait;
	int ret = 0;

	buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
	if (!buckets)
		return -ENOMEM;
	ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
	bch_err_msg(c, ret, "allocating copygc buckets in flight");
	if (ret) {
		kfree(buckets);
		return ret;
	}

	set_freezable();

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bool did_work = false;

		bch2_trans_unlock_long(ctxt.trans);
		cond_resched();

		if (!c->copy_gc_enabled) {
			move_buckets_wait(&ctxt, buckets, true);
			kthread_wait_freezable(c->copy_gc_enabled ||
					       kthread_should_stop());
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(&ctxt, buckets, true);
			__refrigerator(false);
			continue;
		}

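		/*
		 * wait is in io_clock units (sectors written): how much more
		 * can be written before fragmentation hits the threshold.
		 */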
		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(&ctxt, buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(&ctxt, buckets, &did_work);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);

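		/*
		 * No wait was calculated but there was nothing to do: don't
		 * spin; sleep for a nominal amount of io time, scaled to the
		 * smallest rw member's capacity, before rechecking.
		 */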
		if (!wait && !did_work) {
			u64 min_member_capacity = bch2_min_rw_member_capacity(c);

			if (min_member_capacity == U64_MAX)
				min_member_capacity = 128 * 2048;

			move_buckets_wait(&ctxt, buckets, true);
			bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
						   MAX_SCHEDULE_TIMEOUT);
		}
	}

	move_buckets_wait(&ctxt, buckets, true);

	rhashtable_destroy(&buckets->table);
	kfree(buckets);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_move_stats_exit(&move_stats, c);

	return 0;
}

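/* Stop and release the copygc thread, if running. */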
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

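/*
 * Start the copygc thread if it isn't already running; a no-op in nochanges
 * mode.
 */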
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	bch_err_msg(c, ret, "creating copygc thread");
	if (ret)
		return ret;

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}