// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

#define REBALANCE_WORK_SCAN_OFFSET	(U64_MAX - 1)

static const char * const bch2_rebalance_state_strs[] = {
#define x(t) #t,
	BCH_REBALANCE_STATES()
	NULL
#undef x
};

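/*
 * Scan requests are recorded as KEY_TYPE_cookie keys at offset
 * REBALANCE_WORK_SCAN_OFFSET in the rebalance_work btree, indexed by inode
 * number (0 == the entire filesystem). The cookie value is incremented on
 * every request, so a completed scan only clears the request it actually
 * saw:
 */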
static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie *cookie;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
	ret = PTR_ERR_OR_ZERO(cookie);
	if (ret)
		goto err;

	bkey_cookie_init(&cookie->k_i);
	cookie->k.p = iter.pos;
	cookie->v.cookie = cpu_to_le64(v + 1);

	ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

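/*
 * Request a rescan of @inum and wake the rebalance thread. A hypothetical
 * caller, sketched for illustration - e.g. after an inode's background IO
 * options change and existing extents may no longer match them:
 *
 *	ret = bch2_set_rebalance_needs_scan(c, inum);
 *	if (ret)
 *		bch_err_fn(c, ret);
 */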
int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
	int ret = bch2_trans_commit_do(c, NULL, NULL,
				       BCH_TRANS_COMMIT_no_enospc|
				       BCH_TRANS_COMMIT_lazy_rw,
			    __bch2_set_rebalance_needs_scan(trans, inum));
	rebalance_wakeup(c);
	return ret;
}

int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
	return bch2_set_rebalance_needs_scan(c, 0);
}

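/*
 * Delete the scan cookie for @inum, but only if its value still matches
 * @cookie: if another scan request arrived while we were scanning, the
 * bumped cookie must survive so that request isn't lost.
 */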
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	if (v == cookie)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

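/* Next pending entry in the rebalance_work btree, or null if shutting down: */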
static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
					    struct btree_iter *work_iter)
{
	return !kthread_should_stop()
		? bch2_btree_iter_peek(work_iter)
		: bkey_s_c_null;
}

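/*
 * Drop @k's bch_extent_rebalance entry, so the extent stops showing up in
 * rebalance_work; a full scan will be needed before it's considered again.
 */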
static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
					   struct btree_iter *iter,
					   struct bkey_s_c k)
{
	struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	extent_entry_drop(bkey_i_to_s(n),
			  (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
	return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

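/*
 * Resolve a rebalance_work entry at @work_pos to the extent it refers to
 * (the extents btree for normal extents, the reflink btree for indirect
 * ones) and fill in *data_opts from the extent's rebalance options.
 * Returns null if the entry was stale: rebalance_work is maintained via
 * the btree write buffer and can race with extent updates.
 */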
static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
			struct bpos work_pos,
			struct btree_iter *extent_iter,
			struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;

	bch2_trans_iter_exit(trans, extent_iter);
	bch2_trans_iter_init(trans, extent_iter,
			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
			     work_pos,
			     BTREE_ITER_all_snapshots);
	k = bch2_btree_iter_peek_slot(extent_iter);
	if (bkey_err(k))
		return k;

	const struct bch_extent_rebalance *r = k.k ? bch2_bkey_rebalance_opts(k) : NULL;
	if (!r) {
		/* raced due to btree write buffer, nothing to do */
		return bkey_s_c_null;
	}

	memset(data_opts, 0, sizeof(*data_opts));

	data_opts->rewrite_ptrs		=
		bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
	data_opts->target		= r->target;
	data_opts->write_flags		|= BCH_WRITE_ONLY_SPECIFIED_DEVS;

	if (!data_opts->rewrite_ptrs) {
		/*
		 * device we would want to write to offline? devices in target
		 * changed?
		 *
		 * We'll now need a full scan before this extent is picked up
		 * again:
		 */
		int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
		if (ret)
			return bkey_s_c_err(ret);
		return bkey_s_c_null;
	}

	if (trace_rebalance_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "target=");
		bch2_target_to_text(&buf, c, r->target);
		prt_str(&buf, " compression=");
		bch2_compression_opt_to_text(&buf, r->compression);
		prt_str(&buf, " ");
		bch2_bkey_val_to_text(&buf, c, k);

		trace_rebalance_extent(c, buf.buf);
		printbuf_exit(&buf);
	}

	return k;
}

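/*
 * Move/rewrite one extent. Note the error policy below: allocation
 * failures wait for in-flight IO and restart the transaction, transaction
 * restarts are propagated, and anything else is skipped so one bad extent
 * can't wedge the thread.
 */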
noinline_for_stack
static int do_rebalance_extent(struct moving_context *ctxt,
			       struct bpos work_pos,
			       struct btree_iter *extent_iter)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	struct data_update_opts data_opts;
	struct bch_io_opts io_opts;
	struct bkey_s_c k;
	struct bkey_buf sk;
	int ret;

	ctxt->stats = &r->work_stats;
	r->state = BCH_REBALANCE_working;

	bch2_bkey_buf_init(&sk);

	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
						 extent_iter, &data_opts));
	if (ret || !k.k)
		goto out;

	ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
	if (ret)
		goto out;

	atomic64_add(k.k->size, &ctxt->stats->sectors_seen);

	/*
	 * The iterator gets unlocked by __bch2_read_extent - need to
	 * save a copy of @k elsewhere:
	 */
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
	if (ret) {
		if (bch2_err_matches(ret, ENOMEM)) {
			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(ctxt);
			ret = -BCH_ERR_transaction_restart_nested;
		}

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto out;

		/* skip it and continue, XXX signal failure */
		ret = 0;
	}
out:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

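/*
 * Predicate for scans: keys in the extents btree (nonzero p.inode) take
 * target/compression from the inode's IO options; indirect extents in the
 * reflink btree (p.inode == 0) prefer the rebalance options stashed in the
 * key itself, since the originating inode isn't known here.
 */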
static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	unsigned target, compression;

	if (k.k->p.inode) {
		target		= io_opts->background_target;
		compression	= background_compression(*io_opts);
	} else {
		const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

		target		= r ? r->target : io_opts->background_target;
		compression	= r ? r->compression : background_compression(*io_opts);
	}

	data_opts->rewrite_ptrs		= bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
	data_opts->target		= target;
	data_opts->write_flags		|= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	return data_opts->rewrite_ptrs != 0;
}

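/*
 * Scan all extents in @inum (or everything, if @inum is 0) through
 * rebalance_pred(), then drop the scan cookie - unless a newer scan
 * request raced with us:
 */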
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	int ret;

	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
	ctxt->stats = &r->scan_stats;

	if (!inum) {
		r->scan_start	= BBPOS_MIN;
		r->scan_end	= BBPOS_MAX;
	} else {
		r->scan_start	= BBPOS(BTREE_ID_extents, POS(inum, 0));
		r->scan_end	= BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
	}

	r->state = BCH_REBALANCE_scanning;

	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));

	bch2_move_stats_exit(&r->scan_stats, trans->c);
	return ret;
}

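/*
 * Nothing to do: sleep on the write IO clock, so we wake after a
 * proportional amount of IO rather than wallclock time. The wait ends
 * ~1/64th (>> 6) of the smallest rw member's capacity from now, falling
 * back to 128MB (128 * 2048 512-byte sectors) when no rw device reports a
 * capacity.
 */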
static void rebalance_wait(struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 now = atomic64_read(&clock->now);
	u64 min_member_capacity = bch2_min_rw_member_capacity(c);

	if (min_member_capacity == U64_MAX)
		min_member_capacity = 128 * 2048;

	r->wait_iotime_end		= now + (min_member_capacity >> 6);

	if (r->state != BCH_REBALANCE_waiting) {
		r->wait_iotime_start	= now;
		r->wait_wallclock_start	= ktime_get_real_ns();
		r->state		= BCH_REBALANCE_waiting;
	}

	bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
}

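/*
 * Main work loop: walk the rebalance_work btree, dispatching cookies to
 * do_rebalance_scan() and entries that mirror extent positions to
 * do_rebalance_extent(), then go idle via rebalance_wait() once no work
 * was found:
 */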
static int do_rebalance(struct moving_context *ctxt)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct btree_iter rebalance_work_iter, extent_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_begin(trans);

	bch2_move_stats_init(&r->work_stats, "rebalance_work");
	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

	bch2_trans_iter_init(trans, &rebalance_work_iter,
			     BTREE_ID_rebalance_work, POS_MIN,
			     BTREE_ITER_all_snapshots);

	while (!bch2_move_ratelimit(ctxt)) {
		if (!r->enabled) {
			bch2_moving_ctxt_flush_all(ctxt);
			kthread_wait_freezable(r->enabled ||
					       kthread_should_stop());
		}

		if (kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret || !k.k)
			break;

		ret = k.k->type == KEY_TYPE_cookie
			? do_rebalance_scan(ctxt, k.k->p.inode,
					    le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
			: do_rebalance_extent(ctxt, k.k->p, &extent_iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&rebalance_work_iter);
	}

	bch2_trans_iter_exit(trans, &extent_iter);
	bch2_trans_iter_exit(trans, &rebalance_work_iter);
	bch2_move_stats_exit(&r->scan_stats, c);

	if (!ret &&
	    !kthread_should_stop() &&
	    !atomic64_read(&r->work_stats.sectors_seen) &&
	    !atomic64_read(&r->scan_stats.sectors_seen)) {
		bch2_moving_ctxt_flush_all(ctxt);
		bch2_trans_unlock_long(trans);
		rebalance_wait(c);
	}

	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

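/* Thread main: run do_rebalance() until it errors or we're told to stop: */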
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct moving_context ctxt;

	set_freezable();

	bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
			      writepoint_ptr(&c->rebalance_write_point),
			      true);

	while (!kthread_should_stop() && !do_rebalance(&ctxt))
		;

	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

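/*
 * Render rebalance state for userspace (exposed via sysfs); the << 9
 * shifts below convert IO clock sectors to bytes:
 */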
void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;

	prt_str(out, bch2_rebalance_state_strs[r->state]);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	switch (r->state) {
	case BCH_REBALANCE_waiting: {
		u64 now = atomic64_read(&c->io_clock[WRITE].now);

		prt_str(out, "io wait duration:  ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
		prt_newline(out);

		prt_str(out, "io wait remaining: ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
		prt_newline(out);

		prt_str(out, "duration waited:   ");
		bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
		prt_newline(out);
		break;
	}
	case BCH_REBALANCE_working:
		bch2_move_stats_to_text(out, &r->work_stats);
		break;
	case BCH_REBALANCE_scanning:
		bch2_move_stats_to_text(out, &r->scan_stats);
		break;
	}
	prt_newline(out);
	printbuf_indent_sub(out, 2);
}

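/*
 * Stop the rebalance thread. The rate is cranked to UINT_MAX and reset
 * first so nothing stays stuck waiting on it; synchronize_rcu() orders
 * clearing ->thread against rebalance_wakeup(), which reads it under RCU.
 */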
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

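/*
 * Start the rebalance thread, unless it's already running or the
 * filesystem was mounted with nochanges:
 */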
int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating rebalance thread");
	if (ret)
		return ret;

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}

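/* One-time init at fs bringup: set up the pd (rate) controller state: */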
void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);
}