xref: /linux/fs/bcachefs/rebalance.c (revision 5ce42b5de461c3154f61a023b191dd6b77ee66c0)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

#define REBALANCE_WORK_SCAN_OFFSET	(U64_MAX - 1)

static const char * const bch2_rebalance_state_strs[] = {
#define x(t) #t,
	BCH_REBALANCE_STATES()
	NULL
#undef x
};

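/*
 * Bump the scan cookie for @inum in the rebalance_work btree: read the
 * current cookie, if any, and write back cookie + 1 at offset
 * REBALANCE_WORK_SCAN_OFFSET to request a scan. inum == 0 requests a scan of
 * the entire filesystem.
 */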
static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie *cookie;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
	ret = PTR_ERR_OR_ZERO(cookie);
	if (ret)
		goto err;

	bkey_cookie_init(&cookie->k_i);
	cookie->k.p = iter.pos;
	cookie->v.cookie = cpu_to_le64(v + 1);

	ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

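/*
 * Commit a scan request in its own transaction, then poke the rebalance
 * thread so it notices the new work:
 */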
int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
			    __bch2_set_rebalance_needs_scan(trans, inum));
	rebalance_wakeup(c);
	return ret;
}

int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
	return bch2_set_rebalance_needs_scan(c, 0);
}

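/*
 * Delete the scan cookie for @inum, but only if it still matches the value we
 * saw when the scan started: if another scan request raced in and bumped the
 * cookie, leave it in place so that request isn't lost.
 */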
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	if (v == cookie)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

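/* Next entry in the rebalance_work btree, or bkey_s_c_null if we're stopping: */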
static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
					    struct btree_iter *work_iter)
{
	return !kthread_should_stop()
		? bch2_btree_iter_peek(work_iter)
		: bkey_s_c_null;
}

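/*
 * Drop the bch_extent_rebalance entry from an extent and commit, so the
 * extent no longer generates rebalance_work entries:
 */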
static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
					   struct btree_iter *iter,
					   struct bkey_s_c k)
{
	struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	extent_entry_drop(bkey_i_to_s(n),
			  (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
	return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

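/*
 * Look up the extent (or reflink) key a rebalance_work entry points at, and
 * fill in data_update_opts from its embedded rebalance options; returns
 * bkey_s_c_null if the entry was stale or the extent no longer needs work:
 */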
static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
			struct bpos work_pos,
			struct btree_iter *extent_iter,
			struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;

	bch2_trans_iter_exit(trans, extent_iter);
	bch2_trans_iter_init(trans, extent_iter,
			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
			     work_pos,
			     BTREE_ITER_all_snapshots);
	k = bch2_btree_iter_peek_slot(extent_iter);
	if (bkey_err(k))
		return k;

	const struct bch_extent_rebalance *r = k.k ? bch2_bkey_rebalance_opts(k) : NULL;
	if (!r) {
		/* raced due to btree write buffer, nothing to do */
		return bkey_s_c_null;
	}

	memset(data_opts, 0, sizeof(*data_opts));

	data_opts->rewrite_ptrs		=
		bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
	data_opts->target		= r->target;
	data_opts->write_flags		|= BCH_WRITE_ONLY_SPECIFIED_DEVS;

	if (!data_opts->rewrite_ptrs) {
		/*
		 * Is the device we'd want to write to offline? Did the devices
		 * in the target change?
		 *
		 * Either way, we'll need a full scan before this extent is
		 * picked up again:
		 */
		int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
		if (ret)
			return bkey_s_c_err(ret);
		return bkey_s_c_null;
	}

	if (trace_rebalance_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "target=");
		bch2_target_to_text(&buf, c, r->target);
		prt_str(&buf, " compression=");
		bch2_compression_opt_to_text(&buf, r->compression);
		prt_str(&buf, " ");
		bch2_bkey_val_to_text(&buf, c, k);

		trace_rebalance_extent(c, buf.buf);
		printbuf_exit(&buf);
	}

	return k;
}

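/*
 * Process one rebalance_work entry: look up the extent it refers to and move
 * it to the right target/compression type. Most errors are skipped so that
 * one bad extent can't wedge the whole thread:
 */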
noinline_for_stack
static int do_rebalance_extent(struct moving_context *ctxt,
			       struct bpos work_pos,
			       struct btree_iter *extent_iter)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	struct data_update_opts data_opts;
	struct bch_io_opts io_opts;
	struct bkey_s_c k;
	struct bkey_buf sk;
	int ret;

	ctxt->stats = &r->work_stats;
	r->state = BCH_REBALANCE_working;

	bch2_bkey_buf_init(&sk);

	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
						 extent_iter, &data_opts));
	if (ret || !k.k)
		goto out;

	ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
	if (ret)
		goto out;

	atomic64_add(k.k->size, &ctxt->stats->sectors_seen);

	/*
	 * The iterator gets unlocked by __bch2_read_extent - need to
	 * save a copy of @k elsewhere:
	 */
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
	if (ret) {
		if (bch2_err_matches(ret, ENOMEM)) {
			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(ctxt);
			ret = -BCH_ERR_transaction_restart_nested;
		}

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto out;

		/* skip it and continue, XXX signal failure */
		ret = 0;
	}
out:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

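/*
 * Move predicate used during a scan: decide whether a key needs moving and,
 * if so, which pointers to rewrite. For reflink btree keys (inode == 0), the
 * options embedded in the extent take precedence over io_opts:
 */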
static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	unsigned target, compression;

	if (k.k->p.inode) {
		target		= io_opts->background_target;
		compression	= background_compression(*io_opts);
	} else {
		const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

		target		= r ? r->target : io_opts->background_target;
		compression	= r ? r->compression : background_compression(*io_opts);
	}

	data_opts->rewrite_ptrs		= bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
	data_opts->target		= target;
	data_opts->write_flags		|= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	return data_opts->rewrite_ptrs != 0;
}

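/*
 * Walk all extents for @inum (or the whole filesystem if inum == 0), moving
 * whatever rebalance_pred() flags; on success, clear the scan cookie that
 * requested us:
 */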
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	int ret;

	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
	ctxt->stats = &r->scan_stats;

	if (!inum) {
		r->scan_start	= BBPOS_MIN;
		r->scan_end	= BBPOS_MAX;
	} else {
		r->scan_start	= BBPOS(BTREE_ID_extents, POS(inum, 0));
		r->scan_end	= BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
	}

	r->state = BCH_REBALANCE_scanning;

	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));

	bch2_move_stats_exit(&r->scan_stats, trans->c);
	return ret;
}

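/*
 * Nothing to do: sleep on the write IO clock until roughly 1/64th of the
 * smallest member device's capacity has been written, so we wake up in
 * proportion to how fast the filesystem is filling; the 128 * 2048 sector
 * fallback covers the case where no rw member exists:
 */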
static void rebalance_wait(struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 now = atomic64_read(&clock->now);
	u64 min_member_capacity = bch2_min_rw_member_capacity(c);

	if (min_member_capacity == U64_MAX)
		min_member_capacity = 128 * 2048;

	r->wait_iotime_end		= now + (min_member_capacity >> 6);

	if (r->state != BCH_REBALANCE_waiting) {
		r->wait_iotime_start	= now;
		r->wait_wallclock_start	= ktime_get_real_ns();
		r->state		= BCH_REBALANCE_waiting;
	}

	bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
}

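/*
 * Main loop: walk the rebalance_work btree, dispatching cookies to
 * do_rebalance_scan() and extent entries to do_rebalance_extent(); when the
 * btree is exhausted, flush outstanding moves and wait:
 */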
static int do_rebalance(struct moving_context *ctxt)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct btree_iter rebalance_work_iter, extent_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_begin(trans);

	bch2_move_stats_init(&r->work_stats, "rebalance_work");
	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

	bch2_trans_iter_init(trans, &rebalance_work_iter,
			     BTREE_ID_rebalance_work, POS_MIN,
			     BTREE_ITER_all_snapshots);

	while (!bch2_move_ratelimit(ctxt)) {
		if (!r->enabled) {
			bch2_moving_ctxt_flush_all(ctxt);
			kthread_wait_freezable(r->enabled ||
					       kthread_should_stop());
		}

		if (kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret || !k.k)
			break;

		ret = k.k->type == KEY_TYPE_cookie
			? do_rebalance_scan(ctxt, k.k->p.inode,
					    le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
			: do_rebalance_extent(ctxt, k.k->p, &extent_iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&rebalance_work_iter);
	}

	bch2_trans_iter_exit(trans, &extent_iter);
	bch2_trans_iter_exit(trans, &rebalance_work_iter);
	bch2_move_stats_exit(&r->scan_stats, c);

	if (!ret &&
	    !kthread_should_stop() &&
	    !atomic64_read(&r->work_stats.sectors_seen) &&
	    !atomic64_read(&r->scan_stats.sectors_seen)) {
		bch2_moving_ctxt_flush_all(ctxt);
		bch2_trans_unlock_long(trans);
		rebalance_wait(c);
	}

	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

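/* Rebalance kthread: loops in do_rebalance() until asked to stop: */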
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct moving_context ctxt;

	set_freezable();

	bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
			      writepoint_ptr(&c->rebalance_write_point),
			      true);

	while (!kthread_should_stop() && !do_rebalance(&ctxt))
		;

	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;

	prt_str(out, bch2_rebalance_state_strs[r->state]);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	switch (r->state) {
	case BCH_REBALANCE_waiting: {
		u64 now = atomic64_read(&c->io_clock[WRITE].now);

		prt_str(out, "io wait duration:  ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
		prt_newline(out);

		prt_str(out, "io wait remaining: ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
		prt_newline(out);

		prt_str(out, "duration waited:   ");
		bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
		prt_newline(out);
		break;
	}
	case BCH_REBALANCE_working:
		bch2_move_stats_to_text(out, &r->work_stats);
		break;
	case BCH_REBALANCE_scanning:
		bch2_move_stats_to_text(out, &r->scan_stats);
		break;
	}
	prt_newline(out);
	printbuf_indent_sub(out, 2);
}

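/*
 * Stop the rebalance thread: clear the thread pointer first, then
 * synchronize_rcu() so rebalance_wakeup() can't wake a task we're about to
 * free:
 */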
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

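/*
 * Start the rebalance thread; a no-op if it's already running or the
 * filesystem was mounted with nochanges:
 */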
int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating rebalance thread");
	if (ret)
		return ret;

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}

void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);
}