// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

/* bch_extent_rebalance: */

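/*
 * Scan an extent's entries for the bch_extent_rebalance entry that caches the
 * io path options it was written with:
 */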
static const struct bch_extent_rebalance *bch2_bkey_ptrs_rebalance_opts(struct bkey_ptrs_c ptrs)
{
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}

static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	return bch2_bkey_ptrs_rebalance_opts(bch2_bkey_ptrs_c(k));
}

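/*
 * Pointers that must be rewritten to honor the background_compression option:
 * returns a bitmask with one bit set per non-cached pointer not compressed
 * with the desired compression type. Extents marked incompressible, or with
 * unwritten pointers, get no work:
 */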
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
					   struct bch_io_opts *opts,
					   struct bkey_s_c k,
					   struct bkey_ptrs_c ptrs)
{
	if (!opts->background_compression)
		return 0;

	unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
		    p.ptr.unwritten)
			return 0;

		if (!p.ptr.cached && p.crc.compression_type != compression_type)
			rewrite_ptrs |= ptr_bit;
		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}

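/*
 * Pointers that must be rewritten to honor the background_target option:
 * returns a bitmask with one bit set per non-cached pointer on a device
 * outside the target:
 */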
static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
				       struct bch_io_opts *opts,
				       struct bkey_ptrs_c ptrs)
{
	if (!opts->background_target ||
	    !bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target))
		return 0;

	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
			rewrite_ptrs |= ptr_bit;
		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}

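/* Union of the compress and move bitmasks, i.e. every pointer needing work: */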
static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
					      struct bch_io_opts *opts,
					      struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) |
		bch2_bkey_ptrs_need_move(c, opts, ptrs);
}

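/*
 * Number of compressed sectors still needing work, per the rebalance options
 * stashed in the extent itself; this feeds the rebalance_work accounting
 * counter:
 */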
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	const struct bch_extent_rebalance *opts = bch2_bkey_ptrs_rebalance_opts(ptrs);
	if (!opts)
		return 0;

	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	u64 sectors = 0;

	if (opts->background_compression) {
		unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				sectors = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				sectors += p.crc.compressed_size;
		}
	}
incompressible:
	if (opts->background_target)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
				sectors += p.crc.compressed_size;

	return sectors;
}

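/*
 * Does the extent's cached rebalance entry need to be added, updated or
 * dropped, given the io options currently in effect?
 */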
static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts,
					     struct bkey_s_c k)
{
	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
		struct bch_extent_rebalance new = io_opts_to_rebalance_opts(c, opts);
		return old == NULL || memcmp(old, &new, sizeof(new));
	} else {
		return old != NULL;
	}
}

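/*
 * Add, update or drop the rebalance entry on a key about to be written:
 * reflink_v keys always carry one, since an indirect extent has no single
 * owning inode to recover io options from; ordinary extents carry one only
 * while they have pointers needing work:
 */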
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
				  struct bkey_i *_k)
{
	if (!bkey_extent_is_direct_data(&_k->k))
		return 0;

	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *old =
		(struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) {
		if (!old) {
			old = bkey_val_end(k);
			k.k->u64s += sizeof(*old) / sizeof(u64);
		}

		*old = io_opts_to_rebalance_opts(c, opts);
	} else {
		if (old)
			extent_entry_drop(k, (union bch_extent_entry *) old);
	}

	return 0;
}

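/*
 * Pick up per-extent io options stashed in a reflink_v key, then rewrite the
 * key's rebalance entry if it no longer matches those options. The update is
 * committed immediately, and since a successful commit invalidates @k we
 * return a transaction restart:
 */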
int bch2_get_update_rebalance_opts(struct btree_trans *trans,
				   struct bch_io_opts *io_opts,
				   struct btree_iter *iter,
				   struct bkey_s_c k)
{
	BUG_ON(iter->flags & BTREE_ITER_is_extents);
	BUG_ON(iter->flags & BTREE_ITER_filter_snapshots);

	const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v
		? bch2_bkey_rebalance_opts(k) : NULL;
	if (r) {
#define x(_name)							\
		if (r->_name##_from_inode) {				\
			io_opts->_name = r->_name;			\
			io_opts->_name##_from_inode = true;		\
		}
		BCH_REBALANCE_OPTS()
#undef x
	}

	if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k))
		return 0;

	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_reassemble(n, k);

	/* On successful transaction commit, @k was invalidated: */

	return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, 0) ?:
		-BCH_ERR_transaction_restart_nested;
}

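/*
 * Scan requests are cookies in the rebalance_work btree at a magic offset
 * past any real extent offset: a cookie at (inum, REBALANCE_WORK_SCAN_OFFSET)
 * requests a scan of that inode, and inum == 0 requests a scan of the entire
 * filesystem:
 */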
#define REBALANCE_WORK_SCAN_OFFSET	(U64_MAX - 1)

static const char * const bch2_rebalance_state_strs[] = {
#define x(t) #t,
	BCH_REBALANCE_STATES()
	NULL
#undef x
};

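/* Request a rebalance scan by bumping the scan cookie for @inum: */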
int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie *cookie;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
	ret = PTR_ERR_OR_ZERO(cookie);
	if (ret)
		goto err;

	bkey_cookie_init(&cookie->k_i);
	cookie->k.p = iter.pos;
	cookie->v.cookie = cpu_to_le64(v + 1);

	ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
	int ret = bch2_trans_commit_do(c, NULL, NULL,
				       BCH_TRANS_COMMIT_no_enospc,
			    bch2_set_rebalance_needs_scan_trans(trans, inum));
	rebalance_wakeup(c);
	return ret;
}

int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
	return bch2_set_rebalance_needs_scan(c, 0);
}

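/*
 * Scan finished: delete the cookie, but only if it hasn't been bumped again
 * by a new scan request since the scan started:
 */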
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	if (v == cookie)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
					    struct btree_iter *work_iter)
{
	return !kthread_should_stop()
		? bch2_btree_iter_peek(work_iter)
		: bkey_s_c_null;
}

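/* Extent no longer needs work: drop its stale rebalance entry: */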
static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
					   struct btree_iter *iter,
					   struct bkey_s_c k)
{
	if (!bch2_bkey_rebalance_opts(k))
		return 0;

	struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	extent_entry_drop(bkey_i_to_s(n),
			  (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
	return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

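/*
 * Look up the extent a rebalance_work entry refers to and decide what to do
 * with it: entries with a zero inode field refer to the reflink btree,
 * everything else to the extents btree:
 */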
static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
			struct bpos work_pos,
			struct btree_iter *extent_iter,
			struct bch_io_opts *io_opts,
			struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;

	bch2_trans_iter_exit(trans, extent_iter);
	bch2_trans_iter_init(trans, extent_iter,
			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
			     work_pos,
			     BTREE_ITER_all_snapshots);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
	if (bkey_err(k))
		return k;

	int ret = bch2_move_get_io_opts_one(trans, io_opts, extent_iter, k);
	if (ret)
		return bkey_s_c_err(ret);

	memset(data_opts, 0, sizeof(*data_opts));
	data_opts->rewrite_ptrs		= bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target		= io_opts->background_target;
	data_opts->write_flags		|= BCH_WRITE_only_specified_devs;

	if (!data_opts->rewrite_ptrs) {
		/*
		 * Is the device we'd want to write to offline? Did the
		 * devices in the target change?
		 *
		 * Either way, we'll need a full scan before this extent is
		 * picked up again:
		 */
		int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
		if (ret)
			return bkey_s_c_err(ret);
		return bkey_s_c_null;
	}

	if (trace_rebalance_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

		unsigned p = bch2_bkey_ptrs_need_compress(c, io_opts, k, ptrs);
		if (p) {
			prt_str(&buf, "compression=");
			bch2_compression_opt_to_text(&buf, io_opts->background_compression);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		p = bch2_bkey_ptrs_need_move(c, io_opts, ptrs);
		if (p) {
			prt_str(&buf, "move=");
			bch2_target_to_text(&buf, c, io_opts->background_target);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		trace_rebalance_extent(c, buf.buf);
		printbuf_exit(&buf);
	}

	return k;
}

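/*
 * Process a single rebalance_work entry: look up the extent and kick off a
 * move/rewrite of the pointers that need it:
 */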
noinline_for_stack
static int do_rebalance_extent(struct moving_context *ctxt,
			       struct bpos work_pos,
			       struct btree_iter *extent_iter)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	struct data_update_opts data_opts;
	struct bch_io_opts io_opts;
	struct bkey_s_c k;
	struct bkey_buf sk;
	int ret;

	ctxt->stats = &r->work_stats;
	r->state = BCH_REBALANCE_working;

	bch2_bkey_buf_init(&sk);

	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
				extent_iter, &io_opts, &data_opts));
	if (ret || !k.k)
		goto out;

	atomic64_add(k.k->size, &ctxt->stats->sectors_seen);

	/*
	 * The iterator gets unlocked by __bch2_read_extent - need to
	 * save a copy of @k elsewhere:
	 */
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
	if (ret) {
		if (bch2_err_matches(ret, ENOMEM)) {
			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(ctxt);
			ret = -BCH_ERR_transaction_restart_nested;
		}

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto out;

		/* skip it and continue, XXX signal failure */
		ret = 0;
	}
out:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

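/*
 * Move predicate for scans: applies the same policy as the rebalance_work
 * path, but is evaluated for every extent in the scanned range:
 */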
static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	data_opts->rewrite_ptrs		= bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target		= io_opts->background_target;
	data_opts->write_flags		|= BCH_WRITE_only_specified_devs;
	return data_opts->rewrite_ptrs != 0;
}

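/*
 * Walk every extent in the requested range (one inode, or the entire
 * filesystem when inum == 0), rebalance whatever needs it, then clear the
 * scan cookie:
 */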
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	int ret;

	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
	ctxt->stats = &r->scan_stats;

	if (!inum) {
		r->scan_start	= BBPOS_MIN;
		r->scan_end	= BBPOS_MAX;
	} else {
		r->scan_start	= BBPOS(BTREE_ID_extents, POS(inum, 0));
		r->scan_end	= BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
	}

	r->state = BCH_REBALANCE_scanning;

	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));

	bch2_move_stats_exit(&r->scan_stats, trans->c);
	return ret;
}

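/*
 * Out of work: sleep on the write io clock until roughly 1/64th of the
 * smallest rw member's capacity has been written (defaulting to 128MB worth
 * of sectors when there are no rw members), so we wake up in proportion to
 * foreground write activity:
 */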
static void rebalance_wait(struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 now = atomic64_read(&clock->now);
	u64 min_member_capacity = bch2_min_rw_member_capacity(c);

	if (min_member_capacity == U64_MAX)
		min_member_capacity = 128 * 2048;

	r->wait_iotime_end		= now + (min_member_capacity >> 6);

	if (r->state != BCH_REBALANCE_waiting) {
		r->wait_iotime_start	= now;
		r->wait_wallclock_start	= ktime_get_real_ns();
		r->state		= BCH_REBALANCE_waiting;
	}

	bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
}

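/*
 * Main rebalance loop: walk the rebalance_work btree, handle scan cookies and
 * extent entries as we find them, and wait when we run out of work:
 */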
static int do_rebalance(struct moving_context *ctxt)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct btree_iter rebalance_work_iter, extent_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_begin(trans);

	bch2_move_stats_init(&r->work_stats, "rebalance_work");
	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

	bch2_trans_iter_init(trans, &rebalance_work_iter,
			     BTREE_ID_rebalance_work, POS_MIN,
			     BTREE_ITER_all_snapshots);

	while (!bch2_move_ratelimit(ctxt)) {
		if (!c->opts.rebalance_enabled) {
			bch2_moving_ctxt_flush_all(ctxt);
			kthread_wait_freezable(c->opts.rebalance_enabled ||
					       kthread_should_stop());
		}

		if (kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret || !k.k)
			break;

		ret = k.k->type == KEY_TYPE_cookie
			? do_rebalance_scan(ctxt, k.k->p.inode,
					    le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
			: do_rebalance_extent(ctxt, k.k->p, &extent_iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&rebalance_work_iter);
	}

	bch2_trans_iter_exit(trans, &extent_iter);
	bch2_trans_iter_exit(trans, &rebalance_work_iter);
	bch2_move_stats_exit(&r->scan_stats, c);

	if (!ret &&
	    !kthread_should_stop() &&
	    !atomic64_read(&r->work_stats.sectors_seen) &&
	    !atomic64_read(&r->scan_stats.sectors_seen)) {
		bch2_moving_ctxt_flush_all(ctxt);
		bch2_trans_unlock_long(trans);
		rebalance_wait(c);
	}

	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct moving_context ctxt;

	set_freezable();

	bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
			      writepoint_ptr(&c->rebalance_write_point),
			      true);

	while (!kthread_should_stop() && !do_rebalance(&ctxt))
		;

	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 32);

	struct bch_fs_rebalance *r = &c->rebalance;

	/* print pending work */
	struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_rebalance_work, };
	u64 v;
	bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);

	prt_printf(out, "pending work:\t");
	prt_human_readable_u64(out, v);
	prt_printf(out, "\n\n");

	prt_str(out, bch2_rebalance_state_strs[r->state]);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	switch (r->state) {
	case BCH_REBALANCE_waiting: {
		u64 now = atomic64_read(&c->io_clock[WRITE].now);

		prt_printf(out, "io wait duration:\t");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
		prt_newline(out);

		prt_printf(out, "io wait remaining:\t");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
		prt_newline(out);

		prt_printf(out, "duration waited:\t");
		bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
		prt_newline(out);
		break;
	}
	case BCH_REBALANCE_working:
		bch2_move_stats_to_text(out, &r->work_stats);
		break;
	case BCH_REBALANCE_scanning:
		bch2_move_stats_to_text(out, &r->scan_stats);
		break;
	}
	prt_newline(out);

	rcu_read_lock();
	struct task_struct *t = rcu_dereference(c->rebalance.thread);
	if (t)
		get_task_struct(t);
	rcu_read_unlock();

	if (t) {
		bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
		put_task_struct(t);
	}

	printbuf_indent_sub(out, 2);
}

void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating rebalance thread");
	if (ret)
		return ret;

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}

void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);
}
701