// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

/* bch_extent_rebalance: */

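/*
 * Look up the rebalance entry embedded in an extent's value, if present; its
 * presence marks the extent as having pending work for the rebalance thread.
 */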
static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}

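/*
 * Returns a bitmask of the pointers that should be rewritten because their
 * data isn't compressed with the requested background compression type;
 * zero means no recompression work is needed.
 */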
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
						    struct bch_io_opts *opts,
						    struct bkey_s_c k,
						    struct bkey_ptrs_c ptrs)
{
	if (!opts->background_compression)
		return 0;

	unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
		    p.ptr.unwritten)
			return 0;

		if (!p.ptr.cached && p.crc.compression_type != compression_type)
			rewrite_ptrs |= ptr_bit;
		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}

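/*
 * Returns a bitmask of the non-cached pointers that live outside the
 * configured background target and thus need their data migrated.
 */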
static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
						struct bch_io_opts *opts,
						struct bkey_ptrs_c ptrs)
{
	if (!opts->background_target ||
	    !bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target))
		return 0;

	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
			rewrite_ptrs |= ptr_bit;
		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}

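/*
 * Combined check: the full set of pointers to rewrite, for either
 * compression or placement reasons.
 */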
static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
					      struct bch_io_opts *opts,
					      struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) |
		bch2_bkey_ptrs_need_move(c, opts, ptrs);
}

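/*
 * Count how many sectors of this extent the rebalance thread would rewrite,
 * based on the rebalance options stored in the extent itself; used to
 * account pending rebalance work.
 */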
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k);
	if (!opts)
		return 0;

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	u64 sectors = 0;

	if (opts->background_compression) {
		unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				sectors = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				sectors += p.crc.compressed_size;
		}
	}
incompressible:
	if (opts->background_target)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
				sectors += p.crc.compressed_size;

	return sectors;
}

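/*
 * Decide whether the rebalance entry stored in @k is stale: either the key
 * needs rebalance work but carries missing or outdated options, or it still
 * carries options despite no longer needing any work.
 */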
static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts,
					     struct bkey_s_c k)
{
	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
		struct bch_extent_rebalance new = io_opts_to_rebalance_opts(c, opts);
		return old == NULL || memcmp(old, &new, sizeof(new));
	} else {
		return old != NULL;
	}
}

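/*
 * Add, update or drop the rebalance entry on @_k so that it matches the
 * current io options; appending a new entry grows the key's value inline.
 */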
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
				  struct bkey_i *_k)
{
	if (!bkey_extent_is_direct_data(&_k->k))
		return 0;

	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *old =
		(struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) {
		if (!old) {
			old = bkey_val_end(k);
			k.k->u64s += sizeof(*old) / sizeof(u64);
		}

		*old = io_opts_to_rebalance_opts(c, opts);
	} else {
		if (old)
			extent_entry_drop(k, (union bch_extent_entry *) old);
	}

	return 0;
}

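/*
 * Merge per-extent rebalance options into @io_opts (reflink keys carry
 * their own copy of the originating inode's options), then rewrite the key
 * if its rebalance entry is out of date. A successful update commits and
 * returns a transaction restart error so the caller re-iterates.
 */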
int bch2_get_update_rebalance_opts(struct btree_trans *trans,
				   struct bch_io_opts *io_opts,
				   struct btree_iter *iter,
				   struct bkey_s_c k)
{
	BUG_ON(iter->flags & BTREE_ITER_is_extents);
	BUG_ON(iter->flags & BTREE_ITER_filter_snapshots);

	const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v
		? bch2_bkey_rebalance_opts(k) : NULL;
	if (r) {
#define x(_name)						\
		if (r->_name##_from_inode) {			\
			io_opts->_name = r->_name;		\
			io_opts->_name##_from_inode = true;	\
		}
		BCH_REBALANCE_OPTS()
#undef x
	}

	if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k))
		return 0;

	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_reassemble(n, k);
	/* On successful transaction commit, @k was invalidated: */

	return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, 0) ?:
		-BCH_ERR_transaction_restart_nested;
}

#define REBALANCE_WORK_SCAN_OFFSET	(U64_MAX - 1)

static const char * const bch2_rebalance_state_strs[] = {
#define x(t) #t,
	BCH_REBALANCE_STATES()
	NULL
#undef x
};

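/*
 * Queue a scan of inode @inum (or of the whole filesystem when @inum is 0)
 * by writing a cookie key at a magic offset in the rebalance_work btree;
 * the cookie's value is bumped so concurrent scan requests aren't lost.
 */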
int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie *cookie;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
	ret = PTR_ERR_OR_ZERO(cookie);
	if (ret)
		goto err;

	bkey_cookie_init(&cookie->k_i);
	cookie->k.p = iter.pos;
	cookie->v.cookie = cpu_to_le64(v + 1);

	ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
	int ret = bch2_trans_commit_do(c, NULL, NULL,
				       BCH_TRANS_COMMIT_no_enospc,
				       bch2_set_rebalance_needs_scan_trans(trans, inum));
	rebalance_wakeup(c);
	return ret;
}

int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
	return bch2_set_rebalance_needs_scan(c, 0);
}

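/*
 * Remove the scan cookie for @inum, but only if it still carries the value
 * we saw when the scan started; a newer value means another scan request
 * arrived in the meantime and must not be dropped.
 */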
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	if (v == cookie)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

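/* Peek the next pending entry in the rebalance_work btree, if any. */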
static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
					    struct btree_iter *work_iter)
{
	return !kthread_should_stop()
		? bch2_btree_iter_peek(work_iter)
		: bkey_s_c_null;
}

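/*
 * Strip the rebalance entry from an extent that no longer needs work, so it
 * stops appearing in the rebalance_work btree.
 */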
static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
					   struct btree_iter *iter,
					   struct bkey_s_c k)
{
	if (!bch2_bkey_rebalance_opts(k))
		return 0;

	struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	extent_entry_drop(bkey_i_to_s(n),
			  (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
	return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

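/*
 * Translate a rebalance_work btree position into the extent it refers to
 * (inode 0 maps to the reflink btree) and compute the data update options
 * for rewriting it; returns a null key if the extent turns out not to need
 * work after all.
 */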
static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
					     struct bpos work_pos,
					     struct btree_iter *extent_iter,
					     struct bch_io_opts *io_opts,
					     struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;

	bch2_trans_iter_exit(trans, extent_iter);
	bch2_trans_iter_init(trans, extent_iter,
			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
			     work_pos,
			     BTREE_ITER_all_snapshots);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
	if (bkey_err(k))
		return k;

	int ret = bch2_move_get_io_opts_one(trans, io_opts, extent_iter, k);
	if (ret)
		return bkey_s_c_err(ret);

	memset(data_opts, 0, sizeof(*data_opts));
	data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target = io_opts->background_target;
	data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;

	if (!data_opts->rewrite_ptrs) {
		/*
		 * Is the device we'd want to write to offline? Did the
		 * devices in the target change?
		 *
		 * We'll now need a full scan before this extent is picked
		 * up again:
		 */
		int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
		if (ret)
			return bkey_s_c_err(ret);
		return bkey_s_c_null;
	}

	if (trace_rebalance_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

		unsigned p = bch2_bkey_ptrs_need_compress(c, io_opts, k, ptrs);
		if (p) {
			prt_str(&buf, "compression=");
			bch2_compression_opt_to_text(&buf, io_opts->background_compression);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		p = bch2_bkey_ptrs_need_move(c, io_opts, ptrs);
		if (p) {
			prt_str(&buf, "move=");
			bch2_target_to_text(&buf, c, io_opts->background_target);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		trace_rebalance_extent(c, buf.buf);
		printbuf_exit(&buf);
	}

	return k;
}

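/*
 * Rewrite a single extent from the rebalance_work queue; a copy of the key
 * is saved first because the move path may unlock the iterator.
 */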
noinline_for_stack
static int do_rebalance_extent(struct moving_context *ctxt,
			       struct bpos work_pos,
			       struct btree_iter *extent_iter)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	struct data_update_opts data_opts;
	struct bch_io_opts io_opts;
	struct bkey_s_c k;
	struct bkey_buf sk;
	int ret;

	ctxt->stats = &r->work_stats;
	r->state = BCH_REBALANCE_working;

	bch2_bkey_buf_init(&sk);

	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
						 extent_iter, &io_opts, &data_opts));
	if (ret || !k.k)
		goto out;

	atomic64_add(k.k->size, &ctxt->stats->sectors_seen);

	/*
	 * The iterator gets unlocked by __bch2_read_extent - need to
	 * save a copy of @k elsewhere:
	 */
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
	if (ret) {
		if (bch2_err_matches(ret, ENOMEM)) {
			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(ctxt);
			ret = -BCH_ERR_transaction_restart_nested;
		}

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto out;

		/* skip it and continue, XXX signal failure */
		ret = 0;
	}
out:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

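/*
 * Predicate for the full-scan path: mirrors the per-extent logic above, so
 * a scan rewrites exactly the extents the rebalance_work queue would.
 */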
static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target = io_opts->background_target;
	data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	return data_opts->rewrite_ptrs != 0;
}

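/*
 * Walk every extent of @inum (or the whole filesystem when @inum is 0),
 * rewriting whatever rebalance_pred() flags, then clear the scan cookie.
 */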
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	int ret;

	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
	ctxt->stats = &r->scan_stats;

	if (!inum) {
		r->scan_start	= BBPOS_MIN;
		r->scan_end	= BBPOS_MAX;
	} else {
		r->scan_start	= BBPOS(BTREE_ID_extents, POS(inum, 0));
		r->scan_end	= BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
	}

	r->state = BCH_REBALANCE_scanning;

	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));

	bch2_move_stats_exit(&r->scan_stats, trans->c);
	return ret;
}

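/*
 * Nothing to do: wait on the write IO clock until roughly 1/64th of the
 * smallest writable member's capacity has been written, using a small
 * fallback capacity when no writable device is found.
 */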
static void rebalance_wait(struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 now = atomic64_read(&clock->now);
	u64 min_member_capacity = bch2_min_rw_member_capacity(c);

	if (min_member_capacity == U64_MAX)
		min_member_capacity = 128 * 2048;

	r->wait_iotime_end		= now + (min_member_capacity >> 6);

	if (r->state != BCH_REBALANCE_waiting) {
		r->wait_iotime_start	= now;
		r->wait_wallclock_start	= ktime_get_real_ns();
		r->state		= BCH_REBALANCE_waiting;
	}

	bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
}

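/*
 * Main work loop: drain the rebalance_work btree, dispatching scan cookies
 * to do_rebalance_scan() and extent entries to do_rebalance_extent(), then
 * wait on the IO clock once the queue is empty.
 */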
static int do_rebalance(struct moving_context *ctxt)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct btree_iter rebalance_work_iter, extent_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_begin(trans);

	bch2_move_stats_init(&r->work_stats, "rebalance_work");
	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

	bch2_trans_iter_init(trans, &rebalance_work_iter,
			     BTREE_ID_rebalance_work, POS_MIN,
			     BTREE_ITER_all_snapshots);

	while (!bch2_move_ratelimit(ctxt)) {
		if (!c->opts.rebalance_enabled) {
			bch2_moving_ctxt_flush_all(ctxt);
			kthread_wait_freezable(c->opts.rebalance_enabled ||
					       kthread_should_stop());
		}

		if (kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret || !k.k)
			break;

		ret = k.k->type == KEY_TYPE_cookie
			? do_rebalance_scan(ctxt, k.k->p.inode,
					    le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
			: do_rebalance_extent(ctxt, k.k->p, &extent_iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&rebalance_work_iter);
	}

	bch2_trans_iter_exit(trans, &extent_iter);
	bch2_trans_iter_exit(trans, &rebalance_work_iter);
	bch2_move_stats_exit(&r->scan_stats, c);

	if (!ret &&
	    !kthread_should_stop() &&
	    !atomic64_read(&r->work_stats.sectors_seen) &&
	    !atomic64_read(&r->scan_stats.sectors_seen)) {
		bch2_moving_ctxt_flush_all(ctxt);
		bch2_trans_unlock_long(trans);
		rebalance_wait(c);
	}

	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

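/* Kthread entry point: loop until asked to stop or do_rebalance() errors. */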
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct moving_context ctxt;

	set_freezable();

	bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
			      writepoint_ptr(&c->rebalance_write_point),
			      true);

	while (!kthread_should_stop() && !do_rebalance(&ctxt))
		;

	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;

	prt_str(out, bch2_rebalance_state_strs[r->state]);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	switch (r->state) {
	case BCH_REBALANCE_waiting: {
		u64 now = atomic64_read(&c->io_clock[WRITE].now);

		prt_str(out, "io wait duration:  ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
		prt_newline(out);

		prt_str(out, "io wait remaining: ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
		prt_newline(out);

		prt_str(out, "duration waited:   ");
		bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
		prt_newline(out);
		break;
	}
	case BCH_REBALANCE_working:
		bch2_move_stats_to_text(out, &r->work_stats);
		break;
	case BCH_REBALANCE_scanning:
		bch2_move_stats_to_text(out, &r->scan_stats);
		break;
	}
	prt_newline(out);
	printbuf_indent_sub(out, 2);
}

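/*
 * Stop the rebalance thread; the rate is bumped to maximum first so any
 * in-flight ratelimit sleep ends promptly.
 */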
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

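/*
 * Start the rebalance thread, unless it's already running or the
 * filesystem was mounted with nochanges.
 */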
int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating rebalance thread");
	if (ret)
		return ret;

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}

void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);
}