xref: /linux/fs/bcachefs/move.c (revision 4a4b30ea80d8cb5e8c4c62bb86201f4ea0d9b030)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "alloc_background.h"
5 #include "alloc_foreground.h"
6 #include "backpointers.h"
7 #include "bkey_buf.h"
8 #include "btree_gc.h"
9 #include "btree_io.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "btree_write_buffer.h"
13 #include "compress.h"
14 #include "disk_groups.h"
15 #include "ec.h"
16 #include "errcode.h"
17 #include "error.h"
18 #include "inode.h"
19 #include "io_read.h"
20 #include "io_write.h"
21 #include "journal_reclaim.h"
22 #include "keylist.h"
23 #include "move.h"
24 #include "rebalance.h"
25 #include "reflink.h"
26 #include "replicas.h"
27 #include "snapshot.h"
28 #include "super-io.h"
29 #include "trace.h"
30 
31 #include <linux/ioprio.h>
32 #include <linux/kthread.h>
33 
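/*
 * Table mapping BCH_DATA_OP_* indices to their names: BCH_DATA_OPS() is an
 * x-macro whose x(t, n, ...) entries expand here to [n] = "t", so each ioctl
 * data op index resolves to its stringified name (used by bch2_data_job()
 * when initializing move stats).
 */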
34 const char * const bch2_data_ops_strs[] = {
35 #define x(t, n, ...) [n] = #t,
36 	BCH_DATA_OPS()
37 #undef x
38 	NULL
39 };
40 
41 static void trace_io_move2(struct bch_fs *c, struct bkey_s_c k,
42 			       struct bch_io_opts *io_opts,
43 			       struct data_update_opts *data_opts)
44 {
45 	if (trace_io_move_enabled()) {
46 		struct printbuf buf = PRINTBUF;
47 
48 		bch2_bkey_val_to_text(&buf, c, k);
49 		prt_newline(&buf);
50 		bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
51 		trace_io_move(c, buf.buf);
52 		printbuf_exit(&buf);
53 	}
54 }
55 
56 static void trace_io_move_read2(struct bch_fs *c, struct bkey_s_c k)
57 {
58 	if (trace_io_move_read_enabled()) {
59 		struct printbuf buf = PRINTBUF;
60 
61 		bch2_bkey_val_to_text(&buf, c, k);
62 		trace_io_move_read(c, buf.buf);
63 		printbuf_exit(&buf);
64 	}
65 }
66 
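/*
 * One in-flight data move: the read side fills write.rbio, and once
 * move_read_endio() marks it completed, the pending-writes path issues the
 * rewrite via move_write()/bch2_data_update_read_done(). read_list and
 * io_list link it into the owning moving_context; b, if set, holds a count
 * on the source bucket while the move is in flight.
 */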
67 struct moving_io {
68 	struct list_head		read_list;
69 	struct list_head		io_list;
70 	struct move_bucket_in_flight	*b;
71 	struct closure			cl;
72 	bool				read_completed;
73 
74 	unsigned			read_sectors;
75 	unsigned			write_sectors;
76 
77 	struct data_update		write;
78 };
79 
80 static void move_free(struct moving_io *io)
81 {
82 	struct moving_context *ctxt = io->write.ctxt;
83 
84 	if (io->b)
85 		atomic_dec(&io->b->count);
86 
87 	mutex_lock(&ctxt->lock);
88 	list_del(&io->io_list);
89 	wake_up(&ctxt->wait);
90 	mutex_unlock(&ctxt->lock);
91 
92 	if (!io->write.data_opts.scrub) {
93 		bch2_data_update_exit(&io->write);
94 	} else {
95 		bch2_bio_free_pages_pool(io->write.op.c, &io->write.op.wbio.bio);
96 		kfree(io->write.bvecs);
97 	}
98 	kfree(io);
99 }
100 
101 static void move_write_done(struct bch_write_op *op)
102 {
103 	struct moving_io *io = container_of(op, struct moving_io, write.op);
104 	struct bch_fs *c = op->c;
105 	struct moving_context *ctxt = io->write.ctxt;
106 
107 	if (op->error) {
108 		if (trace_io_move_write_fail_enabled()) {
109 			struct printbuf buf = PRINTBUF;
110 
111 			bch2_write_op_to_text(&buf, op);
112 			prt_printf(&buf, "ret\t%s\n", bch2_err_str(op->error));
113 			trace_io_move_write_fail(c, buf.buf);
114 			printbuf_exit(&buf);
115 		}
116 		this_cpu_inc(c->counters[BCH_COUNTER_io_move_write_fail]);
117 
118 		ctxt->write_error = true;
119 	}
120 
121 	atomic_sub(io->write_sectors, &ctxt->write_sectors);
122 	atomic_dec(&ctxt->write_ios);
123 	move_free(io);
124 	closure_put(&ctxt->cl);
125 }
126 
127 static void move_write(struct moving_io *io)
128 {
129 	struct moving_context *ctxt = io->write.ctxt;
130 
131 	if (ctxt->stats) {
132 		if (io->write.rbio.bio.bi_status)
133 			atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9,
134 				     &ctxt->stats->sectors_error_uncorrected);
135 		else if (io->write.rbio.saw_error)
136 			atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9,
137 				     &ctxt->stats->sectors_error_corrected);
138 	}
139 
140 	if (unlikely(io->write.rbio.ret ||
141 		     io->write.rbio.bio.bi_status ||
142 		     io->write.data_opts.scrub)) {
143 		move_free(io);
144 		return;
145 	}
146 
147 	if (trace_io_move_write_enabled()) {
148 		struct bch_fs *c = io->write.op.c;
149 		struct printbuf buf = PRINTBUF;
150 
151 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
152 		trace_io_move_write(c, buf.buf);
153 		printbuf_exit(&buf);
154 	}
155 
156 	closure_get(&io->write.ctxt->cl);
157 	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
158 	atomic_inc(&io->write.ctxt->write_ios);
159 
160 	bch2_data_update_read_done(&io->write);
161 }
162 
163 struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
164 {
165 	struct moving_io *io =
166 		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
167 
168 	return io && io->read_completed ? io : NULL;
169 }
170 
171 static void move_read_endio(struct bio *bio)
172 {
173 	struct moving_io *io = container_of(bio, struct moving_io, write.rbio.bio);
174 	struct moving_context *ctxt = io->write.ctxt;
175 
176 	atomic_sub(io->read_sectors, &ctxt->read_sectors);
177 	atomic_dec(&ctxt->read_ios);
178 	io->read_completed = true;
179 
180 	wake_up(&ctxt->wait);
181 	closure_put(&ctxt->cl);
182 }
183 
184 void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
185 {
186 	struct moving_io *io;
187 
188 	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
189 		bch2_trans_unlock_long(ctxt->trans);
190 		list_del(&io->read_list);
191 		move_write(io);
192 	}
193 }
194 
195 void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
196 {
197 	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
198 
199 	move_ctxt_wait_event(ctxt,
200 		!atomic_read(&ctxt->write_sectors) ||
201 		atomic_read(&ctxt->write_sectors) != sectors_pending);
202 }
203 
204 void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
205 {
206 	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
207 	bch2_trans_unlock_long(ctxt->trans);
208 	closure_sync(&ctxt->cl);
209 }
210 
211 void bch2_moving_ctxt_exit(struct moving_context *ctxt)
212 {
213 	struct bch_fs *c = ctxt->trans->c;
214 
215 	bch2_moving_ctxt_flush_all(ctxt);
216 
217 	EBUG_ON(atomic_read(&ctxt->write_sectors));
218 	EBUG_ON(atomic_read(&ctxt->write_ios));
219 	EBUG_ON(atomic_read(&ctxt->read_sectors));
220 	EBUG_ON(atomic_read(&ctxt->read_ios));
221 
222 	mutex_lock(&c->moving_context_lock);
223 	list_del(&ctxt->list);
224 	mutex_unlock(&c->moving_context_lock);
225 
226 	/*
227 	 * Generally, releasing a transaction within a transaction restart means
228 	 * an unhandled transaction restart: but this can happen legitimately
229 	 * within the move code, e.g. when bch2_move_ratelimit() tells us to
230 	 * exit before we've retried
231 	 */
232 	bch2_trans_begin(ctxt->trans);
233 	bch2_trans_put(ctxt->trans);
234 	memset(ctxt, 0, sizeof(*ctxt));
235 }
236 
237 void bch2_moving_ctxt_init(struct moving_context *ctxt,
238 			   struct bch_fs *c,
239 			   struct bch_ratelimit *rate,
240 			   struct bch_move_stats *stats,
241 			   struct write_point_specifier wp,
242 			   bool wait_on_copygc)
243 {
244 	memset(ctxt, 0, sizeof(*ctxt));
245 
246 	ctxt->trans	= bch2_trans_get(c);
247 	ctxt->fn	= (void *) _RET_IP_;
248 	ctxt->rate	= rate;
249 	ctxt->stats	= stats;
250 	ctxt->wp	= wp;
251 	ctxt->wait_on_copygc = wait_on_copygc;
252 
253 	closure_init_stack(&ctxt->cl);
254 
255 	mutex_init(&ctxt->lock);
256 	INIT_LIST_HEAD(&ctxt->reads);
257 	INIT_LIST_HEAD(&ctxt->ios);
258 	init_waitqueue_head(&ctxt->wait);
259 
260 	mutex_lock(&c->moving_context_lock);
261 	list_add(&ctxt->list, &c->moving_context_list);
262 	mutex_unlock(&c->moving_context_lock);
263 }
264 
265 void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
266 {
267 	trace_move_data(c, stats);
268 }
269 
270 void bch2_move_stats_init(struct bch_move_stats *stats, const char *name)
271 {
272 	memset(stats, 0, sizeof(*stats));
273 	stats->data_type = BCH_DATA_user;
274 	scnprintf(stats->name, sizeof(stats->name), "%s", name);
275 }
276 
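/*
 * Queue a single extent (or, for scrub, a read-only verification) for
 * movement: allocate a struct moving_io, set up the data update, account the
 * I/O against the context's rate limit and in-flight counters, then kick off
 * the read. For non-scrub moves the write is issued later from the
 * pending-writes path once the read completes.
 */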
277 int bch2_move_extent(struct moving_context *ctxt,
278 		     struct move_bucket_in_flight *bucket_in_flight,
279 		     struct btree_iter *iter,
280 		     struct bkey_s_c k,
281 		     struct bch_io_opts io_opts,
282 		     struct data_update_opts data_opts)
283 {
284 	struct btree_trans *trans = ctxt->trans;
285 	struct bch_fs *c = trans->c;
286 	int ret = -ENOMEM;
287 
288 	trace_io_move2(c, k, &io_opts, &data_opts);
289 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
290 
291 	if (ctxt->stats)
292 		ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
293 
294 	bch2_data_update_opts_normalize(k, &data_opts);
295 
296 	if (!data_opts.rewrite_ptrs &&
297 	    !data_opts.extra_replicas &&
298 	    !data_opts.scrub) {
299 		if (data_opts.kill_ptrs)
300 			return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts);
301 		return 0;
302 	}
303 
304 	/*
305 	 * Before memory allocations & taking nocow locks in
306 	 * bch2_data_update_init():
307 	 */
308 	bch2_trans_unlock(trans);
309 
310 	struct moving_io *io = kzalloc(sizeof(struct moving_io), GFP_KERNEL);
311 	if (!io)
312 		goto err;
313 
314 	INIT_LIST_HEAD(&io->io_list);
315 	io->write.ctxt		= ctxt;
316 	io->read_sectors	= k.k->size;
317 	io->write_sectors	= k.k->size;
318 
319 	if (!data_opts.scrub) {
320 		ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
321 					    &io_opts, data_opts, iter->btree_id, k);
322 		if (ret)
323 			goto err_free;
324 
325 		io->write.op.end_io	= move_write_done;
326 	} else {
327 		bch2_bkey_buf_init(&io->write.k);
328 		bch2_bkey_buf_reassemble(&io->write.k, c, k);
329 
330 		io->write.op.c		= c;
331 		io->write.data_opts	= data_opts;
332 
333 		ret = bch2_data_update_bios_init(&io->write, c, &io_opts);
334 		if (ret)
335 			goto err_free;
336 	}
337 
338 	io->write.rbio.bio.bi_end_io = move_read_endio;
339 	io->write.rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
340 
341 	if (ctxt->rate)
342 		bch2_ratelimit_increment(ctxt->rate, k.k->size);
343 
344 	if (ctxt->stats) {
345 		atomic64_inc(&ctxt->stats->keys_moved);
346 		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
347 	}
348 
349 	if (bucket_in_flight) {
350 		io->b = bucket_in_flight;
351 		atomic_inc(&io->b->count);
352 	}
353 
354 	trace_io_move_read2(c, k);
355 
356 	mutex_lock(&ctxt->lock);
357 	atomic_add(io->read_sectors, &ctxt->read_sectors);
358 	atomic_inc(&ctxt->read_ios);
359 
360 	list_add_tail(&io->read_list, &ctxt->reads);
361 	list_add_tail(&io->io_list, &ctxt->ios);
362 	mutex_unlock(&ctxt->lock);
363 
364 	/*
365 	 * dropped by move_read_endio() - guards against use after free of
366 	 * ctxt when doing wakeup
367 	 */
368 	closure_get(&ctxt->cl);
369 	__bch2_read_extent(trans, &io->write.rbio,
370 			   io->write.rbio.bio.bi_iter,
371 			   bkey_start_pos(k.k),
372 			   iter->btree_id, k, 0,
373 			   NULL,
374 			   BCH_READ_last_fragment,
375 			   data_opts.scrub ?  data_opts.read_dev : -1);
376 	return 0;
377 err_free:
378 	kfree(io);
379 err:
380 	if (bch2_err_matches(ret, BCH_ERR_data_update_done))
381 		return 0;
382 
383 	if (bch2_err_matches(ret, EROFS) ||
384 	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
385 		return ret;
386 
387 	count_event(c, io_move_start_fail);
388 
389 	if (trace_io_move_start_fail_enabled()) {
390 		struct printbuf buf = PRINTBUF;
391 
392 		bch2_bkey_val_to_text(&buf, c, k);
393 		prt_str(&buf, ": ");
394 		prt_str(&buf, bch2_err_str(ret));
395 		trace_io_move_start_fail(c, buf.buf);
396 		printbuf_exit(&buf);
397 	}
398 	return ret;
399 }
400 
401 static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
402 			  struct per_snapshot_io_opts *io_opts,
403 			  struct bpos extent_pos, /* extent_iter, extent_k may be in reflink btree */
404 			  struct btree_iter *extent_iter,
405 			  struct bkey_s_c extent_k)
406 {
407 	struct bch_fs *c = trans->c;
408 	u32 restart_count = trans->restart_count;
409 	struct bch_io_opts *opts_ret = &io_opts->fs_io_opts;
410 	int ret = 0;
411 
412 	if (extent_k.k->type == KEY_TYPE_reflink_v)
413 		goto out;
414 
415 	if (io_opts->cur_inum != extent_pos.inode) {
416 		io_opts->d.nr = 0;
417 
418 		ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_pos.inode),
419 					 BTREE_ITER_all_snapshots, k, ({
420 			if (k.k->p.offset != extent_pos.inode)
421 				break;
422 
423 			if (!bkey_is_inode(k.k))
424 				continue;
425 
426 			struct bch_inode_unpacked inode;
427 			_ret3 = bch2_inode_unpack(k, &inode);
428 			if (_ret3)
429 				break;
430 
431 			struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
432 			bch2_inode_opts_get(&e.io_opts, trans->c, &inode);
433 
434 			darray_push(&io_opts->d, e);
435 		}));
436 		io_opts->cur_inum = extent_pos.inode;
437 	}
438 
439 	ret = ret ?: trans_was_restarted(trans, restart_count);
440 	if (ret)
441 		return ERR_PTR(ret);
442 
443 	if (extent_k.k->p.snapshot)
444 		darray_for_each(io_opts->d, i)
445 			if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) {
446 				opts_ret = &i->io_opts;
447 				break;
448 			}
449 out:
450 	ret = bch2_get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k);
451 	if (ret)
452 		return ERR_PTR(ret);
453 	return opts_ret;
454 }
455 
456 int bch2_move_get_io_opts_one(struct btree_trans *trans,
457 			      struct bch_io_opts *io_opts,
458 			      struct btree_iter *extent_iter,
459 			      struct bkey_s_c extent_k)
460 {
461 	struct bch_fs *c = trans->c;
462 
463 	*io_opts = bch2_opts_to_inode_opts(c->opts);
464 
465 	/* reflink btree? */
466 	if (!extent_k.k->p.inode)
467 		goto out;
468 
469 	struct btree_iter inode_iter;
470 	struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
471 			       SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
472 			       BTREE_ITER_cached);
473 	int ret = bkey_err(inode_k);
474 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
475 		return ret;
476 
477 	if (!ret && bkey_is_inode(inode_k.k)) {
478 		struct bch_inode_unpacked inode;
479 		bch2_inode_unpack(inode_k, &inode);
480 		bch2_inode_opts_get(io_opts, c, &inode);
481 	}
482 	bch2_trans_iter_exit(trans, &inode_iter);
483 out:
484 	return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k);
485 }
486 
487 int bch2_move_ratelimit(struct moving_context *ctxt)
488 {
489 	struct bch_fs *c = ctxt->trans->c;
490 	bool is_kthread = current->flags & PF_KTHREAD;
491 	u64 delay;
492 
493 	if (ctxt->wait_on_copygc && c->copygc_running) {
494 		bch2_moving_ctxt_flush_all(ctxt);
495 		wait_event_killable(c->copygc_running_wq,
496 				    !c->copygc_running ||
497 				    (is_kthread && kthread_should_stop()));
498 	}
499 
500 	do {
501 		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
502 
503 		if (is_kthread && kthread_should_stop())
504 			return 1;
505 
506 		if (delay)
507 			move_ctxt_wait_event_timeout(ctxt,
508 					freezing(current) ||
509 					(is_kthread && kthread_should_stop()),
510 					delay);
511 
512 		if (unlikely(freezing(current))) {
513 			bch2_moving_ctxt_flush_all(ctxt);
514 			try_to_freeze();
515 		}
516 	} while (delay);
517 
518 	/*
519 	 * XXX: these limits really ought to be per device, SSDs and hard drives
520 	 * will want different limits
521 	 */
522 	move_ctxt_wait_event(ctxt,
523 		atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
524 		atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
525 		atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
526 		atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);
527 
528 	return 0;
529 }
530 
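/*
 * Walk one btree over [start, end), look up per-snapshot io options for each
 * extent, and hand matching extents to bch2_move_extent(). When moving a
 * single inode (start.inode == end.inode), reflink pointers are followed so
 * the indirect extents they reference are processed (and their rebalance
 * options updated) as well.
 */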
531 static int bch2_move_data_btree(struct moving_context *ctxt,
532 				struct bpos start,
533 				struct bpos end,
534 				move_pred_fn pred, void *arg,
535 				enum btree_id btree_id)
536 {
537 	struct btree_trans *trans = ctxt->trans;
538 	struct bch_fs *c = trans->c;
539 	struct per_snapshot_io_opts snapshot_io_opts;
540 	struct bch_io_opts *io_opts;
541 	struct bkey_buf sk;
542 	struct btree_iter iter, reflink_iter = {};
543 	struct bkey_s_c k;
544 	struct data_update_opts data_opts;
545 	/*
546 	 * If we're moving a single file, also process reflinked data it points
547 	 * to (this includes propagating changed io_opts from the inode to the
548 	 * extent):
549 	 */
550 	bool walk_indirect = start.inode == end.inode;
551 	int ret = 0, ret2;
552 
553 	per_snapshot_io_opts_init(&snapshot_io_opts, c);
554 	bch2_bkey_buf_init(&sk);
555 
556 	if (ctxt->stats) {
557 		ctxt->stats->data_type	= BCH_DATA_user;
558 		ctxt->stats->pos	= BBPOS(btree_id, start);
559 	}
560 
561 	bch2_trans_begin(trans);
562 	bch2_trans_iter_init(trans, &iter, btree_id, start,
563 			     BTREE_ITER_prefetch|
564 			     BTREE_ITER_not_extents|
565 			     BTREE_ITER_all_snapshots);
566 
567 	if (ctxt->rate)
568 		bch2_ratelimit_reset(ctxt->rate);
569 
570 	while (!bch2_move_ratelimit(ctxt)) {
571 		struct btree_iter *extent_iter = &iter;
572 
573 		bch2_trans_begin(trans);
574 
575 		k = bch2_btree_iter_peek(&iter);
576 		if (!k.k)
577 			break;
578 
579 		ret = bkey_err(k);
580 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
581 			continue;
582 		if (ret)
583 			break;
584 
585 		if (bkey_ge(bkey_start_pos(k.k), end))
586 			break;
587 
588 		if (ctxt->stats)
589 			ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
590 
591 		if (walk_indirect &&
592 		    k.k->type == KEY_TYPE_reflink_p &&
593 		    REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) {
594 			struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
595 			s64 offset_into_extent	= 0;
596 
597 			bch2_trans_iter_exit(trans, &reflink_iter);
598 			k = bch2_lookup_indirect_extent(trans, &reflink_iter, &offset_into_extent, p, true, 0);
599 			ret = bkey_err(k);
600 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
601 				continue;
602 			if (ret)
603 				break;
604 
605 			if (bkey_deleted(k.k))
606 				goto next_nondata;
607 
608 			/*
609 			 * XXX: reflink pointers may point to multiple indirect
610 			 * extents, so don't advance past the entire reflink
611 			 * pointer - need to fixup iter->k
612 			 */
613 			extent_iter = &reflink_iter;
614 			offset_into_extent = 0;
615 		}
616 
617 		if (!bkey_extent_is_direct_data(k.k))
618 			goto next_nondata;
619 
620 		io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts,
621 						iter.pos, extent_iter, k);
622 		ret = PTR_ERR_OR_ZERO(io_opts);
623 		if (ret)
624 			continue;
625 
626 		memset(&data_opts, 0, sizeof(data_opts));
627 		if (!pred(c, arg, k, io_opts, &data_opts))
628 			goto next;
629 
630 		/*
631 		 * The iterator gets unlocked by __bch2_read_extent - need to
632 		 * save a copy of @k elsewhere:
633 		 */
634 		bch2_bkey_buf_reassemble(&sk, c, k);
635 		k = bkey_i_to_s_c(sk.k);
636 
637 		ret2 = bch2_move_extent(ctxt, NULL, extent_iter, k, *io_opts, data_opts);
638 		if (ret2) {
639 			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
640 				continue;
641 
642 			if (bch2_err_matches(ret2, ENOMEM)) {
643 				/* memory allocation failure, wait for some IO to finish */
644 				bch2_move_ctxt_wait_for_io(ctxt);
645 				continue;
646 			}
647 
648 			/* XXX signal failure */
649 			goto next;
650 		}
651 next:
652 		if (ctxt->stats)
653 			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
654 next_nondata:
655 		bch2_btree_iter_advance(&iter);
656 	}
657 
658 	bch2_trans_iter_exit(trans, &reflink_iter);
659 	bch2_trans_iter_exit(trans, &iter);
660 	bch2_bkey_buf_exit(&sk, c);
661 	per_snapshot_io_opts_exit(&snapshot_io_opts);
662 
663 	return ret;
664 }
665 
666 int __bch2_move_data(struct moving_context *ctxt,
667 		     struct bbpos start,
668 		     struct bbpos end,
669 		     move_pred_fn pred, void *arg)
670 {
671 	struct bch_fs *c = ctxt->trans->c;
672 	enum btree_id id;
673 	int ret = 0;
674 
675 	for (id = start.btree;
676 	     id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
677 	     id++) {
678 		ctxt->stats->pos = BBPOS(id, POS_MIN);
679 
680 		if (!btree_type_has_ptrs(id) ||
681 		    !bch2_btree_id_root(c, id)->b)
682 			continue;
683 
684 		ret = bch2_move_data_btree(ctxt,
685 				       id == start.btree ? start.pos : POS_MIN,
686 				       id == end.btree   ? end.pos   : POS_MAX,
687 				       pred, arg, id);
688 		if (ret)
689 			break;
690 	}
691 
692 	return ret;
693 }
694 
695 int bch2_move_data(struct bch_fs *c,
696 		   struct bbpos start,
697 		   struct bbpos end,
698 		   struct bch_ratelimit *rate,
699 		   struct bch_move_stats *stats,
700 		   struct write_point_specifier wp,
701 		   bool wait_on_copygc,
702 		   move_pred_fn pred, void *arg)
703 {
704 	struct moving_context ctxt;
705 
706 	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
707 	int ret = __bch2_move_data(&ctxt, start, end, pred, arg);
708 	bch2_moving_ctxt_exit(&ctxt);
709 
710 	return ret;
711 }
712 
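/*
 * Physical (device/bucket-range) variant: iterate the backpointers btree over
 * the given device's bucket range, resolve each backpointer to its extent or
 * btree node, and move, rewrite or scrub it according to pred and the
 * requested data types.
 */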
713 static int __bch2_move_data_phys(struct moving_context *ctxt,
714 			struct move_bucket_in_flight *bucket_in_flight,
715 			unsigned dev,
716 			u64 bucket_start,
717 			u64 bucket_end,
718 			unsigned data_types,
719 			move_pred_fn pred, void *arg)
720 {
721 	struct btree_trans *trans = ctxt->trans;
722 	struct bch_fs *c = trans->c;
723 	bool is_kthread = current->flags & PF_KTHREAD;
724 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
725 	struct btree_iter iter = {}, bp_iter = {};
726 	struct bkey_buf sk;
727 	struct bkey_s_c k;
728 	struct bkey_buf last_flushed;
729 	int ret = 0;
730 
731 	struct bch_dev *ca = bch2_dev_tryget(c, dev);
732 	if (!ca)
733 		return 0;
734 
735 	bucket_end = min(bucket_end, ca->mi.nbuckets);
736 
737 	struct bpos bp_start	= bucket_pos_to_bp_start(ca, POS(dev, bucket_start));
738 	struct bpos bp_end	= bucket_pos_to_bp_end(ca, POS(dev, bucket_end));
739 	bch2_dev_put(ca);
740 	ca = NULL;
741 
742 	bch2_bkey_buf_init(&last_flushed);
743 	bkey_init(&last_flushed.k->k);
744 	bch2_bkey_buf_init(&sk);
745 
746 	/*
747 	 * We're not run in a context that handles transaction restarts:
748 	 */
749 	bch2_trans_begin(trans);
750 
751 	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0);
752 
753 	bch_err_msg(c, ret, "looking up alloc key");
754 	if (ret)
755 		goto err;
756 
757 	ret = bch2_btree_write_buffer_tryflush(trans);
758 	bch_err_msg(c, ret, "flushing btree write buffer");
759 	if (ret)
760 		goto err;
761 
762 	while (!(ret = bch2_move_ratelimit(ctxt))) {
763 		if (is_kthread && kthread_should_stop())
764 			break;
765 
766 		bch2_trans_begin(trans);
767 
768 		k = bch2_btree_iter_peek(&bp_iter);
769 		ret = bkey_err(k);
770 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
771 			continue;
772 		if (ret)
773 			goto err;
774 
775 		if (!k.k || bkey_gt(k.k->p, bp_end))
776 			break;
777 
778 		if (k.k->type != KEY_TYPE_backpointer)
779 			goto next;
780 
781 		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
782 
783 		if (ctxt->stats)
784 			ctxt->stats->offset = bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
785 
786 		if (!(data_types & BIT(bp.v->data_type)))
787 			goto next;
788 
789 		if (!bp.v->level && bp.v->btree_id == BTREE_ID_stripes)
790 			goto next;
791 
792 		k = bch2_backpointer_get_key(trans, bp, &iter, 0, &last_flushed);
793 		ret = bkey_err(k);
794 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
795 			continue;
796 		if (ret)
797 			goto err;
798 		if (!k.k)
799 			goto next;
800 
801 		if (!bp.v->level) {
802 			ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k);
803 			if (ret) {
804 				bch2_trans_iter_exit(trans, &iter);
805 				continue;
806 			}
807 		}
808 
809 		struct data_update_opts data_opts = {};
810 		if (!pred(c, arg, k, &io_opts, &data_opts)) {
811 			bch2_trans_iter_exit(trans, &iter);
812 			goto next;
813 		}
814 
815 		if (data_opts.scrub &&
816 		    !bch2_dev_idx_is_online(c, data_opts.read_dev)) {
817 			bch2_trans_iter_exit(trans, &iter);
818 			ret = -BCH_ERR_device_offline;
819 			break;
820 		}
821 
822 		bch2_bkey_buf_reassemble(&sk, c, k);
823 		k = bkey_i_to_s_c(sk.k);
824 
825 		/* move_extent will drop locks */
826 		unsigned sectors = bp.v->bucket_len;
827 
828 		if (!bp.v->level)
829 			ret = bch2_move_extent(ctxt, bucket_in_flight, &iter, k, io_opts, data_opts);
830 		else if (!data_opts.scrub)
831 			ret = bch2_btree_node_rewrite_pos(trans, bp.v->btree_id, bp.v->level, k.k->p, 0);
832 		else
833 			ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev);
834 
835 		bch2_trans_iter_exit(trans, &iter);
836 
837 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
838 			continue;
839 		if (ret == -ENOMEM) {
840 			/* memory allocation failure, wait for some IO to finish */
841 			bch2_move_ctxt_wait_for_io(ctxt);
842 			continue;
843 		}
844 		if (ret)
845 			goto err;
846 
847 		if (ctxt->stats)
848 			atomic64_add(sectors, &ctxt->stats->sectors_seen);
849 next:
850 		bch2_btree_iter_advance(&bp_iter);
851 	}
852 err:
853 	bch2_trans_iter_exit(trans, &bp_iter);
854 	bch2_bkey_buf_exit(&sk, c);
855 	bch2_bkey_buf_exit(&last_flushed, c);
856 	return ret;
857 }
858 
859 static int bch2_move_data_phys(struct bch_fs *c,
860 			       unsigned dev,
861 			       u64 start,
862 			       u64 end,
863 			       unsigned data_types,
864 			       struct bch_ratelimit *rate,
865 			       struct bch_move_stats *stats,
866 			       struct write_point_specifier wp,
867 			       bool wait_on_copygc,
868 			       move_pred_fn pred, void *arg)
869 {
870 	struct moving_context ctxt;
871 
872 	bch2_trans_run(c, bch2_btree_write_buffer_flush_sync(trans));
873 
874 	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
875 	ctxt.stats->phys = true;
876 	ctxt.stats->data_type = (int) DATA_PROGRESS_DATA_TYPE_phys;
877 
878 	int ret = __bch2_move_data_phys(&ctxt, NULL, dev, start, end, data_types, pred, arg);
879 	bch2_moving_ctxt_exit(&ctxt);
880 
881 	return ret;
882 }
883 
884 struct evacuate_bucket_arg {
885 	struct bpos		bucket;
886 	int			gen;
887 	struct data_update_opts	data_opts;
888 };
889 
890 static bool evacuate_bucket_pred(struct bch_fs *c, void *_arg, struct bkey_s_c k,
891 				 struct bch_io_opts *io_opts,
892 				 struct data_update_opts *data_opts)
893 {
894 	struct evacuate_bucket_arg *arg = _arg;
895 
896 	*data_opts = arg->data_opts;
897 
898 	unsigned i = 0;
899 	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
900 		if (ptr->dev == arg->bucket.inode &&
901 		    (arg->gen < 0 || arg->gen == ptr->gen) &&
902 		    !ptr->cached)
903 			data_opts->rewrite_ptrs |= BIT(i);
904 		i++;
905 	}
906 
907 	return data_opts->rewrite_ptrs != 0;
908 }
909 
910 int bch2_evacuate_bucket(struct moving_context *ctxt,
911 			   struct move_bucket_in_flight *bucket_in_flight,
912 			   struct bpos bucket, int gen,
913 			   struct data_update_opts data_opts)
914 {
915 	struct evacuate_bucket_arg arg = { bucket, gen, data_opts, };
916 
917 	return __bch2_move_data_phys(ctxt, bucket_in_flight,
918 				   bucket.inode,
919 				   bucket.offset,
920 				   bucket.offset + 1,
921 				   ~0,
922 				   evacuate_bucket_pred, &arg);
923 }
924 
925 typedef bool (*move_btree_pred)(struct bch_fs *, void *,
926 				struct btree *, struct bch_io_opts *,
927 				struct data_update_opts *);
928 
929 static int bch2_move_btree(struct bch_fs *c,
930 			   struct bbpos start,
931 			   struct bbpos end,
932 			   move_btree_pred pred, void *arg,
933 			   struct bch_move_stats *stats)
934 {
935 	bool kthread = (current->flags & PF_KTHREAD) != 0;
936 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
937 	struct moving_context ctxt;
938 	struct btree_trans *trans;
939 	struct btree_iter iter;
940 	struct btree *b;
941 	enum btree_id btree;
942 	struct data_update_opts data_opts;
943 	int ret = 0;
944 
945 	bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
946 			      writepoint_ptr(&c->btree_write_point),
947 			      true);
948 	trans = ctxt.trans;
949 
950 	stats->data_type = BCH_DATA_btree;
951 
952 	for (btree = start.btree;
953 	     btree <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
954 	     btree ++) {
955 		stats->pos = BBPOS(btree, POS_MIN);
956 
957 		if (!bch2_btree_id_root(c, btree)->b)
958 			continue;
959 
960 		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
961 					  BTREE_ITER_prefetch);
962 retry:
963 		ret = 0;
964 		while (bch2_trans_begin(trans),
965 		       (b = bch2_btree_iter_peek_node(&iter)) &&
966 		       !(ret = PTR_ERR_OR_ZERO(b))) {
967 			if (kthread && kthread_should_stop())
968 				break;
969 
970 			if ((cmp_int(btree, end.btree) ?:
971 			     bpos_cmp(b->key.k.p, end.pos)) > 0)
972 				break;
973 
974 			stats->pos = BBPOS(iter.btree_id, iter.pos);
975 
976 			if (!pred(c, arg, b, &io_opts, &data_opts))
977 				goto next;
978 
979 			ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
980 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
981 				continue;
982 			if (ret)
983 				break;
984 next:
985 			bch2_btree_iter_next_node(&iter);
986 		}
987 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
988 			goto retry;
989 
990 		bch2_trans_iter_exit(trans, &iter);
991 
992 		if (kthread && kthread_should_stop())
993 			break;
994 	}
995 
996 	bch_err_fn(c, ret);
997 	bch2_moving_ctxt_exit(&ctxt);
998 	bch2_btree_interior_updates_flush(c);
999 
1000 	return ret;
1001 }
1002 
1003 static bool rereplicate_pred(struct bch_fs *c, void *arg,
1004 			     struct bkey_s_c k,
1005 			     struct bch_io_opts *io_opts,
1006 			     struct data_update_opts *data_opts)
1007 {
1008 	unsigned nr_good = bch2_bkey_durability(c, k);
1009 	unsigned replicas = bkey_is_btree_ptr(k.k)
1010 		? c->opts.metadata_replicas
1011 		: io_opts->data_replicas;
1012 
1013 	rcu_read_lock();
1014 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1015 	unsigned i = 0;
1016 	bkey_for_each_ptr(ptrs, ptr) {
1017 		struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1018 		if (!ptr->cached &&
1019 		    (!ca || !ca->mi.durability))
1020 			data_opts->kill_ptrs |= BIT(i);
1021 		i++;
1022 	}
1023 	rcu_read_unlock();
1024 
1025 	if (!data_opts->kill_ptrs &&
1026 	    (!nr_good || nr_good >= replicas))
1027 		return false;
1028 
1029 	data_opts->target		= 0;
1030 	data_opts->extra_replicas	= replicas - nr_good;
1031 	data_opts->btree_insert_flags	= 0;
1032 	return true;
1033 }
1034 
1035 static bool migrate_pred(struct bch_fs *c, void *arg,
1036 			 struct bkey_s_c k,
1037 			 struct bch_io_opts *io_opts,
1038 			 struct data_update_opts *data_opts)
1039 {
1040 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1041 	struct bch_ioctl_data *op = arg;
1042 	unsigned i = 0;
1043 
1044 	data_opts->rewrite_ptrs		= 0;
1045 	data_opts->target		= 0;
1046 	data_opts->extra_replicas	= 0;
1047 	data_opts->btree_insert_flags	= 0;
1048 
1049 	bkey_for_each_ptr(ptrs, ptr) {
1050 		if (ptr->dev == op->migrate.dev)
1051 			data_opts->rewrite_ptrs |= 1U << i;
1052 		i++;
1053 	}
1054 
1055 	return data_opts->rewrite_ptrs != 0;
1056 }
1057 
1058 static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
1059 				   struct btree *b,
1060 				   struct bch_io_opts *io_opts,
1061 				   struct data_update_opts *data_opts)
1062 {
1063 	return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
1064 }
1065 
1066 /*
1067  * Ancient versions of bcachefs produced packed formats which could represent
1068  * keys that the in memory format cannot represent; this checks for those
1069  * formats so we can get rid of them.
1070  */
1071 static bool bformat_needs_redo(struct bkey_format *f)
1072 {
1073 	for (unsigned i = 0; i < f->nr_fields; i++)
1074 		if (bch2_bkey_format_field_overflows(f, i))
1075 			return true;
1076 
1077 	return false;
1078 }
1079 
1080 static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
1081 				   struct btree *b,
1082 				   struct bch_io_opts *io_opts,
1083 				   struct data_update_opts *data_opts)
1084 {
1085 	if (b->version_ondisk != c->sb.version ||
1086 	    btree_node_need_rewrite(b) ||
1087 	    bformat_needs_redo(&b->format)) {
1088 		data_opts->target		= 0;
1089 		data_opts->extra_replicas	= 0;
1090 		data_opts->btree_insert_flags	= 0;
1091 		return true;
1092 	}
1093 
1094 	return false;
1095 }
1096 
1097 int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
1098 {
1099 	int ret;
1100 
1101 	ret = bch2_move_btree(c,
1102 			      BBPOS_MIN,
1103 			      BBPOS_MAX,
1104 			      rewrite_old_nodes_pred, c, stats);
1105 	if (!ret) {
1106 		mutex_lock(&c->sb_lock);
1107 		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
1108 		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
1109 		c->disk_sb.sb->version_min = c->disk_sb.sb->version;
1110 		bch2_write_super(c);
1111 		mutex_unlock(&c->sb_lock);
1112 	}
1113 
1114 	bch_err_fn(c, ret);
1115 	return ret;
1116 }
1117 
1118 static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
1119 			     struct bkey_s_c k,
1120 			     struct bch_io_opts *io_opts,
1121 			     struct data_update_opts *data_opts)
1122 {
1123 	unsigned durability = bch2_bkey_durability(c, k);
1124 	unsigned replicas = bkey_is_btree_ptr(k.k)
1125 		? c->opts.metadata_replicas
1126 		: io_opts->data_replicas;
1127 	const union bch_extent_entry *entry;
1128 	struct extent_ptr_decoded p;
1129 	unsigned i = 0;
1130 
1131 	rcu_read_lock();
1132 	bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
1133 		unsigned d = bch2_extent_ptr_durability(c, &p);
1134 
1135 		if (d && durability - d >= replicas) {
1136 			data_opts->kill_ptrs |= BIT(i);
1137 			durability -= d;
1138 		}
1139 
1140 		i++;
1141 	}
1142 	rcu_read_unlock();
1143 
1144 	return data_opts->kill_ptrs != 0;
1145 }
1146 
1147 static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
1148 				   struct btree *b,
1149 				   struct bch_io_opts *io_opts,
1150 				   struct data_update_opts *data_opts)
1151 {
1152 	return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
1153 }
1154 
1155 static bool scrub_pred(struct bch_fs *c, void *_arg,
1156 		       struct bkey_s_c k,
1157 		       struct bch_io_opts *io_opts,
1158 		       struct data_update_opts *data_opts)
1159 {
1160 	struct bch_ioctl_data *arg = _arg;
1161 
1162 	if (k.k->type != KEY_TYPE_btree_ptr_v2) {
1163 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1164 		const union bch_extent_entry *entry;
1165 		struct extent_ptr_decoded p;
1166 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
1167 			if (p.ptr.dev == arg->migrate.dev) {
1168 				if (!p.crc.csum_type)
1169 					return false;
1170 				break;
1171 			}
1172 	}
1173 
1174 	data_opts->scrub	= true;
1175 	data_opts->read_dev	= arg->migrate.dev;
1176 	return true;
1177 }
1178 
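/*
 * Entry point for the BCH_IOCTL_DATA job types: dispatches scrub,
 * rereplicate, migrate, rewrite_old_nodes and drop_extra_replicas to the move
 * paths above; later phases still run after an earlier failure, and the job
 * returns nonzero if any phase failed.
 */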
1179 int bch2_data_job(struct bch_fs *c,
1180 		  struct bch_move_stats *stats,
1181 		  struct bch_ioctl_data op)
1182 {
1183 	struct bbpos start	= BBPOS(op.start_btree, op.start_pos);
1184 	struct bbpos end	= BBPOS(op.end_btree, op.end_pos);
1185 	int ret = 0;
1186 
1187 	if (op.op >= BCH_DATA_OP_NR)
1188 		return -EINVAL;
1189 
1190 	bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]);
1191 
1192 	switch (op.op) {
1193 	case BCH_DATA_OP_scrub:
1194 		/*
1195 		 * prevent tests from spuriously failing, make sure we see all
1196 		 * btree nodes that need to be repaired
1197 		 */
1198 		bch2_btree_interior_updates_flush(c);
1199 
1200 		ret = bch2_move_data_phys(c, op.scrub.dev, 0, U64_MAX,
1201 					  op.scrub.data_types,
1202 					  NULL,
1203 					  stats,
1204 					  writepoint_hashed((unsigned long) current),
1205 					  false,
1206 					  scrub_pred, &op) ?: ret;
1207 		break;
1208 
1209 	case BCH_DATA_OP_rereplicate:
1210 		stats->data_type = BCH_DATA_journal;
1211 		ret = bch2_journal_flush_device_pins(&c->journal, -1);
1212 		ret = bch2_move_btree(c, start, end,
1213 				      rereplicate_btree_pred, c, stats) ?: ret;
1214 		ret = bch2_move_data(c, start, end,
1215 				     NULL,
1216 				     stats,
1217 				     writepoint_hashed((unsigned long) current),
1218 				     true,
1219 				     rereplicate_pred, c) ?: ret;
1220 		ret = bch2_replicas_gc2(c) ?: ret;
1221 		break;
1222 	case BCH_DATA_OP_migrate:
1223 		if (op.migrate.dev >= c->sb.nr_devices)
1224 			return -EINVAL;
1225 
1226 		stats->data_type = BCH_DATA_journal;
1227 		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
1228 		ret = bch2_move_data_phys(c, op.migrate.dev, 0, U64_MAX,
1229 					  ~0,
1230 					  NULL,
1231 					  stats,
1232 					  writepoint_hashed((unsigned long) current),
1233 					  true,
1234 					  migrate_pred, &op) ?: ret;
1235 		bch2_btree_interior_updates_flush(c);
1236 		ret = bch2_replicas_gc2(c) ?: ret;
1237 		break;
1238 	case BCH_DATA_OP_rewrite_old_nodes:
1239 		ret = bch2_scan_old_btree_nodes(c, stats);
1240 		break;
1241 	case BCH_DATA_OP_drop_extra_replicas:
1242 		ret = bch2_move_btree(c, start, end,
1243 				drop_extra_replicas_btree_pred, c, stats) ?: ret;
1244 		ret = bch2_move_data(c, start, end, NULL, stats,
1245 				writepoint_hashed((unsigned long) current),
1246 				true,
1247 				drop_extra_replicas_pred, c) ?: ret;
1248 		ret = bch2_replicas_gc2(c) ?: ret;
1249 		break;
1250 	default:
1251 		ret = -EINVAL;
1252 	}
1253 
1254 	bch2_move_stats_exit(stats, c);
1255 	return ret;
1256 }
1257 
1258 void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
1259 {
1260 	prt_printf(out, "%s: data type==", stats->name);
1261 	bch2_prt_data_type(out, stats->data_type);
1262 	prt_str(out, " pos=");
1263 	bch2_bbpos_to_text(out, stats->pos);
1264 	prt_newline(out);
1265 	printbuf_indent_add(out, 2);
1266 
1267 	prt_printf(out, "keys moved:\t%llu\n",	atomic64_read(&stats->keys_moved));
1268 	prt_printf(out, "keys raced:\t%llu\n",	atomic64_read(&stats->keys_raced));
1269 	prt_printf(out, "bytes seen:\t");
1270 	prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
1271 	prt_newline(out);
1272 
1273 	prt_printf(out, "bytes moved:\t");
1274 	prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
1275 	prt_newline(out);
1276 
1277 	prt_printf(out, "bytes raced:\t");
1278 	prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
1279 	prt_newline(out);
1280 
1281 	printbuf_indent_sub(out, 2);
1282 }
1283 
1284 static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
1285 {
1286 	if (!out->nr_tabstops)
1287 		printbuf_tabstop_push(out, 32);
1288 
1289 	bch2_move_stats_to_text(out, ctxt->stats);
1290 	printbuf_indent_add(out, 2);
1291 
1292 	prt_printf(out, "reads: ios %u/%u sectors %u/%u\n",
1293 		   atomic_read(&ctxt->read_ios),
1294 		   c->opts.move_ios_in_flight,
1295 		   atomic_read(&ctxt->read_sectors),
1296 		   c->opts.move_bytes_in_flight >> 9);
1297 
1298 	prt_printf(out, "writes: ios %u/%u sectors %u/%u\n",
1299 		   atomic_read(&ctxt->write_ios),
1300 		   c->opts.move_ios_in_flight,
1301 		   atomic_read(&ctxt->write_sectors),
1302 		   c->opts.move_bytes_in_flight >> 9);
1303 
1304 	printbuf_indent_add(out, 2);
1305 
1306 	mutex_lock(&ctxt->lock);
1307 	struct moving_io *io;
1308 	list_for_each_entry(io, &ctxt->ios, io_list)
1309 		bch2_data_update_inflight_to_text(out, &io->write);
1310 	mutex_unlock(&ctxt->lock);
1311 
1312 	printbuf_indent_sub(out, 4);
1313 }
1314 
1315 void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
1316 {
1317 	struct moving_context *ctxt;
1318 
1319 	mutex_lock(&c->moving_context_lock);
1320 	list_for_each_entry(ctxt, &c->moving_context_list, list)
1321 		bch2_moving_ctxt_to_text(out, c, ctxt);
1322 	mutex_unlock(&c->moving_context_lock);
1323 }
1324 
1325 void bch2_fs_move_init(struct bch_fs *c)
1326 {
1327 	INIT_LIST_HEAD(&c->moving_context_list);
1328 	mutex_init(&c->moving_context_lock);
1329 }
1330