// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_read.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "rebalance.h"
#include "reflink.h"
#include "replicas.h"
#include "snapshot.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

const char * const bch2_data_ops_strs[] = {
#define x(t, n, ...) [n] = #t,
	BCH_DATA_OPS()
#undef x
	NULL
};

static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_opts *io_opts,
			       struct data_update_opts *data_opts)
{
	if (trace_move_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);
		bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
		trace_move_extent(c, buf.buf);
		printbuf_exit(&buf);
	}
}

static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_read_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_read(c, buf.buf);
		printbuf_exit(&buf);
	}
}

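/*
 * A single extent move in flight: the read bio, the data update that will
 * rewrite the extent, and list linkage back to the owning moving_context.
 */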
struct moving_io {
	struct list_head		read_list;
	struct list_head		io_list;
	struct move_bucket_in_flight	*b;
	struct closure			cl;
	bool				read_completed;

	unsigned			read_sectors;
	unsigned			write_sectors;

	struct bch_read_bio		rbio;

	struct data_update		write;
	/* Must be last since it is variable size */
	struct bio_vec			bi_inline_vecs[];
};

static void move_free(struct moving_io *io)
{
	struct moving_context *ctxt = io->write.ctxt;

	if (io->b)
		atomic_dec(&io->b->count);

	bch2_data_update_exit(&io->write);

	mutex_lock(&ctxt->lock);
	list_del(&io->io_list);
	wake_up(&ctxt->wait);
	mutex_unlock(&ctxt->lock);

	kfree(io);
}

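/* Write side completion: drop the in-flight counters and free the io. */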
static void move_write_done(struct bch_write_op *op)
{
	struct moving_io *io = container_of(op, struct moving_io, write.op);
	struct moving_context *ctxt = io->write.ctxt;

	if (io->write.op.error)
		ctxt->write_error = true;

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	atomic_dec(&io->write.ctxt->write_ios);
	move_free(io);
	closure_put(&ctxt->cl);
}

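/*
 * Issue the write half of a move once the read has completed; a read error
 * or a hole means there's nothing to rewrite, so the io is freed instead.
 */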
static void move_write(struct moving_io *io)
{
	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		move_free(io);
		return;
	}

	if (trace_move_extent_write_enabled()) {
		struct bch_fs *c = io->write.op.c;
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
		trace_move_extent_write(c, buf.buf);
		printbuf_exit(&buf);
	}

	closure_get(&io->write.ctxt->cl);
	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	atomic_inc(&io->write.ctxt->write_ios);

	bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
}

struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);

	return io && io->read_completed ? io : NULL;
}

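/*
 * Read side completion: flag the io as ready so that
 * bch2_moving_ctxt_do_pending_writes() will issue its write.
 */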
static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	atomic_dec(&ctxt->read_ios);
	io->read_completed = true;

	wake_up(&ctxt->wait);
	closure_put(&ctxt->cl);
}

void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
		bch2_trans_unlock_long(ctxt->trans);
		list_del(&io->read_list);
		move_write(io);
	}
}

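/*
 * Wait until the amount of write IO in flight changes - i.e. until some
 * pending write completes, or none remain outstanding.
 */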
void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
{
	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
	bch2_trans_unlock_long(ctxt->trans);
	closure_sync(&ctxt->cl);
}

void bch2_moving_ctxt_exit(struct moving_context *ctxt)
{
	struct bch_fs *c = ctxt->trans->c;

	bch2_moving_ctxt_flush_all(ctxt);

	EBUG_ON(atomic_read(&ctxt->write_sectors));
	EBUG_ON(atomic_read(&ctxt->write_ios));
	EBUG_ON(atomic_read(&ctxt->read_sectors));
	EBUG_ON(atomic_read(&ctxt->read_ios));

	mutex_lock(&c->moving_context_lock);
	list_del(&ctxt->list);
	mutex_unlock(&c->moving_context_lock);

	/*
	 * Generally, releasing a transaction within a transaction restart means
	 * an unhandled transaction restart: but this can happen legitimately
	 * within the move code, e.g. when bch2_move_ratelimit() tells us to
	 * exit before we've retried
	 */
	bch2_trans_begin(ctxt->trans);
	bch2_trans_put(ctxt->trans);
	memset(ctxt, 0, sizeof(*ctxt));
}

void bch2_moving_ctxt_init(struct moving_context *ctxt,
			   struct bch_fs *c,
			   struct bch_ratelimit *rate,
			   struct bch_move_stats *stats,
			   struct write_point_specifier wp,
			   bool wait_on_copygc)
{
	memset(ctxt, 0, sizeof(*ctxt));

	ctxt->trans	= bch2_trans_get(c);
	ctxt->fn	= (void *) _RET_IP_;
	ctxt->rate	= rate;
	ctxt->stats	= stats;
	ctxt->wp	= wp;
	ctxt->wait_on_copygc = wait_on_copygc;

	closure_init_stack(&ctxt->cl);

	mutex_init(&ctxt->lock);
	INIT_LIST_HEAD(&ctxt->reads);
	INIT_LIST_HEAD(&ctxt->ios);
	init_waitqueue_head(&ctxt->wait);

	mutex_lock(&c->moving_context_lock);
	list_add(&ctxt->list, &c->moving_context_list);
	mutex_unlock(&c->moving_context_lock);
}

void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
{
	trace_move_data(c, stats);
}

void bch2_move_stats_init(struct bch_move_stats *stats, const char *name)
{
	memset(stats, 0, sizeof(*stats));
	stats->data_type = BCH_DATA_user;
	scnprintf(stats->name, sizeof(stats->name), "%s", name);
}

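/*
 * Read one extent and queue it for rewrite: allocates a moving_io, sets up
 * the data update, and issues the read; move_write() runs from the read
 * completion. Returns 0 if the move was started or turned out to be
 * unnecessary.
 */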
int bch2_move_extent(struct moving_context *ctxt,
		     struct move_bucket_in_flight *bucket_in_flight,
		     struct btree_iter *iter,
		     struct bkey_s_c k,
		     struct bch_io_opts io_opts,
		     struct data_update_opts data_opts)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	trace_move_extent2(c, k, &io_opts, &data_opts);

	if (ctxt->stats)
		ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);

	bch2_data_update_opts_normalize(k, &data_opts);

	if (!data_opts.rewrite_ptrs &&
	    !data_opts.extra_replicas) {
		if (data_opts.kill_ptrs)
			return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts);
		return 0;
	}

	/*
	 * Before memory allocations & taking nocow locks in
	 * bch2_data_update_init():
	 */
	bch2_trans_unlock(trans);

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	INIT_LIST_HEAD(&io->io_list);
	io->write.ctxt		= ctxt;
	io->read_sectors	= k.k->size;
	io->write_sectors	= k.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->write.op.wbio.bio.bi_ioprio =
		IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c		= c;
	io->rbio.opts		= io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	io->rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf		= REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
				    io_opts, data_opts, iter->btree_id, k);
	if (ret)
		goto err_free_pages;

	io->write.op.end_io = move_write_done;

	if (ctxt->rate)
		bch2_ratelimit_increment(ctxt->rate, k.k->size);

	if (ctxt->stats) {
		atomic64_inc(&ctxt->stats->keys_moved);
		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
	}

	if (bucket_in_flight) {
		io->b = bucket_in_flight;
		atomic_inc(&io->b->count);
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
	trace_move_extent_read2(c, k);

	mutex_lock(&ctxt->lock);
	atomic_add(io->read_sectors, &ctxt->read_sectors);
	atomic_inc(&ctxt->read_ios);

	list_add_tail(&io->read_list, &ctxt->reads);
	list_add_tail(&io->io_list, &ctxt->ios);
	mutex_unlock(&ctxt->lock);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(trans, &io->rbio,
			 bkey_start_pos(k.k),
			 iter->btree_id, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	if (ret == -BCH_ERR_data_update_done)
		return 0;

	if (bch2_err_matches(ret, EROFS) ||
	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
		return ret;

	count_event(c, move_extent_start_fail);

	if (trace_move_extent_start_fail_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_str(&buf, ": ");
		prt_str(&buf, bch2_err_str(ret));
		trace_move_extent_start_fail(c, buf.buf);
		printbuf_exit(&buf);
	}
	return ret;
}

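/*
 * Look up the io options that apply to @extent_k, caching the unpacked
 * per-snapshot inode options for the current inode number so that scanning
 * many extents of the same file doesn't rewalk the inodes btree.
 */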
static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
			  struct per_snapshot_io_opts *io_opts,
			  struct bpos extent_pos, /* extent_iter, extent_k may be in reflink btree */
			  struct btree_iter *extent_iter,
			  struct bkey_s_c extent_k)
{
	struct bch_fs *c = trans->c;
	u32 restart_count = trans->restart_count;
	struct bch_io_opts *opts_ret = &io_opts->fs_io_opts;
	int ret = 0;

	if (extent_k.k->type == KEY_TYPE_reflink_v)
		goto out;

	if (io_opts->cur_inum != extent_pos.inode) {
		io_opts->d.nr = 0;

		ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_pos.inode),
					 BTREE_ITER_all_snapshots, k, ({
			if (k.k->p.offset != extent_pos.inode)
				break;

			if (!bkey_is_inode(k.k))
				continue;

			struct bch_inode_unpacked inode;
			_ret3 = bch2_inode_unpack(k, &inode);
			if (_ret3)
				break;

			struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
			bch2_inode_opts_get(&e.io_opts, trans->c, &inode);

			darray_push(&io_opts->d, e);
		}));
		io_opts->cur_inum = extent_pos.inode;
	}

	ret = ret ?: trans_was_restarted(trans, restart_count);
	if (ret)
		return ERR_PTR(ret);

	if (extent_k.k->p.snapshot)
		darray_for_each(io_opts->d, i)
			if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) {
				opts_ret = &i->io_opts;
				break;
			}
out:
	ret = bch2_get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k);
	if (ret)
		return ERR_PTR(ret);
	return opts_ret;
}

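/* Single-extent variant: derive io options from the extent's inode, if any. */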
int bch2_move_get_io_opts_one(struct btree_trans *trans,
			      struct bch_io_opts *io_opts,
			      struct btree_iter *extent_iter,
			      struct bkey_s_c extent_k)
{
	struct bch_fs *c = trans->c;

	*io_opts = bch2_opts_to_inode_opts(c->opts);

	/* reflink btree? */
	if (!extent_k.k->p.inode)
		goto out;

	struct btree_iter inode_iter;
	struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
			       SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
			       BTREE_ITER_cached);
	int ret = bkey_err(inode_k);
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		return ret;

	if (!ret && bkey_is_inode(inode_k.k)) {
		struct bch_inode_unpacked inode;
		bch2_inode_unpack(inode_k, &inode);
		bch2_inode_opts_get(io_opts, c, &inode);
	}
	bch2_trans_iter_exit(trans, &inode_iter);
out:
	return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k);
}

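/*
 * Throttle the move path: wait out copygc if requested, honour the
 * configured rate limit, and cap the amount of move IO in flight.
 * Returns nonzero if the calling kthread should exit.
 */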
int bch2_move_ratelimit(struct moving_context *ctxt)
{
	struct bch_fs *c = ctxt->trans->c;
	bool is_kthread = current->flags & PF_KTHREAD;
	u64 delay;

	if (ctxt->wait_on_copygc && c->copygc_running) {
		bch2_moving_ctxt_flush_all(ctxt);
		wait_event_killable(c->copygc_running_wq,
				    !c->copygc_running ||
				    (is_kthread && kthread_should_stop()));
	}

	do {
		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;

		if (is_kthread && kthread_should_stop())
			return 1;

		if (delay)
			move_ctxt_wait_event_timeout(ctxt,
					freezing(current) ||
					(is_kthread && kthread_should_stop()),
					delay);

		if (unlikely(freezing(current))) {
			bch2_moving_ctxt_flush_all(ctxt);
			try_to_freeze();
		}
	} while (delay);

	/*
	 * XXX: these limits really ought to be per device, SSDs and hard drives
	 * will want different limits
	 */
	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
		atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
		atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
		atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);

	return 0;
}

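/*
 * Walk extents in a single btree from @start towards @end, applying @pred to
 * decide per extent whether, and with what options, to move it.
 */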
static int bch2_move_data_btree(struct moving_context *ctxt,
				struct bpos start,
				struct bpos end,
				move_pred_fn pred, void *arg,
				enum btree_id btree_id)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct per_snapshot_io_opts snapshot_io_opts;
	struct bch_io_opts *io_opts;
	struct bkey_buf sk;
	struct btree_iter iter, reflink_iter = {};
	struct bkey_s_c k;
	struct data_update_opts data_opts;
	/*
	 * If we're moving a single file, also process reflinked data it points
	 * to (this includes propagating changed io_opts from the inode to the
	 * extent):
	 */
	bool walk_indirect = start.inode == end.inode;
	int ret = 0, ret2;

	per_snapshot_io_opts_init(&snapshot_io_opts, c);
	bch2_bkey_buf_init(&sk);

	if (ctxt->stats) {
		ctxt->stats->data_type	= BCH_DATA_user;
		ctxt->stats->pos	= BBPOS(btree_id, start);
	}

	bch2_trans_begin(trans);
	bch2_trans_iter_init(trans, &iter, btree_id, start,
			     BTREE_ITER_prefetch|
			     BTREE_ITER_all_snapshots);

	if (ctxt->rate)
		bch2_ratelimit_reset(ctxt->rate);

	while (!bch2_move_ratelimit(ctxt)) {
		struct btree_iter *extent_iter = &iter;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(bkey_start_pos(k.k), end))
			break;

		if (ctxt->stats)
			ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);

		if (walk_indirect &&
		    k.k->type == KEY_TYPE_reflink_p &&
		    REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) {
			struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
			s64 offset_into_extent	= iter.pos.offset - bkey_start_offset(k.k);

			bch2_trans_iter_exit(trans, &reflink_iter);
			k = bch2_lookup_indirect_extent(trans, &reflink_iter, &offset_into_extent, p, true, 0);
			ret = bkey_err(k);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				break;

			if (bkey_deleted(k.k))
				goto next_nondata;

			/*
			 * XXX: reflink pointers may point to multiple indirect
			 * extents, so don't advance past the entire reflink
			 * pointer - need to fixup iter->k
			 */
			extent_iter = &reflink_iter;
		}

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts,
						iter.pos, extent_iter, k);
		ret = PTR_ERR_OR_ZERO(io_opts);
		if (ret)
			continue;

		memset(&data_opts, 0, sizeof(data_opts));
		if (!pred(c, arg, k, io_opts, &data_opts))
			goto next;

		/*
		 * The iterator gets unlocked by __bch2_read_extent - need to
		 * save a copy of @k elsewhere:
		 */
		bch2_bkey_buf_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);

		ret2 = bch2_move_extent(ctxt, NULL, extent_iter, k, *io_opts, data_opts);
		if (ret2) {
			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
				continue;

			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}
next:
		if (ctxt->stats)
			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
		bch2_btree_iter_advance(&iter);
	}

	bch2_trans_iter_exit(trans, &reflink_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&sk, c);
	per_snapshot_io_opts_exit(&snapshot_io_opts);

	return ret;
}

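/*
 * Walk every btree that holds extent pointers in [start, end], moving data
 * one btree at a time via bch2_move_data_btree().
 */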
int __bch2_move_data(struct moving_context *ctxt,
		     struct bbpos start,
		     struct bbpos end,
		     move_pred_fn pred, void *arg)
{
	struct bch_fs *c = ctxt->trans->c;
	enum btree_id id;
	int ret = 0;

	for (id = start.btree;
	     id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
	     id++) {
		ctxt->stats->pos = BBPOS(id, POS_MIN);

		if (!btree_type_has_ptrs(id) ||
		    !bch2_btree_id_root(c, id)->b)
			continue;

		ret = bch2_move_data_btree(ctxt,
				       id == start.btree ? start.pos : POS_MIN,
				       id == end.btree   ? end.pos   : POS_MAX,
				       pred, arg, id);
		if (ret)
			break;
	}

	return ret;
}

int bch2_move_data(struct bch_fs *c,
		   struct bbpos start,
		   struct bbpos end,
		   struct bch_ratelimit *rate,
		   struct bch_move_stats *stats,
		   struct write_point_specifier wp,
		   bool wait_on_copygc,
		   move_pred_fn pred, void *arg)
{
	struct moving_context ctxt;
	int ret;

	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
	ret = __bch2_move_data(&ctxt, start, end, pred, arg);
	bch2_moving_ctxt_exit(&ctxt);

	return ret;
}

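/*
 * Move all data out of @bucket by walking the backpointers btree: extents go
 * through the normal move path, btree nodes are rewritten in place via
 * bch2_btree_node_rewrite().
 */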
int bch2_evacuate_bucket(struct moving_context *ctxt,
			   struct move_bucket_in_flight *bucket_in_flight,
			   struct bpos bucket, int gen,
			   struct data_update_opts _data_opts)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	bool is_kthread = current->flags & PF_KTHREAD;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_iter iter = {}, bp_iter = {};
	struct bkey_buf sk;
	struct bkey_s_c k;
	struct data_update_opts data_opts;
	unsigned sectors_moved = 0;
	struct bkey_buf last_flushed;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
	if (!ca)
		return 0;

	trace_bucket_evacuate(c, &bucket);

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);
	bch2_bkey_buf_init(&sk);

	/*
	 * We're not run in a context that handles transaction restarts:
	 */
	bch2_trans_begin(trans);

	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp_start(ca, bucket), 0);

	ret = bch2_btree_write_buffer_tryflush(trans);
	bch_err_msg(c, ret, "flushing btree write buffer");
	if (ret)
		goto err;

	while (!(ret = bch2_move_ratelimit(ctxt))) {
		if (is_kthread && kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek(&bp_iter);
		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			goto err;

		if (!k.k || bkey_gt(k.k->p, bucket_pos_to_bp_end(ca, bucket)))
			break;

		if (k.k->type != KEY_TYPE_backpointer)
			goto next;

		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);

		if (!bp.v->level) {
			k = bch2_backpointer_get_key(trans, bp, &iter, 0, &last_flushed);
			ret = bkey_err(k);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;
			if (!k.k)
				goto next;

			bch2_bkey_buf_reassemble(&sk, c, k);
			k = bkey_i_to_s_c(sk.k);

			ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k);
			if (ret) {
				bch2_trans_iter_exit(trans, &iter);
				continue;
			}

			data_opts = _data_opts;
			data_opts.target	= io_opts.background_target;
			data_opts.rewrite_ptrs = 0;

			unsigned sectors = bp.v->bucket_len; /* move_extent will drop locks */
			unsigned i = 0;
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;
			bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
				if (p.ptr.dev == bucket.inode) {
					if (p.ptr.cached) {
						bch2_trans_iter_exit(trans, &iter);
						goto next;
					}
					data_opts.rewrite_ptrs |= 1U << i;
					break;
				}
				i++;
			}

			ret = bch2_move_extent(ctxt, bucket_in_flight,
					       &iter, k, io_opts, data_opts);
			bch2_trans_iter_exit(trans, &iter);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}
			if (ret)
				goto err;

			if (ctxt->stats)
				atomic64_add(sectors, &ctxt->stats->sectors_seen);
			sectors_moved += sectors;
		} else {
			struct btree *b;

			b = bch2_backpointer_get_node(trans, bp, &iter, &last_flushed);
			ret = PTR_ERR_OR_ZERO(b);
			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
				goto next;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;
			if (!b)
				goto next;

			unsigned sectors = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));

			ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
			bch2_trans_iter_exit(trans, &iter);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;

			if (ctxt->rate)
				bch2_ratelimit_increment(ctxt->rate, sectors);
			if (ctxt->stats) {
				atomic64_add(sectors, &ctxt->stats->sectors_seen);
				atomic64_add(sectors, &ctxt->stats->sectors_moved);
			}
			sectors_moved += btree_sectors(c);
		}
next:
		bch2_btree_iter_advance(&bp_iter);
	}

	trace_evacuate_bucket(c, &bucket, sectors_moved, ca->mi.bucket_size, ret);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_dev_put(ca);
	bch2_bkey_buf_exit(&sk, c);
	bch2_bkey_buf_exit(&last_flushed, c);
	return ret;
}

typedef bool (*move_btree_pred)(struct bch_fs *, void *,
				struct btree *, struct bch_io_opts *,
				struct data_update_opts *);

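/*
 * Walk btree nodes in [start, end] across the given btrees, rewriting every
 * node that @pred accepts.
 */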
static int bch2_move_btree(struct bch_fs *c,
			   struct bbpos start,
			   struct bbpos end,
			   move_btree_pred pred, void *arg,
			   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct moving_context ctxt;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct btree *b;
	enum btree_id btree;
	struct data_update_opts data_opts;
	int ret = 0;

	bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
			      writepoint_ptr(&c->btree_write_point),
			      true);
	trans = ctxt.trans;

	stats->data_type = BCH_DATA_btree;

	for (btree = start.btree;
	     btree <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
	     btree++) {
		stats->pos = BBPOS(btree, POS_MIN);

		if (!bch2_btree_id_root(c, btree)->b)
			continue;

		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
					  BTREE_ITER_prefetch);
retry:
		ret = 0;
		while (bch2_trans_begin(trans),
		       (b = bch2_btree_iter_peek_node(&iter)) &&
		       !(ret = PTR_ERR_OR_ZERO(b))) {
			if (kthread && kthread_should_stop())
				break;

			if ((cmp_int(btree, end.btree) ?:
			     bpos_cmp(b->key.k.p, end.pos)) > 0)
				break;

			stats->pos = BBPOS(iter.btree_id, iter.pos);

			if (!pred(c, arg, b, &io_opts, &data_opts))
				goto next;

			ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				break;
next:
			bch2_btree_iter_next_node(&iter);
		}
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);

		if (kthread && kthread_should_stop())
			break;
	}

	bch_err_fn(c, ret);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_btree_interior_updates_flush(c);

	return ret;
}

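/*
 * Returns true if @k needs additional replicas to meet the target
 * replication level (and there is still a live copy to read from), or has
 * pointers to missing/zero-durability devices that should be dropped.
 */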
static bool rereplicate_pred(struct bch_fs *c, void *arg,
			     struct bkey_s_c k,
			     struct bch_io_opts *io_opts,
			     struct data_update_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = bkey_is_btree_ptr(k.k)
		? c->opts.metadata_replicas
		: io_opts->data_replicas;

	rcu_read_lock();
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;
	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
		if (!ptr->cached &&
		    (!ca || !ca->mi.durability))
			data_opts->kill_ptrs |= BIT(i);
		i++;
	}
	rcu_read_unlock();

	if (!data_opts->kill_ptrs &&
	    (!nr_good || nr_good >= replicas))
		return false;

	data_opts->target		= 0;
	data_opts->extra_replicas	= replicas - nr_good;
	data_opts->btree_insert_flags	= 0;
	return true;
}

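/* Select for rewrite every pointer on the device being migrated from. */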
static bool migrate_pred(struct bch_fs *c, void *arg,
			 struct bkey_s_c k,
			 struct bch_io_opts *io_opts,
			 struct data_update_opts *data_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_ioctl_data *op = arg;
	unsigned i = 0;

	data_opts->rewrite_ptrs		= 0;
	data_opts->target		= 0;
	data_opts->extra_replicas	= 0;
	data_opts->btree_insert_flags	= 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if (ptr->dev == op->migrate.dev)
			data_opts->rewrite_ptrs |= 1U << i;
		i++;
	}

	return data_opts->rewrite_ptrs != 0;
}

static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
				   struct btree *b,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

static bool migrate_btree_pred(struct bch_fs *c, void *arg,
			       struct btree *b,
			       struct bch_io_opts *io_opts,
			       struct data_update_opts *data_opts)
{
	return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

/*
 * Ancient versions of bcachefs produced packed formats which could represent
 * keys that the in memory format cannot represent; this checks for those
 * formats so we can get rid of them.
 */
static bool bformat_needs_redo(struct bkey_format *f)
{
	for (unsigned i = 0; i < f->nr_fields; i++)
		if (bch2_bkey_format_field_overflows(f, i))
			return true;

	return false;
}

static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
				   struct btree *b,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	if (b->version_ondisk != c->sb.version ||
	    btree_node_need_rewrite(b) ||
	    bformat_needs_redo(&b->format)) {
		data_opts->target		= 0;
		data_opts->extra_replicas	= 0;
		data_opts->btree_insert_flags	= 0;
		return true;
	}

	return false;
}

int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
{
	int ret;

	ret = bch2_move_btree(c,
			      BBPOS_MIN,
			      BBPOS_MAX,
			      rewrite_old_nodes_pred, c, stats);
	if (!ret) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
		c->disk_sb.sb->version_min = c->disk_sb.sb->version;
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch_err_fn(c, ret);
	return ret;
}

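/*
 * Returns true if @k has more durability than the target replication level;
 * marks pointers for removal so long as the remaining durability still
 * meets the target.
 */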
static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
			     struct bkey_s_c k,
			     struct bch_io_opts *io_opts,
			     struct data_update_opts *data_opts)
{
	unsigned durability = bch2_bkey_durability(c, k);
	unsigned replicas = bkey_is_btree_ptr(k.k)
		? c->opts.metadata_replicas
		: io_opts->data_replicas;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
		unsigned d = bch2_extent_ptr_durability(c, &p);

		if (d && durability - d >= replicas) {
			data_opts->kill_ptrs |= BIT(i);
			durability -= d;
		}

		i++;
	}
	rcu_read_unlock();

	return data_opts->kill_ptrs != 0;
}

static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
				   struct btree *b,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

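/*
 * Entry point for data jobs submitted via the data ioctl interface
 * (struct bch_ioctl_data): dispatches on op.op to the helpers above.
 */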
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	struct bbpos start	= BBPOS(op.start_btree, op.start_pos);
	struct bbpos end	= BBPOS(op.end_btree, op.end_pos);
	int ret = 0;

	if (op.op >= BCH_DATA_OP_NR)
		return -EINVAL;

	bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]);

	switch (op.op) {
	case BCH_DATA_OP_rereplicate:
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);
		ret = bch2_move_btree(c, start, end,
				      rereplicate_btree_pred, c, stats) ?: ret;
		ret = bch2_move_data(c, start, end,
				     NULL,
				     stats,
				     writepoint_hashed((unsigned long) current),
				     true,
				     rereplicate_pred, c) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_migrate:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
		ret = bch2_move_btree(c, start, end,
				      migrate_btree_pred, &op, stats) ?: ret;
		ret = bch2_move_data(c, start, end,
				     NULL,
				     stats,
				     writepoint_hashed((unsigned long) current),
				     true,
				     migrate_pred, &op) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_rewrite_old_nodes:
		ret = bch2_scan_old_btree_nodes(c, stats);
		break;
	case BCH_DATA_OP_drop_extra_replicas:
		ret = bch2_move_btree(c, start, end,
				drop_extra_replicas_btree_pred, c, stats) ?: ret;
		ret = bch2_move_data(c, start, end, NULL, stats,
				writepoint_hashed((unsigned long) current),
				true,
				drop_extra_replicas_pred, c) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	bch2_move_stats_exit(stats, c);
	return ret;
}

void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
{
	prt_printf(out, "%s: data type=", stats->name);
	bch2_prt_data_type(out, stats->data_type);
	prt_str(out, " pos=");
	bch2_bbpos_to_text(out, stats->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "keys moved:  %llu\n",	atomic64_read(&stats->keys_moved));
	prt_printf(out, "keys raced:  %llu\n",	atomic64_read(&stats->keys_raced));
	prt_printf(out, "bytes seen:  ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
	prt_newline(out);

	prt_printf(out, "bytes moved: ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
	prt_newline(out);

	prt_printf(out, "bytes raced: ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
{
	struct moving_io *io;

	bch2_move_stats_to_text(out, ctxt->stats);
	printbuf_indent_add(out, 2);

	prt_printf(out, "reads: ios %u/%u sectors %u/%u\n",
		   atomic_read(&ctxt->read_ios),
		   c->opts.move_ios_in_flight,
		   atomic_read(&ctxt->read_sectors),
		   c->opts.move_bytes_in_flight >> 9);

	prt_printf(out, "writes: ios %u/%u sectors %u/%u\n",
		   atomic_read(&ctxt->write_ios),
		   c->opts.move_ios_in_flight,
		   atomic_read(&ctxt->write_sectors),
		   c->opts.move_bytes_in_flight >> 9);

	printbuf_indent_add(out, 2);

	mutex_lock(&ctxt->lock);
	list_for_each_entry(io, &ctxt->ios, io_list)
		bch2_write_op_to_text(out, &io->write.op);
	mutex_unlock(&ctxt->lock);

	printbuf_indent_sub(out, 4);
}

void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct moving_context *ctxt;

	mutex_lock(&c->moving_context_lock);
	list_for_each_entry(ctxt, &c->moving_context_list, list)
		bch2_moving_ctxt_to_text(out, c, ctxt);
	mutex_unlock(&c->moving_context_lock);
}

void bch2_fs_move_init(struct bch_fs *c)
{
	INIT_LIST_HEAD(&c->moving_context_list);
	mutex_init(&c->moving_context_lock);
}