xref: /linux/fs/bcachefs/data_update.c (revision dd83757f6e686a2188997cb58b5975f744bb7786)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "alloc_foreground.h"
5 #include "bkey_buf.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "compress.h"
9 #include "data_update.h"
10 #include "disk_groups.h"
11 #include "ec.h"
12 #include "error.h"
13 #include "extents.h"
14 #include "io_write.h"
15 #include "keylist.h"
16 #include "move.h"
17 #include "nocow_locking.h"
18 #include "rebalance.h"
19 #include "snapshot.h"
20 #include "subvolume.h"
21 #include "trace.h"
22 
23 static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
24 {
25 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
26 
27 	bkey_for_each_ptr(ptrs, ptr)
28 		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
29 }
30 
31 static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
32 {
33 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
34 
35 	bkey_for_each_ptr(ptrs, ptr) {
36 		if (!bch2_dev_tryget(c, ptr->dev)) {
37 			bkey_for_each_ptr(ptrs, ptr2) {
38 				if (ptr2 == ptr)
39 					break;
40 				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
41 			}
42 			return false;
43 		}
44 	}
45 	return true;
46 }
47 
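/*
 * Illustrative sketch, not part of this file: the all-or-nothing unwind
 * idiom used by bkey_get_dev_refs() above. If any tryget fails, every
 * reference taken so far is dropped before returning failure, so callers
 * never see a partial acquisition. The demo_* type and helpers below are
 * hypothetical stand-ins, not bcachefs APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_ref { atomic_int count; bool dying; };

static bool demo_tryget(struct demo_ref *r)
{
	if (r->dying)
		return false;
	atomic_fetch_add(&r->count, 1);
	return true;
}

static void demo_put(struct demo_ref *r)
{
	atomic_fetch_sub(&r->count, 1);
}

static bool demo_get_all(struct demo_ref *refs, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++)
		if (!demo_tryget(&refs[i])) {
			while (i--)	/* unwind everything acquired so far */
				demo_put(&refs[i]);
			return false;
		}
	return true;
}
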
48 static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
49 {
50 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
51 
52 	bkey_for_each_ptr(ptrs, ptr) {
53 		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
54 		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
55 
56 		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
57 	}
58 }
59 
60 static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
61 {
62 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
63 
64 	bkey_for_each_ptr(ptrs, ptr) {
65 		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
66 		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
67 
68 		if (ctxt) {
69 			bool locked;
70 
71 			move_ctxt_wait_event(ctxt,
72 				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
73 				list_empty(&ctxt->ios));
74 
75 			if (!locked)
76 				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
77 		} else {
78 			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
79 				bkey_for_each_ptr(ptrs, ptr2) {
80 					if (ptr2 == ptr)
81 						break;
82 
83 					ca = bch2_dev_have_ref(c, ptr2->dev);
84 					bucket = PTR_BUCKET_POS(ca, ptr2);
85 					bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
86 				}
87 				return false;
88 			}
89 		}
90 	}
91 	return true;
92 }
93 
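/*
 * Illustrative sketch, not part of this file: the two-phase locking in
 * bkey_nocow_lock() above. With a moving_context we must not sleep on the
 * lock while the context still has IOs in flight (a completion may be what
 * releases it), so we retry trylock until it succeeds or the IO list
 * drains, and only then take the blocking lock. Reduced to pthreads; the
 * callback names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static void demo_lock(pthread_mutex_t *lock,
		      bool (*have_pending_io)(void),
		      void (*wait_for_one_io)(void))
{
	while (pthread_mutex_trylock(lock) != 0) {
		if (!have_pending_io()) {
			pthread_mutex_lock(lock);	/* safe to block now */
			return;
		}
		wait_for_one_io();	/* make forward progress, then retry */
	}
}
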
94 static noinline void trace_move_extent_finish2(struct data_update *u,
95 					       struct bkey_i *new,
96 					       struct bkey_i *insert)
97 {
98 	struct bch_fs *c = u->op.c;
99 	struct printbuf buf = PRINTBUF;
100 
101 	prt_newline(&buf);
102 
103 	bch2_data_update_to_text(&buf, u);
104 	prt_newline(&buf);
105 
106 	prt_str_indented(&buf, "new replicas:\t");
107 	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
108 	prt_newline(&buf);
109 
110 	prt_str_indented(&buf, "insert:\t");
111 	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
112 	prt_newline(&buf);
113 
114 	trace_move_extent_finish(c, buf.buf);
115 	printbuf_exit(&buf);
116 }
117 
118 static void trace_move_extent_fail2(struct data_update *m,
119 			 struct bkey_s_c new,
120 			 struct bkey_s_c wrote,
121 			 struct bkey_i *insert,
122 			 const char *msg)
123 {
124 	struct bch_fs *c = m->op.c;
125 	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
126 	struct printbuf buf = PRINTBUF;
127 	unsigned rewrites_found = 0;
128 
129 	if (!trace_move_extent_fail_enabled())
130 		return;
131 
132 	prt_str(&buf, msg);
133 
134 	if (insert) {
135 		const union bch_extent_entry *entry;
136 		struct bch_extent_ptr *ptr;
137 		struct extent_ptr_decoded p;
138 
139 		unsigned ptr_bit = 1;
140 		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
141 			if ((ptr_bit & m->data_opts.rewrite_ptrs) &&
142 			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
143 			    !ptr->cached)
144 				rewrites_found |= ptr_bit;
145 			ptr_bit <<= 1;
146 		}
147 	}
148 
149 	prt_str(&buf, "rewrites found:\t");
150 	bch2_prt_u64_base2(&buf, rewrites_found);
151 	prt_newline(&buf);
152 
153 	bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts);
154 
155 	prt_str(&buf, "\nold:    ");
156 	bch2_bkey_val_to_text(&buf, c, old);
157 
158 	prt_str(&buf, "\nnew:    ");
159 	bch2_bkey_val_to_text(&buf, c, new);
160 
161 	prt_str(&buf, "\nwrote:  ");
162 	bch2_bkey_val_to_text(&buf, c, wrote);
163 
164 	if (insert) {
165 		prt_str(&buf, "\ninsert: ");
166 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
167 	}
168 
169 	trace_move_extent_fail(c, buf.buf);
170 	printbuf_exit(&buf);
171 }
172 
173 static int __bch2_data_update_index_update(struct btree_trans *trans,
174 					   struct bch_write_op *op)
175 {
176 	struct bch_fs *c = op->c;
177 	struct btree_iter iter;
178 	struct data_update *m =
179 		container_of(op, struct data_update, op);
180 	struct keylist *keys = &op->insert_keys;
181 	struct bkey_buf _new, _insert;
182 	int ret = 0;
183 
184 	bch2_bkey_buf_init(&_new);
185 	bch2_bkey_buf_init(&_insert);
186 	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
187 
188 	bch2_trans_iter_init(trans, &iter, m->btree_id,
189 			     bkey_start_pos(&bch2_keylist_front(keys)->k),
190 			     BTREE_ITER_slots|BTREE_ITER_intent);
191 
192 	while (1) {
193 		struct bkey_s_c k;
194 		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
195 		struct bkey_i *insert = NULL;
196 		struct bkey_i_extent *new;
197 		const union bch_extent_entry *entry_c;
198 		union bch_extent_entry *entry;
199 		struct extent_ptr_decoded p;
200 		struct bch_extent_ptr *ptr;
201 		const struct bch_extent_ptr *ptr_c;
202 		struct bpos next_pos;
203 		bool should_check_enospc;
204 		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
205 		unsigned rewrites_found = 0, durability, ptr_bit;
206 
207 		bch2_trans_begin(trans);
208 
209 		k = bch2_btree_iter_peek_slot(&iter);
210 		ret = bkey_err(k);
211 		if (ret)
212 			goto err;
213 
214 		new = bkey_i_to_extent(bch2_keylist_front(keys));
215 
216 		if (!bch2_extents_match(k, old)) {
217 			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
218 						NULL, "no match:");
219 			goto nowork;
220 		}
221 
222 		bkey_reassemble(_insert.k, k);
223 		insert = _insert.k;
224 
225 		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
226 		new = bkey_i_to_extent(_new.k);
227 		bch2_cut_front(iter.pos, &new->k_i);
228 
229 		bch2_cut_front(iter.pos,	insert);
230 		bch2_cut_back(new->k.p,		insert);
231 		bch2_cut_back(insert->k.p,	&new->k_i);
232 
233 		/*
234 		 * @old: extent that we read from
235 		 * @insert: key that we're going to update, initialized from
236 		 * extent currently in btree - same as @old unless we raced with
237 		 * other updates
238 		 * @new: extent with new pointers that we'll be adding to @insert
239 		 *
240 		 * First, drop rewrite_ptrs from @new:
241 		 */
242 		ptr_bit = 1;
243 		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
244 			if ((ptr_bit & m->data_opts.rewrite_ptrs) &&
245 			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
246 			    !ptr->cached) {
247 				bch2_extent_ptr_set_cached(c, &m->op.opts,
248 							   bkey_i_to_s(insert), ptr);
249 				rewrites_found |= ptr_bit;
250 			}
251 			ptr_bit <<= 1;
252 		}
253 
254 		if (m->data_opts.rewrite_ptrs &&
255 		    !rewrites_found &&
256 		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
257 			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
258 			goto nowork;
259 		}
260 
261 		/*
262 		 * A replica that we just wrote might conflict with a replica
263 		 * that we want to keep, due to racing with another move:
264 		 */
265 restart_drop_conflicting_replicas:
266 		extent_for_each_ptr(extent_i_to_s(new), ptr)
267 			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
268 			    !ptr_c->cached) {
269 				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
270 				goto restart_drop_conflicting_replicas;
271 			}
272 
273 		if (!bkey_val_u64s(&new->k)) {
274 			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
275 			goto nowork;
276 		}
277 
278 		/* Now, drop pointers that conflict with what we just wrote: */
279 		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
280 			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
281 				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
282 
283 		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
284 			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
285 
286 		/* Now, drop excess replicas: */
287 		rcu_read_lock();
288 restart_drop_extra_replicas:
289 		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
290 			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
291 
292 			if (!p.ptr.cached &&
293 			    durability - ptr_durability >= m->op.opts.data_replicas) {
294 				durability -= ptr_durability;
295 
296 				bch2_extent_ptr_set_cached(c, &m->op.opts,
297 							   bkey_i_to_s(insert), &entry->ptr);
298 				goto restart_drop_extra_replicas;
299 			}
300 		}
301 		rcu_read_unlock();
302 
303 		/* Finally, add the pointers we just wrote: */
304 		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
305 			bch2_extent_ptr_decoded_append(insert, &p);
306 
307 		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
308 		bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert));
309 
310 		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
311 						 &should_check_enospc,
312 						 &i_sectors_delta,
313 						 &disk_sectors_delta);
314 		if (ret)
315 			goto err;
316 
317 		if (disk_sectors_delta > (s64) op->res.sectors) {
318 			ret = bch2_disk_reservation_add(c, &op->res,
319 						disk_sectors_delta - op->res.sectors,
320 						!should_check_enospc
321 						? BCH_DISK_RESERVATION_NOFAIL : 0);
322 			if (ret)
323 				goto out;
324 		}
325 
326 		next_pos = insert->k.p;
327 
328 		/*
329 		 * Check for nonce offset inconsistency:
330 		 * This is debug code - we've been seeing this bug rarely, and
331 		 * it's been hard to reproduce, so this should give us some more
332 		 * information when it does occur:
333 		 */
334 		int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert),
335 						 (struct bkey_validate_context) {
336 							.btree	= m->btree_id,
337 							.flags	= BCH_VALIDATE_commit,
338 						 });
339 		if (invalid) {
340 			struct printbuf buf = PRINTBUF;
341 
342 			prt_str(&buf, "about to insert invalid key in data update path");
343 			prt_printf(&buf, "\nop.nonce: %u", m->op.nonce);
344 			prt_str(&buf, "\nold: ");
345 			bch2_bkey_val_to_text(&buf, c, old);
346 			prt_str(&buf, "\nk:   ");
347 			bch2_bkey_val_to_text(&buf, c, k);
348 			prt_str(&buf, "\nnew: ");
349 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
350 
351 			bch2_print_string_as_lines(KERN_ERR, buf.buf);
352 			printbuf_exit(&buf);
353 
354 			bch2_fatal_error(c);
355 			ret = -EIO;
356 			goto out;
357 		}
358 
359 		if (trace_data_update_enabled()) {
360 			struct printbuf buf = PRINTBUF;
361 
362 			prt_str(&buf, "\nold: ");
363 			bch2_bkey_val_to_text(&buf, c, old);
364 			prt_str(&buf, "\nk:   ");
365 			bch2_bkey_val_to_text(&buf, c, k);
366 			prt_str(&buf, "\nnew: ");
367 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
368 
369 			trace_data_update(c, buf.buf);
370 			printbuf_exit(&buf);
371 		}
372 
373 		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
374 						k.k->p, bkey_start_pos(&insert->k)) ?:
375 			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
376 						k.k->p, insert->k.p) ?:
377 			bch2_bkey_set_needs_rebalance(c, &op->opts, insert) ?:
378 			bch2_trans_update(trans, &iter, insert,
379 				BTREE_UPDATE_internal_snapshot_node) ?:
380 			bch2_trans_commit(trans, &op->res,
381 				NULL,
382 				BCH_TRANS_COMMIT_no_check_rw|
383 				BCH_TRANS_COMMIT_no_enospc|
384 				m->data_opts.btree_insert_flags);
385 		if (!ret) {
386 			bch2_btree_iter_set_pos(&iter, next_pos);
387 
388 			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
389 			if (trace_move_extent_finish_enabled())
390 				trace_move_extent_finish2(m, &new->k_i, insert);
391 		}
392 err:
393 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
394 			ret = 0;
395 		if (ret)
396 			break;
397 next:
398 		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
399 			bch2_keylist_pop_front(keys);
400 			if (bch2_keylist_empty(keys))
401 				goto out;
402 		}
403 		continue;
404 nowork:
405 		if (m->stats) {
406 			BUG_ON(k.k->p.offset <= iter.pos.offset);
407 			atomic64_inc(&m->stats->keys_raced);
408 			atomic64_add(k.k->p.offset - iter.pos.offset,
409 				     &m->stats->sectors_raced);
410 		}
411 
412 		count_event(c, move_extent_fail);
413 
414 		bch2_btree_iter_advance(&iter);
415 		goto next;
416 	}
417 out:
418 	bch2_trans_iter_exit(trans, &iter);
419 	bch2_bkey_buf_exit(&_insert, c);
420 	bch2_bkey_buf_exit(&_new, c);
421 	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
422 	return ret;
423 }
424 
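/*
 * Illustrative sketch, not part of this file: the pointer reconciliation
 * performed by __bch2_data_update_index_update() above, reduced to arrays
 * of device indices and simplified (the real code marks dropped pointers
 * cached rather than deleting them outright, and also prunes @new against
 * replicas it decides to keep). All demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <string.h>

#define DEMO_MAX_PTRS 16

struct demo_key { unsigned nr; int dev[DEMO_MAX_PTRS]; };

static bool demo_has_dev(const struct demo_key *k, int dev)
{
	for (unsigned i = 0; i < k->nr; i++)
		if (k->dev[i] == dev)
			return true;
	return false;
}

static void demo_drop(struct demo_key *k, unsigned i)
{
	memmove(&k->dev[i], &k->dev[i + 1], (--k->nr - i) * sizeof(k->dev[0]));
}

/* insert = (insert - rewritten ptrs - devs we just wrote to) + new */
static void demo_reconcile(struct demo_key *insert,
			   const struct demo_key *new,
			   unsigned rewrite_ptrs)
{
	/*
	 * Drop pointers flagged for rewrite; bit i <-> pointer i, so walk
	 * downward to keep the mask's indices valid as pointers shift:
	 */
	for (unsigned i = insert->nr; i--;)
		if (rewrite_ptrs & (1U << i))
			demo_drop(insert, i);

	/* drop pointers that conflict with a device we just wrote to */
	for (unsigned i = insert->nr; i--;)
		if (demo_has_dev(new, insert->dev[i]))
			demo_drop(insert, i);

	/* finally, append the replicas we just wrote */
	for (unsigned i = 0; i < new->nr && insert->nr < DEMO_MAX_PTRS; i++)
		insert->dev[insert->nr++] = new->dev[i];
}
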
425 int bch2_data_update_index_update(struct bch_write_op *op)
426 {
427 	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
428 }
429 
430 void bch2_data_update_read_done(struct data_update *m,
431 				struct bch_extent_crc_unpacked crc)
432 {
433 	/* write bio must own pages: */
434 	BUG_ON(!m->op.wbio.bio.bi_vcnt);
435 
436 	m->op.crc = crc;
437 	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
438 
439 	closure_call(&m->op.cl, bch2_write, NULL, NULL);
440 }
441 
442 void bch2_data_update_exit(struct data_update *update)
443 {
444 	struct bch_fs *c = update->op.c;
445 	struct bkey_s_c k = bkey_i_to_s_c(update->k.k);
446 
447 	if (c->opts.nocow_enabled)
448 		bkey_nocow_unlock(c, k);
449 	bkey_put_dev_refs(c, k);
450 	bch2_bkey_buf_exit(&update->k, c);
451 	bch2_disk_reservation_put(c, &update->op.res);
452 	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
453 }
454 
455 static void bch2_update_unwritten_extent(struct btree_trans *trans,
456 				  struct data_update *update)
457 {
458 	struct bch_fs *c = update->op.c;
459 	struct bio *bio = &update->op.wbio.bio;
460 	struct bkey_i_extent *e;
461 	struct write_point *wp;
462 	struct closure cl;
463 	struct btree_iter iter;
464 	struct bkey_s_c k;
465 	int ret;
466 
467 	closure_init_stack(&cl);
468 	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
469 
470 	while (bio_sectors(bio)) {
471 		unsigned sectors = bio_sectors(bio);
472 
473 		bch2_trans_begin(trans);
474 
475 		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
476 				     BTREE_ITER_slots);
477 		ret = lockrestart_do(trans, ({
478 			k = bch2_btree_iter_peek_slot(&iter);
479 			bkey_err(k);
480 		}));
481 		bch2_trans_iter_exit(trans, &iter);
482 
483 		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
484 			break;
485 
486 		e = bkey_extent_init(update->op.insert_keys.top);
487 		e->k.p = update->op.pos;
488 
489 		ret = bch2_alloc_sectors_start_trans(trans,
490 				update->op.target,
491 				false,
492 				update->op.write_point,
493 				&update->op.devs_have,
494 				update->op.nr_replicas,
495 				update->op.nr_replicas,
496 				update->op.watermark,
497 				0, &cl, &wp);
498 		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
499 			bch2_trans_unlock(trans);
500 			closure_sync(&cl);
501 			continue;
502 		}
503 
504 		bch_err_fn_ratelimited(c, ret);
505 
506 		if (ret)
507 			return;
508 
509 		sectors = min(sectors, wp->sectors_free);
510 
511 		bch2_key_resize(&e->k, sectors);
512 
513 		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
514 		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
515 		bch2_alloc_sectors_done(c, wp);
516 
517 		bio_advance(bio, sectors << 9);
518 		update->op.pos.offset += sectors;
519 
520 		extent_for_each_ptr(extent_i_to_s(e), ptr)
521 			ptr->unwritten = true;
522 		bch2_keylist_push(&update->op.insert_keys);
523 
524 		ret = __bch2_data_update_index_update(trans, &update->op);
525 
526 		bch2_open_buckets_put(c, &update->op.open_buckets);
527 
528 		if (ret)
529 			break;
530 	}
531 
532 	if (closure_nr_remaining(&cl) != 1) {
533 		bch2_trans_unlock(trans);
534 		closure_sync(&cl);
535 	}
536 }
537 
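/*
 * Illustrative sketch, not part of this file: the chunking loop in
 * bch2_update_unwritten_extent() above. One unwritten extent may be larger
 * than any single allocation, so it is consumed piecewise: allocate, clamp
 * to what the write point had free, emit a key for that piece, advance,
 * repeat until the bio is drained. The callback names are hypothetical.
 */
static void demo_alloc_chunks(unsigned total_sectors,
			      unsigned (*alloc)(unsigned wanted),
			      void (*emit_key)(unsigned offset, unsigned len))
{
	unsigned offset = 0;

	while (total_sectors) {
		unsigned got = alloc(total_sectors);	/* may return less */

		if (!got)	/* allocation failed: give up */
			break;

		emit_key(offset, got);
		offset	      += got;
		total_sectors -= got;
	}
}
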
538 void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
539 				   struct bch_io_opts *io_opts,
540 				   struct data_update_opts *data_opts)
541 {
542 	printbuf_tabstop_push(out, 20);
543 
544 	prt_str_indented(out, "rewrite ptrs:\t");
545 	bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
546 	prt_newline(out);
547 
548 	prt_str_indented(out, "kill ptrs:\t");
549 	bch2_prt_u64_base2(out, data_opts->kill_ptrs);
550 	prt_newline(out);
551 
552 	prt_str_indented(out, "target:\t");
553 	bch2_target_to_text(out, c, data_opts->target);
554 	prt_newline(out);
555 
556 	prt_str_indented(out, "compression:\t");
557 	bch2_compression_opt_to_text(out, io_opts->background_compression);
558 	prt_newline(out);
559 
560 	prt_str_indented(out, "opts.replicas:\t");
561 	prt_u64(out, io_opts->data_replicas);
562 	prt_newline(out);
563 
564 	prt_str_indented(out, "extra replicas:\t");
565 	prt_u64(out, data_opts->extra_replicas);
566 }
567 
568 void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
569 {
570 	bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
571 	prt_newline(out);
572 
573 	prt_str_indented(out, "old key:\t");
574 	bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
575 }
576 
577 int bch2_extent_drop_ptrs(struct btree_trans *trans,
578 			  struct btree_iter *iter,
579 			  struct bkey_s_c k,
580 			  struct bch_io_opts *io_opts,
581 			  struct data_update_opts *data_opts)
582 {
583 	struct bch_fs *c = trans->c;
584 	struct bkey_i *n;
585 	int ret;
586 
587 	n = bch2_bkey_make_mut_noupdate(trans, k);
588 	ret = PTR_ERR_OR_ZERO(n);
589 	if (ret)
590 		return ret;
591 
592 	while (data_opts->kill_ptrs) {
593 		unsigned i = 0, drop = __fls(data_opts->kill_ptrs);
594 
595 		bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
596 		data_opts->kill_ptrs ^= 1U << drop;
597 	}
598 
599 	/*
600 	 * If the new extent no longer has any pointers, bch2_extent_normalize()
601 	 * will do the appropriate thing with it (turning it into a
602 	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
603 	 */
604 	bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n));
605 
606 	/*
607 	 * Since we're not inserting through an extent iterator
608 	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
609 	 * we aren't using the extent overwrite path to delete, we're
610 	 * just using the normal key deletion path:
611 	 */
612 	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
613 		n->k.size = 0;
614 
615 	return bch2_trans_relock(trans) ?:
616 		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
617 		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
618 }
619 
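/*
 * Illustrative sketch, not part of this file: why bch2_extent_drop_ptrs()
 * above consumes kill_ptrs highest-bit-first. Dropping pointer i shifts
 * every later pointer down by one, so any mask bits above i would go
 * stale; taking the most significant set bit each round (__fls) guarantees
 * no still-set bit refers to a shifted pointer. Reuses the hypothetical
 * struct demo_key / demo_drop() from the sketch further up.
 */
static void demo_kill_ptrs(struct demo_key *k, unsigned kill_ptrs)
{
	while (kill_ptrs) {
		unsigned drop = 31 - __builtin_clz(kill_ptrs);	/* ~ __fls() */

		demo_drop(k, drop);	/* indices below drop stay valid */
		kill_ptrs ^= 1U << drop;
	}
}
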
620 int bch2_data_update_init(struct btree_trans *trans,
621 			  struct btree_iter *iter,
622 			  struct moving_context *ctxt,
623 			  struct data_update *m,
624 			  struct write_point_specifier wp,
625 			  struct bch_io_opts io_opts,
626 			  struct data_update_opts data_opts,
627 			  enum btree_id btree_id,
628 			  struct bkey_s_c k)
629 {
630 	struct bch_fs *c = trans->c;
631 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
632 	const union bch_extent_entry *entry;
633 	struct extent_ptr_decoded p;
634 	unsigned reserve_sectors = k.k->size * data_opts.extra_replicas;
635 	int ret = 0;
636 
637 	/*
638 	 * fs is corrupt: we have a key for a snapshot node that doesn't exist,
639 	 * and we have to check for this because we go rw before repairing the
640 	 * snapshots table - just skip it, we can move it later.
641 	 */
642 	if (unlikely(k.k->p.snapshot && !bch2_snapshot_exists(c, k.k->p.snapshot)))
643 		return -BCH_ERR_data_update_done;
644 
645 	if (!bkey_get_dev_refs(c, k))
646 		return -BCH_ERR_data_update_done;
647 
648 	if (c->opts.nocow_enabled &&
649 	    !bkey_nocow_lock(c, ctxt, k)) {
650 		bkey_put_dev_refs(c, k);
651 		return -BCH_ERR_nocow_lock_blocked;
652 	}
653 
654 	bch2_bkey_buf_init(&m->k);
655 	bch2_bkey_buf_reassemble(&m->k, c, k);
656 	m->btree_id	= btree_id;
657 	m->data_opts	= data_opts;
658 	m->ctxt		= ctxt;
659 	m->stats	= ctxt ? ctxt->stats : NULL;
660 
661 	bch2_write_op_init(&m->op, c, io_opts);
662 	m->op.pos	= bkey_start_pos(k.k);
663 	m->op.version	= k.k->bversion;
664 	m->op.target	= data_opts.target;
665 	m->op.write_point = wp;
666 	m->op.nr_replicas = 0;
667 	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
668 		BCH_WRITE_PAGES_OWNED|
669 		BCH_WRITE_DATA_ENCODED|
670 		BCH_WRITE_MOVE|
671 		m->data_opts.write_flags;
672 	m->op.compression_opt	= io_opts.background_compression;
673 	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
674 
675 	unsigned durability_have = 0, durability_removing = 0;
676 
677 	unsigned ptr_bit = 1;
678 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
679 		if (!p.ptr.cached) {
680 			rcu_read_lock();
681 			if (ptr_bit & m->data_opts.rewrite_ptrs) {
682 				if (crc_is_compressed(p.crc))
683 					reserve_sectors += k.k->size;
684 
685 				m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
686 				durability_removing += bch2_extent_ptr_desired_durability(c, &p);
687 			} else if (!(ptr_bit & m->data_opts.kill_ptrs)) {
688 				bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
689 				durability_have += bch2_extent_ptr_durability(c, &p);
690 			}
691 			rcu_read_unlock();
692 		}
693 
694 		/*
695 		 * op->csum_type is normally initialized from the fs/file's
696 		 * current options - but if an extent is encrypted, we require
697 		 * that it stays encrypted:
698 		 */
699 		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
700 			m->op.nonce	= p.crc.nonce + p.crc.offset;
701 			m->op.csum_type = p.crc.csum_type;
702 		}
703 
704 		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
705 			m->op.incompressible = true;
706 
707 		ptr_bit <<= 1;
708 	}
709 
710 	unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));
711 
712 	/*
713 	 * If current extent durability is less than io_opts.data_replicas,
714 	 * we're not trying to rereplicate the extent up to data_replicas here -
715 	 * unless extra_replicas was specified
716 	 *
717 	 * Increasing replication is an explicit operation triggered by
718 	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
719 	 */
720 	m->op.nr_replicas = min(durability_removing, durability_required) +
721 		m->data_opts.extra_replicas;
722 
723 	/*
724 	 * If device(s) were set to durability=0 after data was written to them
725 	 * we can end up with a durability=0 extent, and the normal algorithm
726 	 * that tries not to increase durability doesn't work:
727 	 */
728 	if (!(durability_have + durability_removing))
729 		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
730 
731 	m->op.nr_replicas_required = m->op.nr_replicas;
732 
733 	/*
734 	 * It might turn out that we don't need any new replicas, if the
735 	 * replicas or durability settings have been changed since the extent
736 	 * was written:
737 	 */
738 	if (!m->op.nr_replicas) {
739 		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
740 		m->data_opts.rewrite_ptrs = 0;
741 		/* if iter == NULL, it's just a promote */
742 		if (iter)
743 			ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
744 		goto out;
745 	}
746 
747 	if (reserve_sectors) {
748 		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
749 				m->data_opts.extra_replicas
750 				? 0
751 				: BCH_DISK_RESERVATION_NOFAIL);
752 		if (ret)
753 			goto out;
754 	}
755 
756 	if (bkey_extent_is_unwritten(k)) {
757 		bch2_update_unwritten_extent(trans, m);
758 		goto out;
759 	}
760 
761 	return 0;
762 out:
763 	bch2_data_update_exit(m);
764 	return ret ?: -BCH_ERR_data_update_done;
765 }
766 
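/*
 * Illustrative sketch, not part of this file: the replica-count arithmetic
 * in bch2_data_update_init() above. New replicas are written only to
 * replace durability being removed, capped at what the replicas option
 * still requires - going beyond that is left to an explicit rereplicate.
 * Worked example: data_replicas = 2, three pointers of durability 1 each,
 * one flagged for rewrite: durability_have = 2, durability_removing = 1,
 * required = max(0, 2 - 2) = 0, so nr_replicas = min(1, 0) = 0 and the
 * rewrite degenerates into dropping the pointer. Hypothetical helper.
 */
static unsigned demo_nr_replicas(unsigned data_replicas,
				 unsigned durability_have,
				 unsigned durability_removing,
				 unsigned extra_replicas)
{
	unsigned required = data_replicas > durability_have
		? data_replicas - durability_have : 0;
	unsigned nr = (durability_removing < required
		       ? durability_removing : required) + extra_replicas;

	/* a durability=0 extent would otherwise never get a copy at all */
	if (!(durability_have + durability_removing) && !nr)
		nr = 1;
	return nr;
}
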
767 void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
768 {
769 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
770 	unsigned ptr_bit = 1;
771 
772 	bkey_for_each_ptr(ptrs, ptr) {
773 		if ((opts->rewrite_ptrs & ptr_bit) && ptr->cached) {
774 			opts->kill_ptrs |= ptr_bit;
775 			opts->rewrite_ptrs ^= ptr_bit;
776 		}
777 
778 		ptr_bit <<= 1;
779 	}
780 }
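
/*
 * Illustrative worked example, not part of this file: a cached pointer has
 * no durability, so "rewrite it" reduces to "drop it". Given a key whose
 * pointers are { dirty, cached, dirty } and rewrite_ptrs = 0b110,
 * bch2_data_update_opts_normalize() above leaves rewrite_ptrs = 0b100 and
 * sets kill_ptrs = 0b010: the cached replica is simply deleted rather than
 * copied somewhere new.
 */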
781