// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"

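/*
 * Device reference helpers: a data update pins every device that @k points
 * to for the duration of the update. bkey_put_dev_refs() drops one device
 * reference per pointer:
 */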
static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
}

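/*
 * Take a reference on every device @k points to. On failure, unwind: only
 * the pointers before the one that failed hold references (hence the
 * ptr2 == ptr early break), then return false:
 */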
static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		if (!bch2_dev_tryget(c, ptr->dev)) {
			bkey_for_each_ptr(ptrs, ptr2) {
				if (ptr2 == ptr)
					break;
				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
			}
			return false;
		}
	}
	return true;
}

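/*
 * Nocow locking: nocow (in-place) writes require per-bucket locks, so that
 * data moves don't race with writes to the same buckets:
 */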
static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
	}
}

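/*
 * Lock every bucket @k points to. If we have a moving context we're allowed
 * to wait: retry the trylock until our in-flight ios have drained, then
 * block on the lock. Without a context this is trylock-only; on failure,
 * unlock the buckets we already locked and return false:
 */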
static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		if (ctxt) {
			bool locked;

			move_ctxt_wait_event(ctxt,
				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
				list_empty(&ctxt->ios));

			if (!locked)
				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
		} else {
			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
				bkey_for_each_ptr(ptrs, ptr2) {
					if (ptr2 == ptr)
						break;

					ca = bch2_dev_have_ref(c, ptr2->dev);
					bucket = PTR_BUCKET_POS(ca, ptr2);
					bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
				}
				return false;
			}
		}
	}
	return true;
}

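/* Emit the move_extent_finish tracepoint, with @k formatted as text: */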
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

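/*
 * Emit the move_extent_fail tracepoint: log which pointers were marked for
 * rewrite, which of those were actually found in @insert, and the
 * old/new/wrote/insert keys, for debugging failed moves:
 */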
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold: ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew: ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote: ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

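/*
 * The index update portion of a data move: for each key we wrote, update
 * the original extent to point at the new replicas.
 *
 * @insert is reassembled from the version of the extent currently in the
 * btree, which may differ from the extent we read from if we raced with
 * another update; we drop the pointers that were rewritten, drop newly
 * written replicas that conflict with replicas we're keeping, drop replicas
 * in excess of opts.data_replicas, then append the pointers we just wrote
 * and commit. Extents that no longer match what we read from are skipped
 * and accounted as races.
 */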
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_slots|BTREE_ITER_intent);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

		/* Now, drop excess replicas: */
		rcu_read_lock();
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}
		rcu_read_unlock();

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id),
						 BCH_VALIDATE_commit);
		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			ret = -EIO;
			goto out;
		}

		if (trace_data_update_enabled()) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			trace_data_update(c, buf.buf);
			printbuf_exit(&buf);
		}

		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
			bch2_trans_update(trans, &iter, insert,
					  BTREE_UPDATE_internal_snapshot_node) ?:
			bch2_trans_commit(trans, &op->res,
					  NULL,
					  BCH_TRANS_COMMIT_no_check_rw|
					  BCH_TRANS_COMMIT_no_enospc|
					  m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		count_event(c, move_extent_fail);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

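/* Wrapper that runs the index update inside a btree transaction: */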
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

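/*
 * Called when the read half of a data update has completed: take the
 * checksum state of the data we read, point the write at it, and submit.
 * The data is rewritten as-is (BCH_WRITE_DATA_ENCODED), so @crc must
 * describe exactly what's in the bio:
 */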
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

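/*
 * Release everything taken by bch2_data_update_init(): nocow locks, device
 * refs, our copy of the key, the disk reservation, and the bio's pages:
 */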
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_s_c k = bkey_i_to_s_c(update->k.k);

	if (c->opts.nocow_enabled)
		bkey_nocow_unlock(c, k);
	bkey_put_dev_refs(c, k);
	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

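/*
 * Moving an unwritten extent involves no data: just allocate new space,
 * construct new unwritten extents pointing at it, and do the index updates
 * directly - for as long as the extent we're moving still matches:
 */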
static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_begin(trans);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_slots);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		bch_err_fn_ratelimited(c, ret);

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

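/* Print data update options in human-readable form: */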
void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	printbuf_tabstop_push(out, 20);
	prt_str(out, "rewrite ptrs:\t");
	bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
	prt_newline(out);

	prt_str(out, "kill ptrs:\t");
	bch2_prt_u64_base2(out, data_opts->kill_ptrs);
	prt_newline(out);

	prt_str(out, "target:\t");
	bch2_target_to_text(out, c, data_opts->target);
	prt_newline(out);

	prt_str(out, "compression:\t");
	bch2_compression_opt_to_text(out, background_compression(*io_opts));
	prt_newline(out);

	prt_str(out, "opts.replicas:\t");
	prt_u64(out, io_opts->data_replicas);
	prt_newline(out);

	prt_str(out, "extra replicas:\t");
	prt_u64(out, data_opts->extra_replicas);
}

void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
	bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
	prt_newline(out);
	bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
}

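/*
 * Drop the pointers given by data_opts.kill_ptrs (a bitmask of pointer
 * indices) from @k and update the btree: __fls() peels off one index at a
 * time, and bch2_bkey_drop_ptrs() drops the pointer at that index:
 */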
int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct data_update_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	while (data_opts.kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);

		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts.kill_ptrs ^= 1U << drop;
	}

	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize(c, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

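/*
 * Set up a data update: take device refs and nocow locks, stash a copy of
 * @k, and initialize the write op. m->op.nr_replicas is chosen to replace
 * the durability of the pointers being rewritten without (normally)
 * increasing the extent's total durability - see the comments below.
 *
 * Returns 0 on success. On failure - including the "nothing to do" cases -
 * all resources have already been released and an error (typically
 * -BCH_ERR_data_update_done) is returned, so callers only call
 * bch2_data_update_exit() after a successful init. Roughly (an illustrative
 * sketch, not a verbatim caller):
 *
 *	ret = bch2_data_update_init(trans, iter, ctxt, &m, wp,
 *				    io_opts, data_opts, btree_id, k);
 *	if (ret)
 *		return ret;	<- nothing to unwind
 *	... read the data, then bch2_data_update_read_done(&m, crc);
 *	bch2_data_update_exit(&m) once the update is finished.
 */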
int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	int ret = 0;

	/*
	 * If the fs is corrupt, we may have a key for a snapshot node that
	 * doesn't exist; we have to check for this because we go rw before
	 * repairing the snapshots table - just skip it, we can move it later.
	 */
	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
		return -BCH_ERR_data_update_done;

	if (!bkey_get_dev_refs(c, k))
		return -BCH_ERR_data_update_done;

	if (c->opts.nocow_enabled &&
	    !bkey_nocow_lock(c, ctxt, k)) {
		bkey_put_dev_refs(c, k);
		return -BCH_ERR_nocow_lock_blocked;
	}

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id = btree_id;
	m->data_opts = data_opts;
	m->ctxt = ctxt;
	m->stats = ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos = bkey_start_pos(k.k);
	m->op.version = k.k->version;
	m->op.target = data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt = background_compression(io_opts);
	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (!p.ptr.cached) {
			rcu_read_lock();
			if (BIT(i) & m->data_opts.rewrite_ptrs) {
				if (crc_is_compressed(p.crc))
					reserve_sectors += k.k->size;

				m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
				durability_removing += bch2_extent_ptr_desired_durability(c, &p);
			} else if (!(BIT(i) & m->data_opts.kill_ptrs)) {
				bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
				durability_have += bch2_extent_ptr_durability(c, &p);
			}
			rcu_read_unlock();
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce = p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		i++;
	}

	unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	m->op.nr_replicas = min(durability_removing, durability_required) +
		m->data_opts.extra_replicas;

	/*
	 * If device(s) were set to durability=0 after data was written to them
	 * we can end up with a durability=0 extent, and the normal algorithm
	 * that tries not to increase durability doesn't work:
	 */
	if (!(durability_have + durability_removing))
		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

	m->op.nr_replicas_required = m->op.nr_replicas;

	/*
	 * It might turn out that we don't need any new replicas, if the
	 * replicas or durability settings have been changed since the extent
	 * was written:
	 */
	if (!m->op.nr_replicas) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
		goto out;
	}

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto out;
	}

	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto out;
	}

	return 0;
out:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}

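/*
 * Pointers that are both cached and marked for rewrite are demoted to
 * kill_ptrs: a cached replica can simply be dropped rather than moved:
 */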
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}