// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "async_objs.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_BCACHEFS_DEBUG
static unsigned bch2_write_corrupt_ratio;
module_param_named(write_corrupt_ratio, bch2_write_corrupt_ratio, uint, 0644);
MODULE_PARM_DESC(write_corrupt_ratio, "Inject corruption into written data, for testing (0 = disabled)");
#endif

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

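/*
 * Congestion accounting: compare this IO's latency against a threshold
 * derived from a quantile of the device's recent latencies; when we're over
 * it, bump up ca->congested (capped at CONGESTED_MAX), otherwise let it
 * decay:
 */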
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}

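/*
 * Called on IO completion: update the exponentially weighted moving average
 * of this device's IO latency, feed the congestion accounting above, and
 * record the sample in the device's time_stats:
 */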
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new;

	old = atomic64_read(latency);
	do {
		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while (!atomic64_try_cmpxchg(latency, &old, new));

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}

#endif

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOFS);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
	}

	return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

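/*
 * Compute how an extent update changes accounting: the delta to the inode's
 * i_sectors, the delta to on-disk usage, and whether disk usage increases
 * at all (used to decide whether the disk reservation may fail):
 */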
int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing	= false;
	*i_sectors_delta	= 0;
	*disk_sectors_delta	= 0;

	bch2_trans_copy_iter(trans, &iter, extent_iter);

	for_each_btree_key_max_continue_norestart(trans, iter,
				new->k.p, BTREE_ITER_slots, old, ret) {
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

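/*
 * Apply the inode updates that accompany an extent update - bi_size,
 * bi_sectors - journalling the inode update only when a field other than
 * bi_journal_seq actually changes:
 */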
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	/*
	 * Crazy performance optimization:
	 * Every extent update needs to also update the inode: the inode trigger
	 * will set bi->journal_seq to the journal sequence number of this
	 * transaction - for fsync.
	 *
	 * But if that's the only reason we're updating the inode (we're not
	 * updating bi_size or bi_sectors), then we don't need the inode update
	 * to be journalled - if we crash, the bi_journal_seq update will be
	 * lost, but that's fine.
	 */
	unsigned inode_update_flags = BTREE_UPDATE_nojournal;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_intent|
			      BTREE_ITER_cached);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	/*
	 * varint_decode_fast(), in the inode .invalid method, reads up to 7
	 * bytes past the end of the buffer:
	 */
	struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
	ret = PTR_ERR_OR_ZERO(k_mut);
	if (unlikely(ret))
		goto err;

	bkey_reassemble(k_mut, k);

	if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
		k_mut = bch2_inode_to_v3(trans, k_mut);
		ret = PTR_ERR_OR_ZERO(k_mut);
		if (unlikely(ret))
			goto err;
	}

	struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors);
		if (unlikely(bi_sectors + i_sectors_delta < 0)) {
			struct bch_fs *c = trans->c;
			struct printbuf buf = PRINTBUF;
			bch2_log_msg_start(c, &buf);
			prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0",
				   extent_iter->pos.inode, bi_sectors, i_sectors_delta);

			bool print = bch2_count_fsck_err(c, inode_i_sectors_underflow, &buf);
			if (print)
				bch2_print_str(c, KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			if (i_sectors_delta < 0)
				i_sectors_delta = -bi_sectors;
			else
				i_sectors_delta = 0;
		}

		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	/*
	 * extents, dirents and xattrs updates require that an inode update also
	 * happens - to ensure that if a key exists in one of those btrees with
	 * a given snapshot ID an inode is also present - so we may have to skip
	 * the nojournal optimization:
	 */
	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_internal_snapshot_node|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

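/*
 * Insert a single extent, trimming @k to what can be committed atomically
 * and updating the inode's i_size/i_sectors in the same transaction; on
 * success the iterator is advanced to the end of what was inserted:
 */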
int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses the iterator without changing iter->path->pos to
	 * search_key() (which is pos + 1 for extents): we want there to be a
	 * path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(trans, iter);
	if (ret)
		return ret;

	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(trans, iter, next_pos);
	return 0;
}

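/*
 * Default index update path for completed writes: insert the keys for the
 * data we just wrote into the extents btree, one commit at a time,
 * restarting on transaction restart and advancing through the keylist as
 * inserts complete:
 */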
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum	= k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_slots|BTREE_ITER_intent);

		ret = bch2_extent_update(trans, inum, &iter, sk.k,
					 &op->res,
					 op->new_i_size, &op->i_sectors_delta,
					 op->flags & BCH_WRITE_check_enospc);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

/* Writes */

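/*
 * Log a ratelimited error message for a failed write, prefixed with the
 * write's position (and, for moves, the key being moved):
 */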
void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...)
{
	struct printbuf buf = PRINTBUF;

	if (op->subvol) {
		bch2_inum_offset_err_msg(op->c, &buf,
					 (subvol_inum) { op->subvol, op->pos.inode, },
					 offset << 9);
	} else {
		struct bpos pos = op->pos;
		pos.offset = offset;
		bch2_inum_snap_offset_err_msg(op->c, &buf, pos);
	}

	prt_str(&buf, "write error: ");

	va_list args;
	va_start(args, fmt);
	prt_vprintf(&buf, fmt, args);
	va_end(args);

	if (op->flags & BCH_WRITE_move) {
		struct data_update *u = container_of(op, struct data_update, op);

		prt_printf(&buf, "\n from internal move ");
		bch2_bkey_val_to_text(&buf, op->c, bkey_i_to_s_c(u->k.k));
	}

	bch_err_ratelimited(op->c, "%s", buf.buf);
	printbuf_exit(&buf);
}

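/*
 * Submit a write to every device that @k has a pointer to, cloning @wbio
 * for all but the last pointer; pointers to unavailable devices complete
 * immediately with an error, to be handled by the write path:
 */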
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k,
			       bool nocow)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	struct bch_write_bio *n;
	unsigned ref_rw  = type == BCH_DATA_btree ? READ : WRITE;
	unsigned ref_idx = type == BCH_DATA_btree
		? BCH_DEV_READ_REF_btree_node_write
		: BCH_DEV_WRITE_REF_io_write;

	BUG_ON(c->opts.nochanges);

	const struct bch_extent_ptr *last = NULL;
	bkey_for_each_ptr(ptrs, ptr)
		last = ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		/*
		 * XXX: btree writes should be using io_ref[WRITE], but we
		 * aren't retrying failed btree writes yet (due to device
		 * removal/ro):
		 */
		struct bch_dev *ca = nocow
			? bch2_dev_have_ref(c, ptr->dev)
			: bch2_dev_get_ioref(c, ptr->dev, ref_rw, ref_idx);

		if (ptr != last) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= ca != NULL;
		n->nocow		= nocow;
		n->submit_time		= local_clock();
		n->inode_offset		= bkey_start_offset(&k->k);
		if (nocow)
			n->nocow_bucket	= PTR_BUCKET_NR(ca, ptr);
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);

			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
				bio_endio(&n->bio);
				continue;
			}

			submit_bio(&n->bio);
		} else {
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}

static void __bch2_write(struct bch_write_op *);

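/*
 * Final write completion: update time stats, release the disk reservation
 * and (for non-move writes) the c->writes ref, then call the op's end_io
 * callback:
 */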
static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	if (!(op->flags & BCH_WRITE_move))
		enumerated_ref_put(&c->writes, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	async_object_list_del(c, write_op, op->list_idx);
	if (op->end_io)
		op->end_io(op);
}

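/*
 * After an IO error, drop extent pointers to the devices where the write
 * failed; if an extent loses all its pointers the whole write fails:
 */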
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return bch_err_throw(c, data_write_io);
		}

		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned dev;
	int ret = 0;

	if (unlikely(op->flags & BCH_WRITE_io_error)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_move)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (unlikely(ret && !bch2_err_matches(ret, EROFS))) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch2_write_op_error(op, bkey_start_offset(&insert->k),
					    "btree update error: %s", bch2_err_str(ret));
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev, -BCH_ERR_data_write_io);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_submitted;
	goto out;
}

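/*
 * Write point state tracking, for sysfs/debugging: account how long each
 * write point spends runnable, waiting on IO, and waiting on the index
 * update worker:
 */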
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
	if (state != wp->state) {
		struct task_struct *p = current;
		u64 now = ktime_get_ns();
		u64 runtime = p->se.sum_exec_runtime +
			(now - p->se.exec_start);

		if (state == WRITE_POINT_runnable)
			wp->last_runtime = runtime;
		else if (wp->state == WRITE_POINT_runnable)
			wp->time[WRITE_POINT_running] += runtime - wp->last_runtime;

		if (wp->last_state_change &&
		    time_after64(now, wp->last_state_change))
			wp->time[wp->state] += now - wp->last_state_change;
		wp->state = state;
		wp->last_state_change = now;
	}
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state state;

	state = running			 ? WRITE_POINT_runnable :
		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
					 : WRITE_POINT_stopped;

	__wp_update_state(wp, state);
}

static CLOSURE_CALLBACK(bch2_write_index)
{
	closure_type(op, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	if ((op->flags & BCH_WRITE_submitted) &&
	    (op->flags & BCH_WRITE_move))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}

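/*
 * Worker for deferred index updates: pop completed ops off the write
 * point's list, run their index updates, and resubmit ops that still have
 * data left to write:
 */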
void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list);
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		op->flags |= BCH_WRITE_in_worker;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_submitted))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}

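/*
 * Per-replica write completion: account for errors and latency, record
 * failed devices in op->failed, drop nocow locks and device refs, then
 * complete the parent bio or put the op's closure:
 */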
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= wbio->have_ioref
		? bch2_dev_have_ref(c, wbio->dev)
		: NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
				   wbio->submit_time, !bio->bi_status);

	if (unlikely(bio->bi_status)) {
		if (ca)
			bch_err_inum_offset_ratelimited(ca,
					    op->pos.inode,
					    wbio->inode_offset << 9,
					    "data write error: %s",
					    bch2_blk_status_to_str(bio->bi_status));
		else
			bch_err_inum_offset_ratelimited(c,
					    op->pos.inode,
					    wbio->inode_offset << 9,
					    "data write error: %s",
					    bch2_blk_status_to_str(bio->bi_status));
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_io_error;
	}

	if (wbio->nocow) {
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 POS(ca->dev_idx, wbio->nocow_bucket),
					 BUCKET_NOCOW_LOCK_UPDATE);
		set_bit(wbio->dev, op->devs_need_flush->d);
	}

	if (wbio->have_ioref)
		enumerated_ref_put(&ca->io_ref[WRITE],
				   BCH_DEV_WRITE_REF_io_write);

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}

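/*
 * Create the extent key for data just written to @wp - position, size,
 * version, checksum/compression info, pointers to the space we allocated -
 * and append it to the op's list of keys to insert:
 */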
static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.bversion	= version;

	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
					       op->flags & BCH_WRITE_cached);

	if (!(op->flags & BCH_WRITE_move))
		bch2_bkey_set_needs_rebalance(op->c, &op->opts, &e->k_i);

	bch2_keylist_push(&op->insert_keys);
}

static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}

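/*
 * Recompute the checksum of the live portion of the op's data with
 * @new_csum_type (without encrypting or decrypting), narrowing the bio and
 * op->crc to just the live region:
 */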
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	int ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				      NULL, &new_crc,
				      op->crc.offset, op->crc.live_size,
				      new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}

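/*
 * Prepare data that was already checksummed/compressed/encrypted when we
 * read it (BCH_WRITE_data_encoded, i.e. a move) to be written: returns 1
 * if it can be written out as is, 0 if it was decoded for re-encoding, or
 * a negative error code:
 */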
static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;
	struct bch_csum csum;
	int ret = 0;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type) {
			ret = bch2_write_rechecksum(c, op, op->csum_type);
			if (ret)
				return ret;
		}

		return 1;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		/* Last point we can still verify checksum: */
		struct nonce nonce = extent_nonce(op->version, op->crc);
		csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			goto csum_err;

		if (bch2_csum_type_is_encryption(op->crc.csum_type)) {
			ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
			if (ret)
				return ret;

			op->crc.csum_type = 0;
			op->crc.csum = (struct bch_csum) { 0, 0 };
		}

		ret = bch2_bio_uncompress_inplace(op, bio);
		if (ret)
			return ret;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if (op->crc.live_size != op->crc.uncompressed_size ||
	    op->crc.csum_type != op->csum_type) {
		ret = bch2_write_rechecksum(c, op, op->csum_type);
		if (ret)
			return ret;
	}

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if (bch2_csum_type_is_encryption(op->crc.csum_type) &&
	    (op->compression_opt || op->crc.csum_type != op->csum_type)) {
		struct nonce nonce = extent_nonce(op->version, op->crc);
		csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			goto csum_err;

		ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
		if (ret)
			return ret;

		op->crc.csum_type = 0;
		op->crc.csum = (struct bch_csum) { 0, 0 };
	}

	return 0;
csum_err:
	bch2_write_op_error(op, op->pos.offset,
		"error verifying existing checksum while moving existing data (memory corruption?)\n"
		" expected %0llx:%0llx got %0llx:%0llx type %s",
		op->crc.csum.hi,
		op->crc.csum.lo,
		csum.hi,
		csum.lo,
		op->crc.csum_type < BCH_CSUM_NR
		? __bch2_csum_types[op->crc.csum_type]
		: "(unknown)");
	return bch_err_throw(c, data_write_csum);
}

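/*
 * The main data write path: slice off as much of the source bio as will fit
 * in the space allocated at @wp, compressing/encrypting/checksumming each
 * extent as configured and appending the keys to insert; returns positive
 * if there's still data left to write, negative on error:
 */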
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	if (op->incompressible)
		op->compression_opt = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	if (unlikely(op->flags & BCH_WRITE_data_encoded)) {
		ret = bch2_write_prep_encoded_data(op, wp);
		if (ret < 0)
			goto err;
		if (ret) {
			if (ec_buf) {
				dst = bch2_write_bio_alloc(c, wp, src,
							   &page_alloc_failed,
							   ec_buf);
				bio_copy_data(dst, src);
				bounce = true;
			}
			init_append_extent(op, wp, op->version, op->crc);
			goto do_write;
		}
	}

	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_pages_stable)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_pages_owned))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned write_corrupt_ratio = READ_ONCE(bch2_write_corrupt_ratio);
	if (!bounce && write_corrupt_ratio) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}
#endif
	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		if (page_alloc_failed &&
		    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_data_encoded) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_data_encoded) &&
		    !crc_is_compressed(crc) &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			u8 compression_type = crc.compression_type;
			u16 nonce = crc.nonce;
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight.
			 */
			ret = bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type);
			if (ret)
				goto err;
			/*
			 * bch2_rechecksum_bio() sets compression_type on crc
			 * from op->crc, this isn't always correct as sometimes
			 * we're changing an extent from uncompressed to
			 * incompressible.
			 */
			crc.compression_type = compression_type;
			crc.nonce = nonce;
		} else {
			if ((op->flags & BCH_WRITE_data_encoded) &&
			    (ret = bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type)))
				goto err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			ret = bch2_encrypt_bio(c, op->csum_type,
					       extent_nonce(version, crc), dst);
			if (ret)
				goto err;

			crc.csum = bch2_checksum_bio(c, op->csum_type,
						     extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

#ifdef CONFIG_BCACHEFS_DEBUG
		if (write_corrupt_ratio) {
			swap(dst->bi_iter.bi_size, dst_len);
			bch2_maybe_corrupt_bio(dst, write_corrupt_ratio);
			swap(dst->bi_iter.bi_size, dst_len);
		}
#endif

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				       op->inline_keys,
				       ARRAY_SIZE(op->inline_keys),
				       BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOFS, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;
do_write:
	*_dst = dst;
	return more;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}

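/*
 * Can this extent be overwritten in place (nocow)? It must be a plain
 * extent, not checksummed/compressed or erasure coded, with enough
 * durability for the data_replicas option:
 */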
static bool bch2_extent_is_writeable(struct bch_write_op *op,
				     struct bkey_s_c k)
{
	struct bch_fs *c = op->c;
	struct bkey_s_c_extent e;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	unsigned replicas = 0;

	if (k.k->type != KEY_TYPE_extent)
		return false;

	e = bkey_s_c_to_extent(k);

	guard(rcu)();
	extent_for_each_ptr_decode(e, p, entry) {
		if (crc_is_encoded(p.crc) || p.has_ec)
			return false;

		replicas += bch2_extent_ptr_durability(c, &p);
	}

	return replicas >= op->opts.data_replicas;
}

static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
		/* trace this */
		return 0;
	}

	struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
	int ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return  bch2_extent_update_i_size_sectors(trans, iter,
					min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_internal_snapshot_node);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	for_each_keylist_key(&op->insert_keys, orig) {
		ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_intent, k,
				     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));
		if (ret)
			break;
	}

	bch2_trans_put(trans);

	if (ret && !bch2_err_matches(ret, EROFS)) {
		struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
		bch2_write_op_error(op, bkey_start_offset(&insert->k),
				    "btree update error: %s", bch2_err_str(ret));
	}

	if (ret)
		op->error = ret;
}

static void __bch2_nocow_write_done(struct bch_write_op *op)
{
	if (unlikely(op->flags & BCH_WRITE_io_error)) {
		op->error = bch_err_throw(op->c, data_write_io);
	} else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
		bch2_nocow_write_convert_unwritten(op);
}

static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
	closure_type(op, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}

struct bucket_to_lock {
	struct bpos		b;
	unsigned		gen;
	struct nocow_lock_bucket *l;
};

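/*
 * The nocow write path: overwrite existing extents in place rather than
 * allocating new space, taking nocow locks on the buckets we're writing to
 * and rechecking bucket gens after dropping btree locks to detect races;
 * falls back to the normal COW path when the extent isn't suitable:
 */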
static void bch2_nocow_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
	u32 snapshot;
	struct bucket_to_lock *stale_at;
	int stale, ret;

	if (op->flags & BCH_WRITE_move)
		return;

	darray_init(&buckets);
	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
	if (unlikely(ret))
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(op->pos.inode, op->pos.offset, snapshot),
			     BTREE_ITER_slots);
	while (1) {
		struct bio *bio = &op->wbio.bio;

		buckets.nr = 0;

		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		k = bch2_btree_iter_peek_slot(trans, &iter);
		ret = bkey_err(k);
		if (ret)
			break;

		/* fall back to normal cow write path? */
		if (unlikely(k.k->p.snapshot != snapshot ||
			     !bch2_extent_is_writeable(op, k)))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 k.k->u64s))
			break;

		/* Get iorefs before dropping btree locks: */
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		bkey_for_each_ptr(ptrs, ptr) {
			struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE,
						BCH_DEV_WRITE_REF_io_write);
			if (unlikely(!ca))
				goto err_get_ioref;

			struct bpos b = PTR_BUCKET_POS(ca, ptr);
			struct nocow_lock_bucket *l =
				bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
			prefetch(l);

			/* XXX allocating memory with btree locks held - rare */
			darray_push_gfp(&buckets, ((struct bucket_to_lock) {
						   .b = b, .gen = ptr->gen, .l = l,
						   }), GFP_KERNEL|__GFP_NOFAIL);

			if (ptr->unwritten)
				op->flags |= BCH_WRITE_convert_unwritten;
		}

		/* Unlock before taking nocow locks, doing IO: */
		bkey_reassemble(op->insert_keys.top, k);
		bch2_trans_unlock(trans);

		bch2_cut_front(op->pos, op->insert_keys.top);
		if (op->flags & BCH_WRITE_convert_unwritten)
			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

		darray_for_each(buckets, i) {
			struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);

			__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
						 bucket_to_u64(i->b),
						 BUCKET_NOCOW_LOCK_UPDATE);

			int gen = bucket_gen_get(ca, i->b.offset);
			stale = gen < 0 ? gen : gen_after(gen, i->gen);
			if (unlikely(stale)) {
				stale_at = i;
				goto err_bucket_stale;
			}
		}

		bio = &op->wbio.bio;
		if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
			bio = bio_split(bio, k.k->p.offset - op->pos.offset,
					GFP_KERNEL, &c->bio_write);
			wbio_init(bio)->put_bio = true;
			bio->bi_opf = op->wbio.bio.bi_opf;
		} else {
			op->flags |= BCH_WRITE_submitted;
		}

		op->pos.offset += bio_sectors(bio);
		op->written += bio_sectors(bio);

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;
		closure_get(&op->cl);

		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  op->insert_keys.top, true);

		bch2_keylist_push(&op->insert_keys);
		if (op->flags & BCH_WRITE_submitted)
			break;
		bch2_btree_iter_advance(trans, &iter);
	}
out:
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	darray_exit(&buckets);

	if (ret) {
		bch2_write_op_error(op, op->pos.offset,
				    "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
		op->error = ret;
		op->flags |= BCH_WRITE_submitted;
	}

	/* fallback to cow write path? */
	if (!(op->flags & BCH_WRITE_submitted)) {
		closure_sync(&op->cl);
		__bch2_nocow_write_done(op);
		op->insert_keys.top = op->insert_keys.keys;
	} else if (op->flags & BCH_WRITE_sync) {
		closure_sync(&op->cl);
		bch2_nocow_write_done(&op->cl.work);
	} else {
		/*
		 * XXX
		 * needs to run out of process context because ei_quota_lock is
		 * a mutex
		 */
		continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
	}
	return;
err_get_ioref:
	darray_for_each(buckets, i)
		enumerated_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE],
				   BCH_DEV_WRITE_REF_io_write);

	/* Fall back to COW path: */
	goto out;
err_bucket_stale:
	darray_for_each(buckets, i) {
		bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
		if (i == stale_at)
			break;
	}

	struct printbuf buf = PRINTBUF;
	if (bch2_fs_inconsistent_on(stale < 0, c,
				    "pointer to invalid bucket in nocow path on device %llu\n %s",
				    stale_at->b.inode,
				    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch_err_throw(c, data_write_invalid_ptr);
	} else {
		/* We can retry this: */
		ret = bch_err_throw(c, transaction_restart);
	}
	printbuf_exit(&buf);

	goto err_get_ioref;
}

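/*
 * Core write loop: allocate space from a write point, write out as much as
 * the allocation allows, and repeat until the entire op has been submitted;
 * the index update runs here when synchronous, otherwise it's handed off to
 * the write point's worker:
 */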
static void __bch2_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct write_point *wp = NULL;
	struct bio *bio = NULL;
	unsigned nofs_flags;
	int ret;

	nofs_flags = memalloc_nofs_save();

	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
		bch2_nocow_write(op);
		if (op->flags & BCH_WRITE_submitted)
			goto out_nofs_restore;
	}
again:
	memset(&op->failed, 0, sizeof(op->failed));

	do {
		struct bkey_i *key_to_write;
		unsigned key_to_write_offset = op->insert_keys.top_p -
			op->insert_keys.keys_p;

		/* +1 for possible cache device: */
		if (op->open_buckets.nr + op->nr_replicas + 1 >
		    ARRAY_SIZE(op->open_buckets.v))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 BKEY_EXTENT_U64s_MAX))
			break;

		/*
		 * The copygc thread is now global, which means it's no longer
		 * freeing up space on specific disks, which means that
		 * allocations for specific disks may hang arbitrarily long:
		 */
		ret = bch2_trans_run(c, lockrestart_do(trans,
			bch2_alloc_sectors_start_trans(trans,
				op->target,
				op->opts.erasure_code && !(op->flags & BCH_WRITE_cached),
				op->write_point,
				&op->devs_have,
				op->nr_replicas,
				op->nr_replicas_required,
				op->watermark,
				op->flags,
				&op->cl, &wp)));
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
				break;

			goto err;
		}

		EBUG_ON(!wp);

		bch2_open_bucket_get(c, wp, &op->open_buckets);
		ret = bch2_write_extent(op, wp, &bio);

		bch2_alloc_sectors_done_inlined(c, wp);
err:
		if (ret <= 0) {
			op->flags |= BCH_WRITE_submitted;

			if (unlikely(ret < 0)) {
				if (!(op->flags & BCH_WRITE_alloc_nowait))
					bch2_write_op_error(op, op->pos.offset,
							    "%s(): %s", __func__, bch2_err_str(ret));
				op->error = ret;
				break;
			}
		}

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;

		closure_get(bio->bi_private);

		key_to_write = (void *) (op->insert_keys.keys_p +
					 key_to_write_offset);

		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  key_to_write, false);
	} while (ret);

	/*
	 * Sync or no?
	 *
	 * If we're running asynchronously, we may still want to block
	 * synchronously here if we weren't able to submit all of the IO at
	 * once, as that signals backpressure to the caller.
	 */
	if ((op->flags & BCH_WRITE_sync) ||
	    (!(op->flags & BCH_WRITE_submitted) &&
	     !(op->flags & BCH_WRITE_in_worker))) {
		bch2_wait_on_allocator(c, &op->cl);

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_submitted))
			goto again;
		bch2_write_done(&op->cl);
	} else {
		bch2_write_queue(op, wp);
		continue_at(&op->cl, bch2_write_index, NULL);
	}
out_nofs_restore:
	memalloc_nofs_restore(nofs_flags);
}

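/*
 * Writes small enough for their data to be stored in the btree itself, as
 * a KEY_TYPE_inline_data extent - no data IO required:
 */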
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
	struct bio *bio = &op->wbio.bio;
	struct bvec_iter iter;
	struct bkey_i_inline_data *id;
	unsigned sectors;
	int ret;

	memset(&op->failed, 0, sizeof(op->failed));

	op->flags |= BCH_WRITE_wrote_data_inline;
	op->flags |= BCH_WRITE_submitted;

	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
				   ARRAY_SIZE(op->inline_keys),
				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
	if (ret) {
		op->error = ret;
		goto err;
	}

	sectors = bio_sectors(bio);
	op->pos.offset += sectors;

	id = bkey_inline_data_init(op->insert_keys.top);
	id->k.p		= op->pos;
	id->k.bversion	= op->version;
	id->k.size	= sectors;

	iter = bio->bi_iter;
	iter.bi_size = data_len;
	memcpy_from_bio(id->v.data, bio, iter);

	while (data_len & 7)
		id->v.data[data_len++] = '\0';
	set_bkey_val_bytes(&id->k, data_len);
	bch2_keylist_push(&op->insert_keys);

	__bch2_write_index(op);
err:
	bch2_write_done(&op->cl);
}

/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:		&bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data won't fit in a single open bucket, there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch2_write)
{
	closure_type(op, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;
	unsigned data_len;

	EBUG_ON(op->cl.parent);
	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(bkey_eq(op->pos, POS_MAX));

	async_object_list_add(c, write_op, op, &op->list_idx);

	if (op->flags & BCH_WRITE_only_specified_devs)
		op->flags |= BCH_WRITE_alloc_nowait;

	op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
	op->start_time = local_clock();
	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
		bch2_write_op_error(op, op->pos.offset, "misaligned write");
		op->error = bch_err_throw(c, data_write_misaligned);
		goto err;
	}

	if (c->opts.nochanges) {
		op->error = bch_err_throw(c, erofs_no_writes);
		goto err;
	}

	if (!(op->flags & BCH_WRITE_move) &&
	    !enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_write)) {
		op->error = bch_err_throw(c, erofs_no_writes);
		goto err;
	}

	if (!(op->flags & BCH_WRITE_move))
		this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	data_len = min_t(u64, bio->bi_iter.bi_size,
			 op->new_i_size - (op->pos.offset << 9));

	if (c->opts.inline_data &&
	    data_len <= min(block_bytes(c) / 2, 1024U)) {
		bch2_write_data_inline(op, data_len);
		return;
	}

	__bch2_write(op);
	return;
err:
	bch2_disk_reservation_put(c, &op->res);

	closure_debug_destroy(&op->cl);
	async_object_list_del(c, write_op, op->list_idx);
	if (op->end_io)
		op->end_io(op);
}

static const char * const bch2_write_flags[] = {
#define x(f)	#f,
	BCH_WRITE_FLAGS()
#undef x
	NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 32);

	prt_printf(out, "pos:\t");
	bch2_bpos_to_text(out, op->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "started:\t");
	bch2_pr_time_units(out, local_clock() - op->start_time);
	prt_newline(out);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_write_flags, op->flags);
	prt_newline(out);

	prt_printf(out, "nr_replicas:\t%u\n", op->nr_replicas);
	prt_printf(out, "nr_replicas_required:\t%u\n", op->nr_replicas_required);

	prt_printf(out, "ref:\t%u\n", closure_nr_remaining(&op->cl));
	prt_printf(out, "ret\t%s\n", bch2_err_str(op->error));

	printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
	bioset_exit(&c->replica_set);
	bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_write,   1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
	    bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
		return bch_err_throw(c, ENOMEM_bio_write_init);

	return 0;
}