1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 4 * Copyright 2012 Google, Inc. 5 */ 6 7 #include "bcachefs.h" 8 #include "alloc_foreground.h" 9 #include "bkey_buf.h" 10 #include "bset.h" 11 #include "btree_update.h" 12 #include "buckets.h" 13 #include "checksum.h" 14 #include "clock.h" 15 #include "compress.h" 16 #include "debug.h" 17 #include "ec.h" 18 #include "error.h" 19 #include "extent_update.h" 20 #include "inode.h" 21 #include "io_write.h" 22 #include "journal.h" 23 #include "keylist.h" 24 #include "move.h" 25 #include "nocow_locking.h" 26 #include "rebalance.h" 27 #include "subvolume.h" 28 #include "super.h" 29 #include "super-io.h" 30 #include "trace.h" 31 32 #include <linux/blkdev.h> 33 #include <linux/prefetch.h> 34 #include <linux/random.h> 35 #include <linux/sched/mm.h> 36 37 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT 38 39 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency, 40 u64 now, int rw) 41 { 42 u64 latency_capable = 43 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m; 44 /* ideally we'd be taking into account the device's variance here: */ 45 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3); 46 s64 latency_over = io_latency - latency_threshold; 47 48 if (latency_threshold && latency_over > 0) { 49 /* 50 * bump up congested by approximately latency_over * 4 / 51 * latency_threshold - we don't need much accuracy here so don't 52 * bother with the divide: 53 */ 54 if (atomic_read(&ca->congested) < CONGESTED_MAX) 55 atomic_add(latency_over >> 56 max_t(int, ilog2(latency_threshold) - 2, 0), 57 &ca->congested); 58 59 ca->congested_last = now; 60 } else if (atomic_read(&ca->congested) > 0) { 61 atomic_dec(&ca->congested); 62 } 63 } 64 65 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) 66 { 67 atomic64_t *latency = &ca->cur_latency[rw]; 68 u64 now = local_clock(); 69 u64 io_latency = time_after64(now, submit_time) 70 ? now - submit_time 71 : 0; 72 u64 old, new, v = atomic64_read(latency); 73 74 do { 75 old = v; 76 77 /* 78 * If the io latency was reasonably close to the current 79 * latency, skip doing the update and atomic operation - most of 80 * the time: 81 */ 82 if (abs((int) (old - io_latency)) < (old >> 1) && 83 now & ~(~0U << 5)) 84 break; 85 86 new = ewma_add(old, io_latency, 5); 87 } while ((v = atomic64_cmpxchg(latency, old, new)) != old); 88 89 bch2_congested_acct(ca, io_latency, now, rw); 90 91 __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now); 92 } 93 94 #endif 95 96 /* Allocate, free from mempool: */ 97 98 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio) 99 { 100 struct bvec_iter_all iter; 101 struct bio_vec *bv; 102 103 bio_for_each_segment_all(bv, bio, iter) 104 if (bv->bv_page != ZERO_PAGE(0)) 105 mempool_free(bv->bv_page, &c->bio_bounce_pages); 106 bio->bi_vcnt = 0; 107 } 108 109 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool) 110 { 111 struct page *page; 112 113 if (likely(!*using_mempool)) { 114 page = alloc_page(GFP_NOFS); 115 if (unlikely(!page)) { 116 mutex_lock(&c->bio_bounce_pages_lock); 117 *using_mempool = true; 118 goto pool_alloc; 119 120 } 121 } else { 122 pool_alloc: 123 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS); 124 } 125 126 return page; 127 } 128 129 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, 130 size_t size) 131 { 132 bool using_mempool = false; 133 134 while (size) { 135 struct page *page = __bio_alloc_page_pool(c, &using_mempool); 136 unsigned len = min_t(size_t, PAGE_SIZE, size); 137 138 BUG_ON(!bio_add_page(bio, page, len, 0)); 139 size -= len; 140 } 141 142 if (using_mempool) 143 mutex_unlock(&c->bio_bounce_pages_lock); 144 } 145 146 /* Extent update path: */ 147 148 int bch2_sum_sector_overwrites(struct btree_trans *trans, 149 struct btree_iter *extent_iter, 150 struct bkey_i *new, 151 bool *usage_increasing, 152 s64 *i_sectors_delta, 153 s64 *disk_sectors_delta) 154 { 155 struct bch_fs *c = trans->c; 156 struct btree_iter iter; 157 struct bkey_s_c old; 158 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new)); 159 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new)); 160 int ret = 0; 161 162 *usage_increasing = false; 163 *i_sectors_delta = 0; 164 *disk_sectors_delta = 0; 165 166 bch2_trans_copy_iter(&iter, extent_iter); 167 168 for_each_btree_key_upto_continue_norestart(iter, 169 new->k.p, BTREE_ITER_slots, old, ret) { 170 s64 sectors = min(new->k.p.offset, old.k->p.offset) - 171 max(bkey_start_offset(&new->k), 172 bkey_start_offset(old.k)); 173 174 *i_sectors_delta += sectors * 175 (bkey_extent_is_allocation(&new->k) - 176 bkey_extent_is_allocation(old.k)); 177 178 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new)); 179 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot 180 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old) 181 : 0; 182 183 if (!*usage_increasing && 184 (new->k.p.snapshot != old.k->p.snapshot || 185 new_replicas > bch2_bkey_replicas(c, old) || 186 (!new_compressed && bch2_bkey_sectors_compressed(old)))) 187 *usage_increasing = true; 188 189 if (bkey_ge(old.k->p, new->k.p)) 190 break; 191 } 192 193 bch2_trans_iter_exit(trans, &iter); 194 return ret; 195 } 196 197 static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, 198 struct btree_iter *extent_iter, 199 u64 new_i_size, 200 s64 i_sectors_delta) 201 { 202 /* 203 * Crazy performance optimization: 204 * Every extent update needs to also update the inode: the inode trigger 205 * will set bi->journal_seq to the journal sequence number of this 206 * transaction - for fsync. 207 * 208 * But if that's the only reason we're updating the inode (we're not 209 * updating bi_size or bi_sectors), then we don't need the inode update 210 * to be journalled - if we crash, the bi_journal_seq update will be 211 * lost, but that's fine. 212 */ 213 unsigned inode_update_flags = BTREE_UPDATE_nojournal; 214 215 struct btree_iter iter; 216 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 217 SPOS(0, 218 extent_iter->pos.inode, 219 extent_iter->snapshot), 220 BTREE_ITER_cached); 221 int ret = bkey_err(k); 222 if (unlikely(ret)) 223 return ret; 224 225 /* 226 * varint_decode_fast(), in the inode .invalid method, reads up to 7 227 * bytes past the end of the buffer: 228 */ 229 struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8); 230 ret = PTR_ERR_OR_ZERO(k_mut); 231 if (unlikely(ret)) 232 goto err; 233 234 bkey_reassemble(k_mut, k); 235 236 if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) { 237 k_mut = bch2_inode_to_v3(trans, k_mut); 238 ret = PTR_ERR_OR_ZERO(k_mut); 239 if (unlikely(ret)) 240 goto err; 241 } 242 243 struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut); 244 245 if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) && 246 new_i_size > le64_to_cpu(inode->v.bi_size)) { 247 inode->v.bi_size = cpu_to_le64(new_i_size); 248 inode_update_flags = 0; 249 } 250 251 if (i_sectors_delta) { 252 le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta); 253 inode_update_flags = 0; 254 } 255 256 if (inode->k.p.snapshot != iter.snapshot) { 257 inode->k.p.snapshot = iter.snapshot; 258 inode_update_flags = 0; 259 } 260 261 ret = bch2_trans_update(trans, &iter, &inode->k_i, 262 BTREE_UPDATE_internal_snapshot_node| 263 inode_update_flags); 264 err: 265 bch2_trans_iter_exit(trans, &iter); 266 return ret; 267 } 268 269 int bch2_extent_update(struct btree_trans *trans, 270 subvol_inum inum, 271 struct btree_iter *iter, 272 struct bkey_i *k, 273 struct disk_reservation *disk_res, 274 u64 new_i_size, 275 s64 *i_sectors_delta_total, 276 bool check_enospc) 277 { 278 struct bpos next_pos; 279 bool usage_increasing; 280 s64 i_sectors_delta = 0, disk_sectors_delta = 0; 281 int ret; 282 283 /* 284 * This traverses us the iterator without changing iter->path->pos to 285 * search_key() (which is pos + 1 for extents): we want there to be a 286 * path already traversed at iter->pos because 287 * bch2_trans_extent_update() will use it to attempt extent merging 288 */ 289 ret = __bch2_btree_iter_traverse(iter); 290 if (ret) 291 return ret; 292 293 ret = bch2_extent_trim_atomic(trans, iter, k); 294 if (ret) 295 return ret; 296 297 next_pos = k->k.p; 298 299 ret = bch2_sum_sector_overwrites(trans, iter, k, 300 &usage_increasing, 301 &i_sectors_delta, 302 &disk_sectors_delta); 303 if (ret) 304 return ret; 305 306 if (disk_res && 307 disk_sectors_delta > (s64) disk_res->sectors) { 308 ret = bch2_disk_reservation_add(trans->c, disk_res, 309 disk_sectors_delta - disk_res->sectors, 310 !check_enospc || !usage_increasing 311 ? BCH_DISK_RESERVATION_NOFAIL : 0); 312 if (ret) 313 return ret; 314 } 315 316 /* 317 * Note: 318 * We always have to do an inode update - even when i_size/i_sectors 319 * aren't changing - for fsync to work properly; fsync relies on 320 * inode->bi_journal_seq which is updated by the trigger code: 321 */ 322 ret = bch2_extent_update_i_size_sectors(trans, iter, 323 min(k->k.p.offset << 9, new_i_size), 324 i_sectors_delta) ?: 325 bch2_trans_update(trans, iter, k, 0) ?: 326 bch2_trans_commit(trans, disk_res, NULL, 327 BCH_TRANS_COMMIT_no_check_rw| 328 BCH_TRANS_COMMIT_no_enospc); 329 if (unlikely(ret)) 330 return ret; 331 332 if (i_sectors_delta_total) 333 *i_sectors_delta_total += i_sectors_delta; 334 bch2_btree_iter_set_pos(iter, next_pos); 335 return 0; 336 } 337 338 static int bch2_write_index_default(struct bch_write_op *op) 339 { 340 struct bch_fs *c = op->c; 341 struct bkey_buf sk; 342 struct keylist *keys = &op->insert_keys; 343 struct bkey_i *k = bch2_keylist_front(keys); 344 struct btree_trans *trans = bch2_trans_get(c); 345 struct btree_iter iter; 346 subvol_inum inum = { 347 .subvol = op->subvol, 348 .inum = k->k.p.inode, 349 }; 350 int ret; 351 352 BUG_ON(!inum.subvol); 353 354 bch2_bkey_buf_init(&sk); 355 356 do { 357 bch2_trans_begin(trans); 358 359 k = bch2_keylist_front(keys); 360 bch2_bkey_buf_copy(&sk, c, k); 361 362 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, 363 &sk.k->k.p.snapshot); 364 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 365 continue; 366 if (ret) 367 break; 368 369 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 370 bkey_start_pos(&sk.k->k), 371 BTREE_ITER_slots|BTREE_ITER_intent); 372 373 ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?: 374 bch2_extent_update(trans, inum, &iter, sk.k, 375 &op->res, 376 op->new_i_size, &op->i_sectors_delta, 377 op->flags & BCH_WRITE_CHECK_ENOSPC); 378 bch2_trans_iter_exit(trans, &iter); 379 380 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 381 continue; 382 if (ret) 383 break; 384 385 if (bkey_ge(iter.pos, k->k.p)) 386 bch2_keylist_pop_front(&op->insert_keys); 387 else 388 bch2_cut_front(iter.pos, k); 389 } while (!bch2_keylist_empty(keys)); 390 391 bch2_trans_put(trans); 392 bch2_bkey_buf_exit(&sk, c); 393 394 return ret; 395 } 396 397 /* Writes */ 398 399 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, 400 enum bch_data_type type, 401 const struct bkey_i *k, 402 bool nocow) 403 { 404 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k)); 405 struct bch_write_bio *n; 406 407 BUG_ON(c->opts.nochanges); 408 409 bkey_for_each_ptr(ptrs, ptr) { 410 struct bch_dev *ca = nocow 411 ? bch2_dev_have_ref(c, ptr->dev) 412 : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE); 413 414 if (to_entry(ptr + 1) < ptrs.end) { 415 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set)); 416 417 n->bio.bi_end_io = wbio->bio.bi_end_io; 418 n->bio.bi_private = wbio->bio.bi_private; 419 n->parent = wbio; 420 n->split = true; 421 n->bounce = false; 422 n->put_bio = true; 423 n->bio.bi_opf = wbio->bio.bi_opf; 424 bio_inc_remaining(&wbio->bio); 425 } else { 426 n = wbio; 427 n->split = false; 428 } 429 430 n->c = c; 431 n->dev = ptr->dev; 432 n->have_ioref = ca != NULL; 433 n->nocow = nocow; 434 n->submit_time = local_clock(); 435 n->inode_offset = bkey_start_offset(&k->k); 436 if (nocow) 437 n->nocow_bucket = PTR_BUCKET_NR(ca, ptr); 438 n->bio.bi_iter.bi_sector = ptr->offset; 439 440 if (likely(n->have_ioref)) { 441 this_cpu_add(ca->io_done->sectors[WRITE][type], 442 bio_sectors(&n->bio)); 443 444 bio_set_dev(&n->bio, ca->disk_sb.bdev); 445 446 if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) { 447 bio_endio(&n->bio); 448 continue; 449 } 450 451 submit_bio(&n->bio); 452 } else { 453 n->bio.bi_status = BLK_STS_REMOVED; 454 bio_endio(&n->bio); 455 } 456 } 457 } 458 459 static void __bch2_write(struct bch_write_op *); 460 461 static void bch2_write_done(struct closure *cl) 462 { 463 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); 464 struct bch_fs *c = op->c; 465 466 EBUG_ON(op->open_buckets.nr); 467 468 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time); 469 bch2_disk_reservation_put(c, &op->res); 470 471 if (!(op->flags & BCH_WRITE_MOVE)) 472 bch2_write_ref_put(c, BCH_WRITE_REF_write); 473 bch2_keylist_free(&op->insert_keys, op->inline_keys); 474 475 EBUG_ON(cl->parent); 476 closure_debug_destroy(cl); 477 if (op->end_io) 478 op->end_io(op); 479 } 480 481 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op) 482 { 483 struct keylist *keys = &op->insert_keys; 484 struct bkey_i *src, *dst = keys->keys, *n; 485 486 for (src = keys->keys; src != keys->top; src = n) { 487 n = bkey_next(src); 488 489 if (bkey_extent_is_direct_data(&src->k)) { 490 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr, 491 test_bit(ptr->dev, op->failed.d)); 492 493 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src))) 494 return -EIO; 495 } 496 497 if (dst != src) 498 memmove_u64s_down(dst, src, src->k.u64s); 499 dst = bkey_next(dst); 500 } 501 502 keys->top = dst; 503 return 0; 504 } 505 506 /** 507 * __bch2_write_index - after a write, update index to point to new data 508 * @op: bch_write_op to process 509 */ 510 static void __bch2_write_index(struct bch_write_op *op) 511 { 512 struct bch_fs *c = op->c; 513 struct keylist *keys = &op->insert_keys; 514 unsigned dev; 515 int ret = 0; 516 517 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) { 518 ret = bch2_write_drop_io_error_ptrs(op); 519 if (ret) 520 goto err; 521 } 522 523 if (!bch2_keylist_empty(keys)) { 524 u64 sectors_start = keylist_sectors(keys); 525 526 ret = !(op->flags & BCH_WRITE_MOVE) 527 ? bch2_write_index_default(op) 528 : bch2_data_update_index_update(op); 529 530 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); 531 BUG_ON(keylist_sectors(keys) && !ret); 532 533 op->written += sectors_start - keylist_sectors(keys); 534 535 if (ret && !bch2_err_matches(ret, EROFS)) { 536 struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); 537 538 bch_err_inum_offset_ratelimited(c, 539 insert->k.p.inode, insert->k.p.offset << 9, 540 "%s write error while doing btree update: %s", 541 op->flags & BCH_WRITE_MOVE ? "move" : "user", 542 bch2_err_str(ret)); 543 } 544 545 if (ret) 546 goto err; 547 } 548 out: 549 /* If some a bucket wasn't written, we can't erasure code it: */ 550 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX) 551 bch2_open_bucket_write_error(c, &op->open_buckets, dev); 552 553 bch2_open_buckets_put(c, &op->open_buckets); 554 return; 555 err: 556 keys->top = keys->keys; 557 op->error = ret; 558 op->flags |= BCH_WRITE_DONE; 559 goto out; 560 } 561 562 static inline void __wp_update_state(struct write_point *wp, enum write_point_state state) 563 { 564 if (state != wp->state) { 565 u64 now = ktime_get_ns(); 566 567 if (wp->last_state_change && 568 time_after64(now, wp->last_state_change)) 569 wp->time[wp->state] += now - wp->last_state_change; 570 wp->state = state; 571 wp->last_state_change = now; 572 } 573 } 574 575 static inline void wp_update_state(struct write_point *wp, bool running) 576 { 577 enum write_point_state state; 578 579 state = running ? WRITE_POINT_running : 580 !list_empty(&wp->writes) ? WRITE_POINT_waiting_io 581 : WRITE_POINT_stopped; 582 583 __wp_update_state(wp, state); 584 } 585 586 static CLOSURE_CALLBACK(bch2_write_index) 587 { 588 closure_type(op, struct bch_write_op, cl); 589 struct write_point *wp = op->wp; 590 struct workqueue_struct *wq = index_update_wq(op); 591 unsigned long flags; 592 593 if ((op->flags & BCH_WRITE_DONE) && 594 (op->flags & BCH_WRITE_MOVE)) 595 bch2_bio_free_pages_pool(op->c, &op->wbio.bio); 596 597 spin_lock_irqsave(&wp->writes_lock, flags); 598 if (wp->state == WRITE_POINT_waiting_io) 599 __wp_update_state(wp, WRITE_POINT_waiting_work); 600 list_add_tail(&op->wp_list, &wp->writes); 601 spin_unlock_irqrestore (&wp->writes_lock, flags); 602 603 queue_work(wq, &wp->index_update_work); 604 } 605 606 static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp) 607 { 608 op->wp = wp; 609 610 if (wp->state == WRITE_POINT_stopped) { 611 spin_lock_irq(&wp->writes_lock); 612 __wp_update_state(wp, WRITE_POINT_waiting_io); 613 spin_unlock_irq(&wp->writes_lock); 614 } 615 } 616 617 void bch2_write_point_do_index_updates(struct work_struct *work) 618 { 619 struct write_point *wp = 620 container_of(work, struct write_point, index_update_work); 621 struct bch_write_op *op; 622 623 while (1) { 624 spin_lock_irq(&wp->writes_lock); 625 op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list); 626 if (op) 627 list_del(&op->wp_list); 628 wp_update_state(wp, op != NULL); 629 spin_unlock_irq(&wp->writes_lock); 630 631 if (!op) 632 break; 633 634 op->flags |= BCH_WRITE_IN_WORKER; 635 636 __bch2_write_index(op); 637 638 if (!(op->flags & BCH_WRITE_DONE)) 639 __bch2_write(op); 640 else 641 bch2_write_done(&op->cl); 642 } 643 } 644 645 static void bch2_write_endio(struct bio *bio) 646 { 647 struct closure *cl = bio->bi_private; 648 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); 649 struct bch_write_bio *wbio = to_wbio(bio); 650 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL; 651 struct bch_fs *c = wbio->c; 652 struct bch_dev *ca = wbio->have_ioref 653 ? bch2_dev_have_ref(c, wbio->dev) 654 : NULL; 655 656 if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write, 657 op->pos.inode, 658 wbio->inode_offset << 9, 659 "data write error: %s", 660 bch2_blk_status_to_str(bio->bi_status))) { 661 set_bit(wbio->dev, op->failed.d); 662 op->flags |= BCH_WRITE_IO_ERROR; 663 } 664 665 if (wbio->nocow) { 666 bch2_bucket_nocow_unlock(&c->nocow_locks, 667 POS(ca->dev_idx, wbio->nocow_bucket), 668 BUCKET_NOCOW_LOCK_UPDATE); 669 set_bit(wbio->dev, op->devs_need_flush->d); 670 } 671 672 if (wbio->have_ioref) { 673 bch2_latency_acct(ca, wbio->submit_time, WRITE); 674 percpu_ref_put(&ca->io_ref); 675 } 676 677 if (wbio->bounce) 678 bch2_bio_free_pages_pool(c, bio); 679 680 if (wbio->put_bio) 681 bio_put(bio); 682 683 if (parent) 684 bio_endio(&parent->bio); 685 else 686 closure_put(cl); 687 } 688 689 static void init_append_extent(struct bch_write_op *op, 690 struct write_point *wp, 691 struct bversion version, 692 struct bch_extent_crc_unpacked crc) 693 { 694 struct bkey_i_extent *e; 695 696 op->pos.offset += crc.uncompressed_size; 697 698 e = bkey_extent_init(op->insert_keys.top); 699 e->k.p = op->pos; 700 e->k.size = crc.uncompressed_size; 701 e->k.version = version; 702 703 if (crc.csum_type || 704 crc.compression_type || 705 crc.nonce) 706 bch2_extent_crc_append(&e->k_i, crc); 707 708 bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size, 709 op->flags & BCH_WRITE_CACHED); 710 711 bch2_keylist_push(&op->insert_keys); 712 } 713 714 static struct bio *bch2_write_bio_alloc(struct bch_fs *c, 715 struct write_point *wp, 716 struct bio *src, 717 bool *page_alloc_failed, 718 void *buf) 719 { 720 struct bch_write_bio *wbio; 721 struct bio *bio; 722 unsigned output_available = 723 min(wp->sectors_free << 9, src->bi_iter.bi_size); 724 unsigned pages = DIV_ROUND_UP(output_available + 725 (buf 726 ? ((unsigned long) buf & (PAGE_SIZE - 1)) 727 : 0), PAGE_SIZE); 728 729 pages = min(pages, BIO_MAX_VECS); 730 731 bio = bio_alloc_bioset(NULL, pages, 0, 732 GFP_NOFS, &c->bio_write); 733 wbio = wbio_init(bio); 734 wbio->put_bio = true; 735 /* copy WRITE_SYNC flag */ 736 wbio->bio.bi_opf = src->bi_opf; 737 738 if (buf) { 739 bch2_bio_map(bio, buf, output_available); 740 return bio; 741 } 742 743 wbio->bounce = true; 744 745 /* 746 * We can't use mempool for more than c->sb.encoded_extent_max 747 * worth of pages, but we'd like to allocate more if we can: 748 */ 749 bch2_bio_alloc_pages_pool(c, bio, 750 min_t(unsigned, output_available, 751 c->opts.encoded_extent_max)); 752 753 if (bio->bi_iter.bi_size < output_available) 754 *page_alloc_failed = 755 bch2_bio_alloc_pages(bio, 756 output_available - 757 bio->bi_iter.bi_size, 758 GFP_NOFS) != 0; 759 760 return bio; 761 } 762 763 static int bch2_write_rechecksum(struct bch_fs *c, 764 struct bch_write_op *op, 765 unsigned new_csum_type) 766 { 767 struct bio *bio = &op->wbio.bio; 768 struct bch_extent_crc_unpacked new_crc; 769 int ret; 770 771 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */ 772 773 if (bch2_csum_type_is_encryption(op->crc.csum_type) != 774 bch2_csum_type_is_encryption(new_csum_type)) 775 new_csum_type = op->crc.csum_type; 776 777 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc, 778 NULL, &new_crc, 779 op->crc.offset, op->crc.live_size, 780 new_csum_type); 781 if (ret) 782 return ret; 783 784 bio_advance(bio, op->crc.offset << 9); 785 bio->bi_iter.bi_size = op->crc.live_size << 9; 786 op->crc = new_crc; 787 return 0; 788 } 789 790 static int bch2_write_decrypt(struct bch_write_op *op) 791 { 792 struct bch_fs *c = op->c; 793 struct nonce nonce = extent_nonce(op->version, op->crc); 794 struct bch_csum csum; 795 int ret; 796 797 if (!bch2_csum_type_is_encryption(op->crc.csum_type)) 798 return 0; 799 800 /* 801 * If we need to decrypt data in the write path, we'll no longer be able 802 * to verify the existing checksum (poly1305 mac, in this case) after 803 * it's decrypted - this is the last point we'll be able to reverify the 804 * checksum: 805 */ 806 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio); 807 if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io) 808 return -EIO; 809 810 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio); 811 op->crc.csum_type = 0; 812 op->crc.csum = (struct bch_csum) { 0, 0 }; 813 return ret; 814 } 815 816 static enum prep_encoded_ret { 817 PREP_ENCODED_OK, 818 PREP_ENCODED_ERR, 819 PREP_ENCODED_CHECKSUM_ERR, 820 PREP_ENCODED_DO_WRITE, 821 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp) 822 { 823 struct bch_fs *c = op->c; 824 struct bio *bio = &op->wbio.bio; 825 826 if (!(op->flags & BCH_WRITE_DATA_ENCODED)) 827 return PREP_ENCODED_OK; 828 829 BUG_ON(bio_sectors(bio) != op->crc.compressed_size); 830 831 /* Can we just write the entire extent as is? */ 832 if (op->crc.uncompressed_size == op->crc.live_size && 833 op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 && 834 op->crc.compressed_size <= wp->sectors_free && 835 (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) || 836 op->incompressible)) { 837 if (!crc_is_compressed(op->crc) && 838 op->csum_type != op->crc.csum_type && 839 bch2_write_rechecksum(c, op, op->csum_type) && 840 !c->opts.no_data_io) 841 return PREP_ENCODED_CHECKSUM_ERR; 842 843 return PREP_ENCODED_DO_WRITE; 844 } 845 846 /* 847 * If the data is compressed and we couldn't write the entire extent as 848 * is, we have to decompress it: 849 */ 850 if (crc_is_compressed(op->crc)) { 851 struct bch_csum csum; 852 853 if (bch2_write_decrypt(op)) 854 return PREP_ENCODED_CHECKSUM_ERR; 855 856 /* Last point we can still verify checksum: */ 857 csum = bch2_checksum_bio(c, op->crc.csum_type, 858 extent_nonce(op->version, op->crc), 859 bio); 860 if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io) 861 return PREP_ENCODED_CHECKSUM_ERR; 862 863 if (bch2_bio_uncompress_inplace(c, bio, &op->crc)) 864 return PREP_ENCODED_ERR; 865 } 866 867 /* 868 * No longer have compressed data after this point - data might be 869 * encrypted: 870 */ 871 872 /* 873 * If the data is checksummed and we're only writing a subset, 874 * rechecksum and adjust bio to point to currently live data: 875 */ 876 if ((op->crc.live_size != op->crc.uncompressed_size || 877 op->crc.csum_type != op->csum_type) && 878 bch2_write_rechecksum(c, op, op->csum_type) && 879 !c->opts.no_data_io) 880 return PREP_ENCODED_CHECKSUM_ERR; 881 882 /* 883 * If we want to compress the data, it has to be decrypted: 884 */ 885 if ((op->compression_opt || 886 bch2_csum_type_is_encryption(op->crc.csum_type) != 887 bch2_csum_type_is_encryption(op->csum_type)) && 888 bch2_write_decrypt(op)) 889 return PREP_ENCODED_CHECKSUM_ERR; 890 891 return PREP_ENCODED_OK; 892 } 893 894 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp, 895 struct bio **_dst) 896 { 897 struct bch_fs *c = op->c; 898 struct bio *src = &op->wbio.bio, *dst = src; 899 struct bvec_iter saved_iter; 900 void *ec_buf; 901 unsigned total_output = 0, total_input = 0; 902 bool bounce = false; 903 bool page_alloc_failed = false; 904 int ret, more = 0; 905 906 BUG_ON(!bio_sectors(src)); 907 908 ec_buf = bch2_writepoint_ec_buf(c, wp); 909 910 switch (bch2_write_prep_encoded_data(op, wp)) { 911 case PREP_ENCODED_OK: 912 break; 913 case PREP_ENCODED_ERR: 914 ret = -EIO; 915 goto err; 916 case PREP_ENCODED_CHECKSUM_ERR: 917 goto csum_err; 918 case PREP_ENCODED_DO_WRITE: 919 /* XXX look for bug here */ 920 if (ec_buf) { 921 dst = bch2_write_bio_alloc(c, wp, src, 922 &page_alloc_failed, 923 ec_buf); 924 bio_copy_data(dst, src); 925 bounce = true; 926 } 927 init_append_extent(op, wp, op->version, op->crc); 928 goto do_write; 929 } 930 931 if (ec_buf || 932 op->compression_opt || 933 (op->csum_type && 934 !(op->flags & BCH_WRITE_PAGES_STABLE)) || 935 (bch2_csum_type_is_encryption(op->csum_type) && 936 !(op->flags & BCH_WRITE_PAGES_OWNED))) { 937 dst = bch2_write_bio_alloc(c, wp, src, 938 &page_alloc_failed, 939 ec_buf); 940 bounce = true; 941 } 942 943 saved_iter = dst->bi_iter; 944 945 do { 946 struct bch_extent_crc_unpacked crc = { 0 }; 947 struct bversion version = op->version; 948 size_t dst_len = 0, src_len = 0; 949 950 if (page_alloc_failed && 951 dst->bi_iter.bi_size < (wp->sectors_free << 9) && 952 dst->bi_iter.bi_size < c->opts.encoded_extent_max) 953 break; 954 955 BUG_ON(op->compression_opt && 956 (op->flags & BCH_WRITE_DATA_ENCODED) && 957 bch2_csum_type_is_encryption(op->crc.csum_type)); 958 BUG_ON(op->compression_opt && !bounce); 959 960 crc.compression_type = op->incompressible 961 ? BCH_COMPRESSION_TYPE_incompressible 962 : op->compression_opt 963 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len, 964 op->compression_opt) 965 : 0; 966 if (!crc_is_compressed(crc)) { 967 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size); 968 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9); 969 970 if (op->csum_type) 971 dst_len = min_t(unsigned, dst_len, 972 c->opts.encoded_extent_max); 973 974 if (bounce) { 975 swap(dst->bi_iter.bi_size, dst_len); 976 bio_copy_data(dst, src); 977 swap(dst->bi_iter.bi_size, dst_len); 978 } 979 980 src_len = dst_len; 981 } 982 983 BUG_ON(!src_len || !dst_len); 984 985 if (bch2_csum_type_is_encryption(op->csum_type)) { 986 if (bversion_zero(version)) { 987 version.lo = atomic64_inc_return(&c->key_version); 988 } else { 989 crc.nonce = op->nonce; 990 op->nonce += src_len >> 9; 991 } 992 } 993 994 if ((op->flags & BCH_WRITE_DATA_ENCODED) && 995 !crc_is_compressed(crc) && 996 bch2_csum_type_is_encryption(op->crc.csum_type) == 997 bch2_csum_type_is_encryption(op->csum_type)) { 998 u8 compression_type = crc.compression_type; 999 u16 nonce = crc.nonce; 1000 /* 1001 * Note: when we're using rechecksum(), we need to be 1002 * checksumming @src because it has all the data our 1003 * existing checksum covers - if we bounced (because we 1004 * were trying to compress), @dst will only have the 1005 * part of the data the new checksum will cover. 1006 * 1007 * But normally we want to be checksumming post bounce, 1008 * because part of the reason for bouncing is so the 1009 * data can't be modified (by userspace) while it's in 1010 * flight. 1011 */ 1012 if (bch2_rechecksum_bio(c, src, version, op->crc, 1013 &crc, &op->crc, 1014 src_len >> 9, 1015 bio_sectors(src) - (src_len >> 9), 1016 op->csum_type)) 1017 goto csum_err; 1018 /* 1019 * rchecksum_bio sets compression_type on crc from op->crc, 1020 * this isn't always correct as sometimes we're changing 1021 * an extent from uncompressed to incompressible. 1022 */ 1023 crc.compression_type = compression_type; 1024 crc.nonce = nonce; 1025 } else { 1026 if ((op->flags & BCH_WRITE_DATA_ENCODED) && 1027 bch2_rechecksum_bio(c, src, version, op->crc, 1028 NULL, &op->crc, 1029 src_len >> 9, 1030 bio_sectors(src) - (src_len >> 9), 1031 op->crc.csum_type)) 1032 goto csum_err; 1033 1034 crc.compressed_size = dst_len >> 9; 1035 crc.uncompressed_size = src_len >> 9; 1036 crc.live_size = src_len >> 9; 1037 1038 swap(dst->bi_iter.bi_size, dst_len); 1039 ret = bch2_encrypt_bio(c, op->csum_type, 1040 extent_nonce(version, crc), dst); 1041 if (ret) 1042 goto err; 1043 1044 crc.csum = bch2_checksum_bio(c, op->csum_type, 1045 extent_nonce(version, crc), dst); 1046 crc.csum_type = op->csum_type; 1047 swap(dst->bi_iter.bi_size, dst_len); 1048 } 1049 1050 init_append_extent(op, wp, version, crc); 1051 1052 if (dst != src) 1053 bio_advance(dst, dst_len); 1054 bio_advance(src, src_len); 1055 total_output += dst_len; 1056 total_input += src_len; 1057 } while (dst->bi_iter.bi_size && 1058 src->bi_iter.bi_size && 1059 wp->sectors_free && 1060 !bch2_keylist_realloc(&op->insert_keys, 1061 op->inline_keys, 1062 ARRAY_SIZE(op->inline_keys), 1063 BKEY_EXTENT_U64s_MAX)); 1064 1065 more = src->bi_iter.bi_size != 0; 1066 1067 dst->bi_iter = saved_iter; 1068 1069 if (dst == src && more) { 1070 BUG_ON(total_output != total_input); 1071 1072 dst = bio_split(src, total_input >> 9, 1073 GFP_NOFS, &c->bio_write); 1074 wbio_init(dst)->put_bio = true; 1075 /* copy WRITE_SYNC flag */ 1076 dst->bi_opf = src->bi_opf; 1077 } 1078 1079 dst->bi_iter.bi_size = total_output; 1080 do_write: 1081 *_dst = dst; 1082 return more; 1083 csum_err: 1084 bch_err(c, "%s writ error: error verifying existing checksum while rewriting existing data (memory corruption?)", 1085 op->flags & BCH_WRITE_MOVE ? "move" : "user"); 1086 ret = -EIO; 1087 err: 1088 if (to_wbio(dst)->bounce) 1089 bch2_bio_free_pages_pool(c, dst); 1090 if (to_wbio(dst)->put_bio) 1091 bio_put(dst); 1092 1093 return ret; 1094 } 1095 1096 static bool bch2_extent_is_writeable(struct bch_write_op *op, 1097 struct bkey_s_c k) 1098 { 1099 struct bch_fs *c = op->c; 1100 struct bkey_s_c_extent e; 1101 struct extent_ptr_decoded p; 1102 const union bch_extent_entry *entry; 1103 unsigned replicas = 0; 1104 1105 if (k.k->type != KEY_TYPE_extent) 1106 return false; 1107 1108 e = bkey_s_c_to_extent(k); 1109 1110 rcu_read_lock(); 1111 extent_for_each_ptr_decode(e, p, entry) { 1112 if (crc_is_encoded(p.crc) || p.has_ec) { 1113 rcu_read_unlock(); 1114 return false; 1115 } 1116 1117 replicas += bch2_extent_ptr_durability(c, &p); 1118 } 1119 rcu_read_unlock(); 1120 1121 return replicas >= op->opts.data_replicas; 1122 } 1123 1124 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, 1125 struct btree_iter *iter, 1126 struct bkey_i *orig, 1127 struct bkey_s_c k, 1128 u64 new_i_size) 1129 { 1130 if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) { 1131 /* trace this */ 1132 return 0; 1133 } 1134 1135 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); 1136 int ret = PTR_ERR_OR_ZERO(new); 1137 if (ret) 1138 return ret; 1139 1140 bch2_cut_front(bkey_start_pos(&orig->k), new); 1141 bch2_cut_back(orig->k.p, new); 1142 1143 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); 1144 bkey_for_each_ptr(ptrs, ptr) 1145 ptr->unwritten = 0; 1146 1147 /* 1148 * Note that we're not calling bch2_subvol_get_snapshot() in this path - 1149 * that was done when we kicked off the write, and here it's important 1150 * that we update the extent that we wrote to - even if a snapshot has 1151 * since been created. The write is still outstanding, so we're ok 1152 * w.r.t. snapshot atomicity: 1153 */ 1154 return bch2_extent_update_i_size_sectors(trans, iter, 1155 min(new->k.p.offset << 9, new_i_size), 0) ?: 1156 bch2_trans_update(trans, iter, new, 1157 BTREE_UPDATE_internal_snapshot_node); 1158 } 1159 1160 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) 1161 { 1162 struct bch_fs *c = op->c; 1163 struct btree_trans *trans = bch2_trans_get(c); 1164 1165 for_each_keylist_key(&op->insert_keys, orig) { 1166 int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents, 1167 bkey_start_pos(&orig->k), orig->k.p, 1168 BTREE_ITER_intent, k, 1169 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ 1170 bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size); 1171 })); 1172 1173 if (ret && !bch2_err_matches(ret, EROFS)) { 1174 struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); 1175 1176 bch_err_inum_offset_ratelimited(c, 1177 insert->k.p.inode, insert->k.p.offset << 9, 1178 "%s write error while doing btree update: %s", 1179 op->flags & BCH_WRITE_MOVE ? "move" : "user", 1180 bch2_err_str(ret)); 1181 } 1182 1183 if (ret) { 1184 op->error = ret; 1185 break; 1186 } 1187 } 1188 1189 bch2_trans_put(trans); 1190 } 1191 1192 static void __bch2_nocow_write_done(struct bch_write_op *op) 1193 { 1194 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) { 1195 op->error = -EIO; 1196 } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN)) 1197 bch2_nocow_write_convert_unwritten(op); 1198 } 1199 1200 static CLOSURE_CALLBACK(bch2_nocow_write_done) 1201 { 1202 closure_type(op, struct bch_write_op, cl); 1203 1204 __bch2_nocow_write_done(op); 1205 bch2_write_done(cl); 1206 } 1207 1208 struct bucket_to_lock { 1209 struct bpos b; 1210 unsigned gen; 1211 struct nocow_lock_bucket *l; 1212 }; 1213 1214 static void bch2_nocow_write(struct bch_write_op *op) 1215 { 1216 struct bch_fs *c = op->c; 1217 struct btree_trans *trans; 1218 struct btree_iter iter; 1219 struct bkey_s_c k; 1220 DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets; 1221 u32 snapshot; 1222 struct bucket_to_lock *stale_at; 1223 int stale, ret; 1224 1225 if (op->flags & BCH_WRITE_MOVE) 1226 return; 1227 1228 darray_init(&buckets); 1229 trans = bch2_trans_get(c); 1230 retry: 1231 bch2_trans_begin(trans); 1232 1233 ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot); 1234 if (unlikely(ret)) 1235 goto err; 1236 1237 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 1238 SPOS(op->pos.inode, op->pos.offset, snapshot), 1239 BTREE_ITER_slots); 1240 while (1) { 1241 struct bio *bio = &op->wbio.bio; 1242 1243 buckets.nr = 0; 1244 1245 ret = bch2_trans_relock(trans); 1246 if (ret) 1247 break; 1248 1249 k = bch2_btree_iter_peek_slot(&iter); 1250 ret = bkey_err(k); 1251 if (ret) 1252 break; 1253 1254 /* fall back to normal cow write path? */ 1255 if (unlikely(k.k->p.snapshot != snapshot || 1256 !bch2_extent_is_writeable(op, k))) 1257 break; 1258 1259 if (bch2_keylist_realloc(&op->insert_keys, 1260 op->inline_keys, 1261 ARRAY_SIZE(op->inline_keys), 1262 k.k->u64s)) 1263 break; 1264 1265 /* Get iorefs before dropping btree locks: */ 1266 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1267 bkey_for_each_ptr(ptrs, ptr) { 1268 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); 1269 if (unlikely(!ca)) 1270 goto err_get_ioref; 1271 1272 struct bpos b = PTR_BUCKET_POS(ca, ptr); 1273 struct nocow_lock_bucket *l = 1274 bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b)); 1275 prefetch(l); 1276 1277 /* XXX allocating memory with btree locks held - rare */ 1278 darray_push_gfp(&buckets, ((struct bucket_to_lock) { 1279 .b = b, .gen = ptr->gen, .l = l, 1280 }), GFP_KERNEL|__GFP_NOFAIL); 1281 1282 if (ptr->unwritten) 1283 op->flags |= BCH_WRITE_CONVERT_UNWRITTEN; 1284 } 1285 1286 /* Unlock before taking nocow locks, doing IO: */ 1287 bkey_reassemble(op->insert_keys.top, k); 1288 bch2_trans_unlock(trans); 1289 1290 bch2_cut_front(op->pos, op->insert_keys.top); 1291 if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN) 1292 bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top); 1293 1294 darray_for_each(buckets, i) { 1295 struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode); 1296 1297 __bch2_bucket_nocow_lock(&c->nocow_locks, i->l, 1298 bucket_to_u64(i->b), 1299 BUCKET_NOCOW_LOCK_UPDATE); 1300 1301 rcu_read_lock(); 1302 u8 *gen = bucket_gen(ca, i->b.offset); 1303 stale = !gen ? -1 : gen_after(*gen, i->gen); 1304 rcu_read_unlock(); 1305 1306 if (unlikely(stale)) { 1307 stale_at = i; 1308 goto err_bucket_stale; 1309 } 1310 } 1311 1312 bio = &op->wbio.bio; 1313 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) { 1314 bio = bio_split(bio, k.k->p.offset - op->pos.offset, 1315 GFP_KERNEL, &c->bio_write); 1316 wbio_init(bio)->put_bio = true; 1317 bio->bi_opf = op->wbio.bio.bi_opf; 1318 } else { 1319 op->flags |= BCH_WRITE_DONE; 1320 } 1321 1322 op->pos.offset += bio_sectors(bio); 1323 op->written += bio_sectors(bio); 1324 1325 bio->bi_end_io = bch2_write_endio; 1326 bio->bi_private = &op->cl; 1327 bio->bi_opf |= REQ_OP_WRITE; 1328 closure_get(&op->cl); 1329 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, 1330 op->insert_keys.top, true); 1331 1332 bch2_keylist_push(&op->insert_keys); 1333 if (op->flags & BCH_WRITE_DONE) 1334 break; 1335 bch2_btree_iter_advance(&iter); 1336 } 1337 out: 1338 bch2_trans_iter_exit(trans, &iter); 1339 err: 1340 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1341 goto retry; 1342 1343 if (ret) { 1344 bch_err_inum_offset_ratelimited(c, 1345 op->pos.inode, op->pos.offset << 9, 1346 "%s: btree lookup error %s", __func__, bch2_err_str(ret)); 1347 op->error = ret; 1348 op->flags |= BCH_WRITE_DONE; 1349 } 1350 1351 bch2_trans_put(trans); 1352 darray_exit(&buckets); 1353 1354 /* fallback to cow write path? */ 1355 if (!(op->flags & BCH_WRITE_DONE)) { 1356 closure_sync(&op->cl); 1357 __bch2_nocow_write_done(op); 1358 op->insert_keys.top = op->insert_keys.keys; 1359 } else if (op->flags & BCH_WRITE_SYNC) { 1360 closure_sync(&op->cl); 1361 bch2_nocow_write_done(&op->cl.work); 1362 } else { 1363 /* 1364 * XXX 1365 * needs to run out of process context because ei_quota_lock is 1366 * a mutex 1367 */ 1368 continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op)); 1369 } 1370 return; 1371 err_get_ioref: 1372 darray_for_each(buckets, i) 1373 percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref); 1374 1375 /* Fall back to COW path: */ 1376 goto out; 1377 err_bucket_stale: 1378 darray_for_each(buckets, i) { 1379 bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE); 1380 if (i == stale_at) 1381 break; 1382 } 1383 1384 struct printbuf buf = PRINTBUF; 1385 if (bch2_fs_inconsistent_on(stale < 0, c, 1386 "pointer to invalid bucket in nocow path on device %llu\n %s", 1387 stale_at->b.inode, 1388 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1389 ret = -EIO; 1390 } else { 1391 /* We can retry this: */ 1392 ret = -BCH_ERR_transaction_restart; 1393 } 1394 printbuf_exit(&buf); 1395 1396 goto err_get_ioref; 1397 } 1398 1399 static void __bch2_write(struct bch_write_op *op) 1400 { 1401 struct bch_fs *c = op->c; 1402 struct write_point *wp = NULL; 1403 struct bio *bio = NULL; 1404 unsigned nofs_flags; 1405 int ret; 1406 1407 nofs_flags = memalloc_nofs_save(); 1408 1409 if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) { 1410 bch2_nocow_write(op); 1411 if (op->flags & BCH_WRITE_DONE) 1412 goto out_nofs_restore; 1413 } 1414 again: 1415 memset(&op->failed, 0, sizeof(op->failed)); 1416 1417 do { 1418 struct bkey_i *key_to_write; 1419 unsigned key_to_write_offset = op->insert_keys.top_p - 1420 op->insert_keys.keys_p; 1421 1422 /* +1 for possible cache device: */ 1423 if (op->open_buckets.nr + op->nr_replicas + 1 > 1424 ARRAY_SIZE(op->open_buckets.v)) 1425 break; 1426 1427 if (bch2_keylist_realloc(&op->insert_keys, 1428 op->inline_keys, 1429 ARRAY_SIZE(op->inline_keys), 1430 BKEY_EXTENT_U64s_MAX)) 1431 break; 1432 1433 /* 1434 * The copygc thread is now global, which means it's no longer 1435 * freeing up space on specific disks, which means that 1436 * allocations for specific disks may hang arbitrarily long: 1437 */ 1438 ret = bch2_trans_do(c, NULL, NULL, 0, 1439 bch2_alloc_sectors_start_trans(trans, 1440 op->target, 1441 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED), 1442 op->write_point, 1443 &op->devs_have, 1444 op->nr_replicas, 1445 op->nr_replicas_required, 1446 op->watermark, 1447 op->flags, 1448 (op->flags & (BCH_WRITE_ALLOC_NOWAIT| 1449 BCH_WRITE_ONLY_SPECIFIED_DEVS)) 1450 ? NULL : &op->cl, &wp)); 1451 if (unlikely(ret)) { 1452 if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) 1453 break; 1454 1455 goto err; 1456 } 1457 1458 EBUG_ON(!wp); 1459 1460 bch2_open_bucket_get(c, wp, &op->open_buckets); 1461 ret = bch2_write_extent(op, wp, &bio); 1462 1463 bch2_alloc_sectors_done_inlined(c, wp); 1464 err: 1465 if (ret <= 0) { 1466 op->flags |= BCH_WRITE_DONE; 1467 1468 if (ret < 0) { 1469 if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) 1470 bch_err_inum_offset_ratelimited(c, 1471 op->pos.inode, 1472 op->pos.offset << 9, 1473 "%s(): %s error: %s", __func__, 1474 op->flags & BCH_WRITE_MOVE ? "move" : "user", 1475 bch2_err_str(ret)); 1476 op->error = ret; 1477 break; 1478 } 1479 } 1480 1481 bio->bi_end_io = bch2_write_endio; 1482 bio->bi_private = &op->cl; 1483 bio->bi_opf |= REQ_OP_WRITE; 1484 1485 closure_get(bio->bi_private); 1486 1487 key_to_write = (void *) (op->insert_keys.keys_p + 1488 key_to_write_offset); 1489 1490 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, 1491 key_to_write, false); 1492 } while (ret); 1493 1494 /* 1495 * Sync or no? 1496 * 1497 * If we're running asynchronously, wne may still want to block 1498 * synchronously here if we weren't able to submit all of the IO at 1499 * once, as that signals backpressure to the caller. 1500 */ 1501 if ((op->flags & BCH_WRITE_SYNC) || 1502 (!(op->flags & BCH_WRITE_DONE) && 1503 !(op->flags & BCH_WRITE_IN_WORKER))) { 1504 if (closure_sync_timeout(&op->cl, HZ * 10)) { 1505 bch2_print_allocator_stuck(c); 1506 closure_sync(&op->cl); 1507 } 1508 1509 __bch2_write_index(op); 1510 1511 if (!(op->flags & BCH_WRITE_DONE)) 1512 goto again; 1513 bch2_write_done(&op->cl); 1514 } else { 1515 bch2_write_queue(op, wp); 1516 continue_at(&op->cl, bch2_write_index, NULL); 1517 } 1518 out_nofs_restore: 1519 memalloc_nofs_restore(nofs_flags); 1520 } 1521 1522 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len) 1523 { 1524 struct bio *bio = &op->wbio.bio; 1525 struct bvec_iter iter; 1526 struct bkey_i_inline_data *id; 1527 unsigned sectors; 1528 int ret; 1529 1530 memset(&op->failed, 0, sizeof(op->failed)); 1531 1532 op->flags |= BCH_WRITE_WROTE_DATA_INLINE; 1533 op->flags |= BCH_WRITE_DONE; 1534 1535 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data); 1536 1537 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys, 1538 ARRAY_SIZE(op->inline_keys), 1539 BKEY_U64s + DIV_ROUND_UP(data_len, 8)); 1540 if (ret) { 1541 op->error = ret; 1542 goto err; 1543 } 1544 1545 sectors = bio_sectors(bio); 1546 op->pos.offset += sectors; 1547 1548 id = bkey_inline_data_init(op->insert_keys.top); 1549 id->k.p = op->pos; 1550 id->k.version = op->version; 1551 id->k.size = sectors; 1552 1553 iter = bio->bi_iter; 1554 iter.bi_size = data_len; 1555 memcpy_from_bio(id->v.data, bio, iter); 1556 1557 while (data_len & 7) 1558 id->v.data[data_len++] = '\0'; 1559 set_bkey_val_bytes(&id->k, data_len); 1560 bch2_keylist_push(&op->insert_keys); 1561 1562 __bch2_write_index(op); 1563 err: 1564 bch2_write_done(&op->cl); 1565 } 1566 1567 /** 1568 * bch2_write() - handle a write to a cache device or flash only volume 1569 * @cl: &bch_write_op->cl 1570 * 1571 * This is the starting point for any data to end up in a cache device; it could 1572 * be from a normal write, or a writeback write, or a write to a flash only 1573 * volume - it's also used by the moving garbage collector to compact data in 1574 * mostly empty buckets. 1575 * 1576 * It first writes the data to the cache, creating a list of keys to be inserted 1577 * (if the data won't fit in a single open bucket, there will be multiple keys); 1578 * after the data is written it calls bch_journal, and after the keys have been 1579 * added to the next journal write they're inserted into the btree. 1580 * 1581 * If op->discard is true, instead of inserting the data it invalidates the 1582 * region of the cache represented by op->bio and op->inode. 1583 */ 1584 CLOSURE_CALLBACK(bch2_write) 1585 { 1586 closure_type(op, struct bch_write_op, cl); 1587 struct bio *bio = &op->wbio.bio; 1588 struct bch_fs *c = op->c; 1589 unsigned data_len; 1590 1591 EBUG_ON(op->cl.parent); 1592 BUG_ON(!op->nr_replicas); 1593 BUG_ON(!op->write_point.v); 1594 BUG_ON(bkey_eq(op->pos, POS_MAX)); 1595 1596 op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas); 1597 op->start_time = local_clock(); 1598 bch2_keylist_init(&op->insert_keys, op->inline_keys); 1599 wbio_init(bio)->put_bio = false; 1600 1601 if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) { 1602 bch_err_inum_offset_ratelimited(c, 1603 op->pos.inode, 1604 op->pos.offset << 9, 1605 "%s write error: misaligned write", 1606 op->flags & BCH_WRITE_MOVE ? "move" : "user"); 1607 op->error = -EIO; 1608 goto err; 1609 } 1610 1611 if (c->opts.nochanges) { 1612 op->error = -BCH_ERR_erofs_no_writes; 1613 goto err; 1614 } 1615 1616 if (!(op->flags & BCH_WRITE_MOVE) && 1617 !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) { 1618 op->error = -BCH_ERR_erofs_no_writes; 1619 goto err; 1620 } 1621 1622 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio)); 1623 bch2_increment_clock(c, bio_sectors(bio), WRITE); 1624 1625 data_len = min_t(u64, bio->bi_iter.bi_size, 1626 op->new_i_size - (op->pos.offset << 9)); 1627 1628 if (c->opts.inline_data && 1629 data_len <= min(block_bytes(c) / 2, 1024U)) { 1630 bch2_write_data_inline(op, data_len); 1631 return; 1632 } 1633 1634 __bch2_write(op); 1635 return; 1636 err: 1637 bch2_disk_reservation_put(c, &op->res); 1638 1639 closure_debug_destroy(&op->cl); 1640 if (op->end_io) 1641 op->end_io(op); 1642 } 1643 1644 static const char * const bch2_write_flags[] = { 1645 #define x(f) #f, 1646 BCH_WRITE_FLAGS() 1647 #undef x 1648 NULL 1649 }; 1650 1651 void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op) 1652 { 1653 prt_str(out, "pos: "); 1654 bch2_bpos_to_text(out, op->pos); 1655 prt_newline(out); 1656 printbuf_indent_add(out, 2); 1657 1658 prt_str(out, "started: "); 1659 bch2_pr_time_units(out, local_clock() - op->start_time); 1660 prt_newline(out); 1661 1662 prt_str(out, "flags: "); 1663 prt_bitflags(out, bch2_write_flags, op->flags); 1664 prt_newline(out); 1665 1666 prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl)); 1667 1668 printbuf_indent_sub(out, 2); 1669 } 1670 1671 void bch2_fs_io_write_exit(struct bch_fs *c) 1672 { 1673 mempool_exit(&c->bio_bounce_pages); 1674 bioset_exit(&c->replica_set); 1675 bioset_exit(&c->bio_write); 1676 } 1677 1678 int bch2_fs_io_write_init(struct bch_fs *c) 1679 { 1680 if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) || 1681 bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0)) 1682 return -BCH_ERR_ENOMEM_bio_write_init; 1683 1684 if (mempool_init_page_pool(&c->bio_bounce_pages, 1685 max_t(unsigned, 1686 c->opts.btree_node_size, 1687 c->opts.encoded_extent_max) / 1688 PAGE_SIZE, 0)) 1689 return -BCH_ERR_ENOMEM_bio_bounce_pages_init; 1690 1691 return 0; 1692 } 1693