// SPDX-License-Identifier: GPL-2.0
/*
 * io_misc.c - fallocate, fpunch, truncate:
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "subvolume.h"

/* Overwrites whatever was present with zeroes: */
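/*
 * Two cases, as inferred from the code below: without unwritten-extent
 * support (or when the inode isn't nocow) we insert a reservation key, i.e.
 * space that is accounted but not yet allocated; in the nocow case on a new
 * enough on-disk format we allocate real space up front and mark every
 * pointer as unwritten, so the range reads back as zeroes until written.
 */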
int bch2_extent_fallocate(struct btree_trans *trans,
			  subvol_inum inum,
			  struct btree_iter *iter,
			  u64 sectors,
			  struct bch_io_opts opts,
			  s64 *i_sectors_delta,
			  struct write_point_specifier write_point)
{
	struct bch_fs *c = trans->c;
	struct disk_reservation disk_res = { 0 };
	struct closure cl;
	struct open_buckets open_buckets = { 0 };
	struct bkey_s_c k;
	struct bkey_buf old, new;
	unsigned sectors_allocated = 0, new_replicas;
	bool unwritten = opts.nocow &&
	    c->sb.version >= bcachefs_metadata_version_unwritten_extents;
	int ret;

	bch2_bkey_buf_init(&old);
	bch2_bkey_buf_init(&new);
	closure_init_stack(&cl);

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
	new_replicas = max(0, (int) opts.data_replicas -
			   (int) bch2_bkey_nr_ptrs_fully_allocated(k));

	/*
	 * Get a disk reservation before (in the nocow case) calling
	 * into the allocator:
	 */
	ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
	if (unlikely(ret))
		goto err_noprint;

	bch2_bkey_buf_reassemble(&old, c, k);

	if (!unwritten) {
		struct bkey_i_reservation *reservation;

		bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
		reservation = bkey_reservation_init(new.k);
		reservation->k.p = iter->pos;
		bch2_key_resize(&reservation->k, sectors);
		reservation->v.nr_replicas = opts.data_replicas;
	} else {
		struct bkey_i_extent *e;
		struct bch_devs_list devs_have;
		struct write_point *wp;

		devs_have.nr = 0;

		bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);

		e = bkey_extent_init(new.k);
		e->k.p = iter->pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				opts.foreground_target,
				false,
				write_point,
				&devs_have,
				opts.data_replicas,
				opts.data_replicas,
				BCH_WATERMARK_normal, 0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
			ret = -BCH_ERR_transaction_restart_nested;
		if (ret)
			goto err;

		sectors = min_t(u64, sectors, wp->sectors_free);
		sectors_allocated = sectors;

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
	}

	ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
				 0, i_sectors_delta, true);
err:
	if (!ret && sectors_allocated)
		bch2_increment_clock(c, sectors_allocated, WRITE);
	if (should_print_err(ret)) {
		struct printbuf buf = PRINTBUF;
		bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9);
		prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret));
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
err_noprint:
	bch2_open_buckets_put(c, &open_buckets);
	bch2_disk_reservation_put(c, &disk_res);
	bch2_bkey_buf_exit(&new, c);
	bch2_bkey_buf_exit(&old, c);

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock_long(trans);
		bch2_wait_on_allocator(c, &cl);
	}

	return ret;
}

/*
 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
 */
int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
		   subvol_inum inum, u64 end,
		   s64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
	struct bpos end_pos = POS(inum.inum, end);
	struct bkey_s_c k;
	int ret = 0, ret2 = 0;
	u32 snapshot;

	while (!ret ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;

		if (ret)
			ret2 = ret;

		bch2_trans_begin(trans);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(iter, snapshot);

		/*
		 * peek_max() doesn't have ideal semantics for extents:
		 */
		k = bch2_btree_iter_peek_max(iter, end_pos);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (ret)
			continue;

		bkey_init(&delete.k);
		delete.k.p = iter->pos;

		/* create the biggest key we can */
		bch2_key_resize(&delete.k, max_sectors);
		bch2_cut_back(end_pos, &delete);

		ret = bch2_extent_update(trans, inum, iter, &delete,
				&disk_res, 0, i_sectors_delta, false);
		bch2_disk_reservation_put(c, &disk_res);
	}

	return ret ?: ret2;
}

int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
		s64 *i_sectors_delta)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, start),
			     BTREE_ITER_intent);

	ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;

	return ret;
}

/* truncate: */

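/*
 * Truncate is driven as a logged op (see logged_ops.c), as inferred from the
 * code below: bch2_truncate() records a logged_op_truncate key, then
 * __bch2_resume_logged_op_truncate() does the work - set the new i_size, then
 * punch all extents from the new size (rounded up to a filesystem block) to
 * the end of the file. An interrupted truncate can be re-run from the logged
 * op key via bch2_resume_logged_op_truncate().
 */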

void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
}

static int truncate_set_isize(struct btree_trans *trans,
			      subvol_inum inum,
			      u64 new_i_size,
			      bool warn)
{
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;

	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn) ?:
		(inode_u.bi_size = new_i_size, 0) ?:
		bch2_inode_write(trans, &iter, &inode_u);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
					    struct bkey_i *op_k,
					    u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter fpunch_iter;
	struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	u64 new_i_size = le64_to_cpu(op->v.new_i_size);
	bool warn_errors = i_sectors_delta != NULL;
	int ret;

	ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			truncate_set_isize(trans, inum, new_i_size, i_sectors_delta != NULL));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
			     POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
			     BTREE_ITER_intent);
	ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
	bch2_trans_iter_exit(trans, &fpunch_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;
err:
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
}

int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
{
	struct bkey_i_logged_op_truncate op;

	bkey_logged_op_truncate_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.new_i_size = cpu_to_le64(new_i_size);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
	ret = __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}

/* finsert/fcollapse: */

void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
	prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
}

static int adjust_i_size(struct btree_trans *trans, subvol_inum inum,
			 u64 offset, s64 len, bool warn)
{
	struct btree_iter iter;
	struct bch_inode_unpacked inode_u;
	int ret;

	offset <<= 9;
	len <<= 9;

	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn);
	if (ret)
		return ret;

	if (len > 0) {
		if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
			ret = -EFBIG;
			goto err;
		}

		if (offset >= inode_u.bi_size) {
			ret = -EINVAL;
			goto err;
		}
	}

	inode_u.bi_size += len;
	inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);

	ret = bch2_inode_write(trans, &iter, &inode_u);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

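/*
 * Insert or collapse a hole, driven as a logged-op state machine
 * (start -> shift_extents -> finish) so it can be resumed after a crash:
 *
 * - finsert: grow i_size first, then walk extents backwards from the end of
 *   the file, moving each one up by @shift.
 * - fcollapse: punch the collapsed range first, then walk the remaining
 *   extents forwards, moving each one down; i_size is shrunk at the end.
 *
 * op->v.pos records how far the shift has progressed and is updated in the
 * same transaction as each extent move, so resume continues where we left
 * off.
 */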
static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
					   struct bkey_i *op_k,
					   u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	struct bch_io_opts opts;
	u64 dst_offset = le64_to_cpu(op->v.dst_offset);
	u64 src_offset = le64_to_cpu(op->v.src_offset);
	s64 shift = dst_offset - src_offset;
	u64 len = abs(shift);
	u64 pos = le64_to_cpu(op->v.pos);
	bool insert = shift > 0;
	u32 snapshot;
	bool warn_errors = i_sectors_delta != NULL;
	int ret = 0;

	ret = bch2_inum_opts_get(trans, inum, &opts);
	if (ret)
		return ret;

	/*
	 * Check for a missing subvolume before fpunch: on resume we don't want
	 * that to be a fatal error
	 */
	ret = lockrestart_do(trans, __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn_errors));
	if (ret)
		return ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, 0),
			     BTREE_ITER_intent);

	switch (op->v.state) {
	case LOGGED_OP_FINSERT_start:
		op->v.state = LOGGED_OP_FINSERT_shift_extents;

		if (insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, len, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
			if (ret)
				goto err;
		} else {
			bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));

			ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
			if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
				goto err;

			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					bch2_logged_op_update(trans, &op->k_i));
		}

		fallthrough;
	case LOGGED_OP_FINSERT_shift_extents:
		while (1) {
			struct disk_reservation disk_res =
				bch2_disk_reservation_init(c, 0);
			struct bkey_i delete, *copy;
			struct bkey_s_c k;
			struct bpos src_pos = POS(inum.inum, src_offset);

			bch2_trans_begin(trans);

			ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot,
							    warn_errors);
			if (ret)
				goto btree_err;

			bch2_btree_iter_set_snapshot(&iter, snapshot);
			bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));

			k = insert
				? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
				: bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
			if ((ret = bkey_err(k)))
				goto btree_err;

			if (!k.k ||
			    k.k->p.inode != inum.inum ||
			    bkey_le(k.k->p, POS(inum.inum, src_offset)))
				break;

			copy = bch2_bkey_make_mut_noupdate(trans, k);
			if ((ret = PTR_ERR_OR_ZERO(copy)))
				goto btree_err;

			if (insert &&
			    bkey_lt(bkey_start_pos(k.k), src_pos)) {
				bch2_cut_front(src_pos, copy);

				/* Splitting compressed extent? */
				bch2_disk_reservation_add(c, &disk_res,
							  copy->k.size *
							  bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
							  BCH_DISK_RESERVATION_NOFAIL);
			}

			bkey_init(&delete.k);
			delete.k.p = copy->k.p;
			delete.k.p.snapshot = snapshot;
			delete.k.size = copy->k.size;

			copy->k.p.offset += shift;
			copy->k.p.snapshot = snapshot;

			op->v.pos = cpu_to_le64(insert
						? bkey_start_offset(&delete.k)
						: delete.k.p.offset);

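			/*
			 * The new position is committed together with the
			 * shifted extent (bch2_logged_op_update() is part of
			 * the same transaction below), so a crash here resumes
			 * from the last extent that was actually moved.
			 */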
			ret = bch2_bkey_set_needs_rebalance(c, &opts, copy) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
				bch2_logged_op_update(trans, &op->k_i) ?:
				bch2_trans_commit(trans, &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc);
btree_err:
			bch2_disk_reservation_put(c, &disk_res);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;

			pos = le64_to_cpu(op->v.pos);
		}

		op->v.state = LOGGED_OP_FINSERT_finish;

		if (!insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, shift, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		} else {
			/* We need an inode update to update bi_journal_seq for fsync: */
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, 0, 0, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		}

		break;
	case LOGGED_OP_FINSERT_finish:
		break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
}

int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
			   u64 offset, u64 len, bool insert,
			   s64 *i_sectors_delta)
{
	struct bkey_i_logged_op_finsert op;
	s64 shift = insert ? len : -len;

	bkey_logged_op_finsert_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.dst_offset = cpu_to_le64(offset + shift);
	op.v.src_offset = cpu_to_le64(offset);
	op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
	ret = __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}

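/*
 * Illustrative sketch only: the real call sites live in the VFS-facing code
 * (e.g. the fallocate path), not in this file. A caller passes offsets and
 * lengths in sectors and accounts the returned i_sectors_delta against the
 * in-memory inode, roughly:
 *
 *	s64 i_sectors_delta = 0;
 *	int ret = bch2_fcollapse_finsert(c, inum, offset >> 9, len >> 9,
 *					 insert, &i_sectors_delta);
 */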