// SPDX-License-Identifier: GPL-2.0
/*
 * io_misc.c - fallocate, fpunch, truncate:
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "subvolume.h"

/* Overwrites whatever was present with zeroes: */
int bch2_extent_fallocate(struct btree_trans *trans,
			  subvol_inum inum,
			  struct btree_iter *iter,
			  u64 sectors,
			  struct bch_io_opts opts,
			  s64 *i_sectors_delta,
			  struct write_point_specifier write_point)
{
	struct bch_fs *c = trans->c;
	struct disk_reservation disk_res = { 0 };
	struct closure cl;
	struct open_buckets open_buckets = { 0 };
	struct bkey_s_c k;
	struct bkey_buf old, new;
	unsigned sectors_allocated = 0, new_replicas;
	bool unwritten = opts.nocow &&
		c->sb.version >= bcachefs_metadata_version_unwritten_extents;
	int ret;

	bch2_bkey_buf_init(&old);
	bch2_bkey_buf_init(&new);
	closure_init_stack(&cl);

	k = bch2_btree_iter_peek_slot(trans, iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
	new_replicas = max(0, (int) opts.data_replicas -
			   (int) bch2_bkey_nr_ptrs_fully_allocated(k));

	/*
	 * Get a disk reservation before (in the nocow case) calling
	 * into the allocator:
	 */
	ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
	if (unlikely(ret))
		goto err_noprint;

	bch2_bkey_buf_reassemble(&old, c, k);

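	/*
	 * Two cases: without nocow (or on an on-disk format too old for
	 * unwritten extents) we only insert a reservation key; with nocow we
	 * allocate space now and mark every pointer unwritten, so reads
	 * return zeroes until the data is actually written:
	 */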
	if (!unwritten) {
		struct bkey_i_reservation *reservation;

		bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
		reservation = bkey_reservation_init(new.k);
		reservation->k.p = iter->pos;
		bch2_key_resize(&reservation->k, sectors);
		reservation->v.nr_replicas = opts.data_replicas;
	} else {
		struct bkey_i_extent *e;
		struct bch_devs_list devs_have;
		struct write_point *wp;

		devs_have.nr = 0;

		bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);

		e = bkey_extent_init(new.k);
		e->k.p = iter->pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				opts.foreground_target,
				false,
				write_point,
				&devs_have,
				opts.data_replicas,
				opts.data_replicas,
				BCH_WATERMARK_normal, 0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
			ret = bch_err_throw(c, transaction_restart_nested);
		if (ret)
			goto err;

		sectors = min_t(u64, sectors, wp->sectors_free);
		sectors_allocated = sectors;

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
	}

	ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
				 0, i_sectors_delta, true);
err:
	if (!ret && sectors_allocated)
		bch2_increment_clock(c, sectors_allocated, WRITE);
	if (should_print_err(ret)) {
		struct printbuf buf = PRINTBUF;
		lockrestart_do(trans,
			bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9));
		prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret));
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
err_noprint:
	bch2_open_buckets_put(c, &open_buckets);
	bch2_disk_reservation_put(c, &disk_res);
	bch2_bkey_buf_exit(&new, c);
	bch2_bkey_buf_exit(&old, c);

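	/*
	 * If the allocator blocked and took a ref on our closure, drop btree
	 * locks before waiting for it:
	 */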
	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock_long(trans);
		bch2_wait_on_allocator(c, &cl);
	}

	return ret;
}

/* For fsck */
int bch2_fpunch_snapshot(struct btree_trans *trans, struct bpos start, struct bpos end)
{
	u32 restart_count = trans->restart_count;
	struct bch_fs *c = trans->c;
	struct disk_reservation disk_res = bch2_disk_reservation_init(c, 0);
	unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
	struct bkey_i delete;

	int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
			start, end, 0, k,
			&disk_res, NULL, BCH_TRANS_COMMIT_no_enospc, ({
		bkey_init(&delete.k);
		delete.k.p = iter.pos;

		/* create the biggest key we can */
		bch2_key_resize(&delete.k, max_sectors);
		bch2_cut_back(end, &delete);

		bch2_extent_trim_atomic(trans, &iter, &delete) ?:
		bch2_trans_update(trans, &iter, &delete, 0);
	}));

	bch2_disk_reservation_put(c, &disk_res);
	return ret ?: trans_was_restarted(trans, restart_count);
}

/*
 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
 */
int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
		   subvol_inum inum, u64 end,
		   s64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
	struct bpos end_pos = POS(inum.inum, end);
	struct bkey_s_c k;
	int ret = 0, ret2 = 0;
	u32 snapshot;

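	/*
	 * Delete one extent per loop iteration, retrying after transaction
	 * restarts; ret2 remembers that a restart happened so the caller
	 * knows locks were dropped:
	 */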
	while (!ret ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;

		if (ret)
			ret2 = ret;

		bch2_trans_begin(trans);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(trans, iter, snapshot);

		/*
		 * peek_max() doesn't have ideal semantics for extents:
		 */
		k = bch2_btree_iter_peek_max(trans, iter, end_pos);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (ret)
			continue;

		bkey_init(&delete.k);
		delete.k.p = iter->pos;

		/* create the biggest key we can */
		bch2_key_resize(&delete.k, max_sectors);
		bch2_cut_back(end_pos, &delete);

		ret = bch2_extent_update(trans, inum, iter, &delete,
					 &disk_res, 0, i_sectors_delta, false);
		bch2_disk_reservation_put(c, &disk_res);
	}

	return ret ?: ret2;
}

int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
		s64 *i_sectors_delta)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, start),
			     BTREE_ITER_intent);

	ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

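	/*
	 * bch2_fpunch_at() returns a transaction restart to signal that locks
	 * were dropped; for a standalone punch that's not an error:
	 */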
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;

	return ret;
}

/* truncate: */

void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
}

static int truncate_set_isize(struct btree_trans *trans,
			      subvol_inum inum,
			      u64 new_i_size,
			      bool warn)
{
	struct btree_iter iter = {};
	struct bch_inode_unpacked inode_u;
	int ret;

	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn) ?:
		(inode_u.bi_size = new_i_size, 0) ?:
		bch2_inode_write(trans, &iter, &inode_u);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
					    struct bkey_i *op_k,
					    u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter fpunch_iter;
	struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	u64 new_i_size = le64_to_cpu(op->v.new_i_size);
	bool warn_errors = i_sectors_delta != NULL;
	int ret;

	ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			truncate_set_isize(trans, inum, new_i_size, i_sectors_delta != NULL));
	if (ret)
		goto err;

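	/*
	 * The new i_size is committed; now punch everything from the new EOF
	 * (rounded up to a filesystem block) through the end of the file:
	 */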
	bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
			     POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
			     BTREE_ITER_intent);
	ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
	bch2_trans_iter_exit(trans, &fpunch_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;
err:
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
}

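/*
 * Truncate runs as a logged op: the logged_op_truncate key persists the
 * operation, so an interrupted truncate is picked back up via
 * bch2_resume_logged_op_truncate() during recovery:
 */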
int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
{
	struct bkey_i_logged_op_truncate op;

	bkey_logged_op_truncate_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.new_i_size = cpu_to_le64(new_i_size);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
	ret = __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}

/* finsert/fcollapse: */

void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
	prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
}

static int adjust_i_size(struct btree_trans *trans, subvol_inum inum,
			 u64 offset, s64 len, bool warn)
{
	struct btree_iter iter;
	struct bch_inode_unpacked inode_u;
	int ret;

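	/* offset and len are in 512-byte sectors; bi_size is in bytes: */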
	offset <<= 9;
	len <<= 9;

	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn);
	if (ret)
		return ret;

	if (len > 0) {
		if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
			ret = -EFBIG;
			goto err;
		}

		if (offset >= inode_u.bi_size) {
			ret = -EINVAL;
			goto err;
		}
	}

	inode_u.bi_size += len;
	inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);

	ret = bch2_inode_write(trans, &iter, &inode_u);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
					   struct bkey_i *op_k,
					   u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	struct bch_io_opts opts;
	u64 dst_offset = le64_to_cpu(op->v.dst_offset);
	u64 src_offset = le64_to_cpu(op->v.src_offset);
	s64 shift = dst_offset - src_offset;
	u64 len = abs(shift);
	u64 pos = le64_to_cpu(op->v.pos);
	bool insert = shift > 0;
	u32 snapshot;
	bool warn_errors = i_sectors_delta != NULL;
	int ret = 0;

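	/*
	 * shift > 0 is falloc insert-range: extents from src_offset onwards
	 * move right by |shift|, walking backwards from the end of the file.
	 * shift < 0 is collapse-range: extents move left, walking forwards.
	 * op->v.pos is the cursor recording how far the shift has gotten, so
	 * an interrupted operation resumes where it left off:
	 */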
	ret = bch2_inum_opts_get(trans, inum, &opts);
	if (ret)
		return ret;

	/*
	 * check for missing subvolume before fpunch, as in resume we don't want
	 * it to be a fatal error
	 */
	ret = lockrestart_do(trans, __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn_errors));
	if (ret)
		return ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, 0),
			     BTREE_ITER_intent);

	switch (op->v.state) {
	case LOGGED_OP_FINSERT_start:
		op->v.state = LOGGED_OP_FINSERT_shift_extents;

		if (insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, len, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
			if (ret)
				goto err;
		} else {
			bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset));

			ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
			if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
				goto err;

			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					bch2_logged_op_update(trans, &op->k_i));
		}

		fallthrough;
	case LOGGED_OP_FINSERT_shift_extents:
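		/*
		 * Move one extent per commit: delete it at its old position,
		 * reinsert a copy shifted by 'shift', and update op->v.pos in
		 * the same transaction so the shift can resume after a crash:
		 */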
		while (1) {
			struct disk_reservation disk_res =
				bch2_disk_reservation_init(c, 0);
			struct bkey_i delete, *copy;
			struct bkey_s_c k;
			struct bpos src_pos = POS(inum.inum, src_offset);

			bch2_trans_begin(trans);

			ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot,
							    warn_errors);
			if (ret)
				goto btree_err;

			bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
			bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot));

			k = insert
				? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0))
				: bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX));
			if ((ret = bkey_err(k)))
				goto btree_err;

			if (!k.k ||
			    k.k->p.inode != inum.inum ||
			    bkey_le(k.k->p, POS(inum.inum, src_offset)))
				break;

			copy = bch2_bkey_make_mut_noupdate(trans, k);
			if ((ret = PTR_ERR_OR_ZERO(copy)))
				goto btree_err;

			if (insert &&
			    bkey_lt(bkey_start_pos(k.k), src_pos)) {
				bch2_cut_front(src_pos, copy);

				/* Splitting compressed extent? */
				bch2_disk_reservation_add(c, &disk_res,
						copy->k.size *
						bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
						BCH_DISK_RESERVATION_NOFAIL);
			}

			bkey_init(&delete.k);
			delete.k.p = copy->k.p;
			delete.k.p.snapshot = snapshot;
			delete.k.size = copy->k.size;

			copy->k.p.offset += shift;
			copy->k.p.snapshot = snapshot;

			op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);

			ret = bch2_bkey_set_needs_rebalance(c, &opts, copy) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
				bch2_logged_op_update(trans, &op->k_i) ?:
				bch2_trans_commit(trans, &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc);
btree_err:
			bch2_disk_reservation_put(c, &disk_res);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;

			pos = le64_to_cpu(op->v.pos);
		}

		op->v.state = LOGGED_OP_FINSERT_finish;

		if (!insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, shift, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		} else {
			/* We need an inode update to update bi_journal_seq for fsync: */
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, 0, 0, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		}

		break;
	case LOGGED_OP_FINSERT_finish:
		break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
}

int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
			   u64 offset, u64 len, bool insert,
			   s64 *i_sectors_delta)
{
	struct bkey_i_logged_op_finsert op;
	s64 shift = insert ? len : -len;

	bkey_logged_op_finsert_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.dst_offset = cpu_to_le64(offset + shift);
	op.v.src_offset = cpu_to_le64(offset);
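	/*
	 * pos is the resume cursor for the shift_extents state: insert-range
	 * walks backwards from the end of the keyspace, collapse-range walks
	 * forwards from offset:
	 */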
	op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
	ret = __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}