// SPDX-License-Identifier: GPL-2.0
/*
 * io_misc.c - fallocate, fpunch, truncate:
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "subvolume.h"

/* Overwrites whatever was present with zeroes: */
int bch2_extent_fallocate(struct btree_trans *trans,
			  subvol_inum inum,
			  struct btree_iter *iter,
			  u64 sectors,
			  struct bch_io_opts opts,
			  s64 *i_sectors_delta,
			  struct write_point_specifier write_point)
{
	struct bch_fs *c = trans->c;
	struct disk_reservation disk_res = { 0 };
	struct closure cl;
	struct open_buckets open_buckets = { 0 };
	struct bkey_s_c k;
	struct bkey_buf old, new;
	unsigned sectors_allocated = 0, new_replicas;
	bool unwritten = opts.nocow &&
		c->sb.version >= bcachefs_metadata_version_unwritten_extents;
	int ret;

	bch2_bkey_buf_init(&old);
	bch2_bkey_buf_init(&new);
	closure_init_stack(&cl);

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

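	/*
	 * Don't allocate past the end of the extent (or hole) at the
	 * iterator position, and only reserve space for replicas beyond
	 * those already fully allocated:
	 */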
	sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
	new_replicas = max(0, (int) opts.data_replicas -
			   (int) bch2_bkey_nr_ptrs_fully_allocated(k));

	/*
	 * Get a disk reservation before (in the nocow case) calling
	 * into the allocator:
	 */
	ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
	if (unlikely(ret))
		goto err_noprint;

	bch2_bkey_buf_reassemble(&old, c, k);

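	/*
	 * Without unwritten extent support we fall back to a reservation
	 * key: no buckets are allocated, and the range reads back as zeroes.
	 * Otherwise, allocate real sectors now and mark every pointer
	 * unwritten:
	 */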
	if (!unwritten) {
		struct bkey_i_reservation *reservation;

		bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
		reservation = bkey_reservation_init(new.k);
		reservation->k.p = iter->pos;
		bch2_key_resize(&reservation->k, sectors);
		reservation->v.nr_replicas = opts.data_replicas;
	} else {
		struct bkey_i_extent *e;
		struct bch_devs_list devs_have;
		struct write_point *wp;

		devs_have.nr = 0;

		bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);

		e = bkey_extent_init(new.k);
		e->k.p = iter->pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				opts.foreground_target,
				false,
				write_point,
				&devs_have,
				opts.data_replicas,
				opts.data_replicas,
				BCH_WATERMARK_normal, 0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
			ret = -BCH_ERR_transaction_restart_nested;
		if (ret)
			goto err;

		sectors = min_t(u64, sectors, wp->sectors_free);
		sectors_allocated = sectors;

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
	}

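	/*
	 * Commit the new key; bch2_extent_update() also keeps the inode's
	 * i_sectors in sync via i_sectors_delta:
	 */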
	ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
				 0, i_sectors_delta, true);
err:
	if (!ret && sectors_allocated)
		bch2_increment_clock(c, sectors_allocated, WRITE);
	if (should_print_err(ret)) {
		struct printbuf buf = PRINTBUF;
		lockrestart_do(trans,
			bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9));
		prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret));
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
err_noprint:
	bch2_open_buckets_put(c, &open_buckets);
	bch2_disk_reservation_put(c, &disk_res);
	bch2_bkey_buf_exit(&new, c);
	bch2_bkey_buf_exit(&old, c);

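	/*
	 * If the allocator put us on a waitlist the closure holds extra
	 * refs; drop btree locks before blocking on it:
	 */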
	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock_long(trans);
		bch2_wait_on_allocator(c, &cl);
	}

	return ret;
}

/*
 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
 */
int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
		   subvol_inum inum, u64 end,
		   s64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
	struct bpos end_pos = POS(inum.inum, end);
	struct bkey_s_c k;
	int ret = 0, ret2 = 0;
	u32 snapshot;

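	/*
	 * Loop until done, retrying on transaction restart; ret2 remembers
	 * that we restarted, so the caller learns locks were dropped even
	 * when the punch itself succeeds (see comment above):
	 */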
	while (!ret ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;

		if (ret)
			ret2 = ret;

		bch2_trans_begin(trans);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(iter, snapshot);

		/*
		 * peek_max() doesn't have ideal semantics for extents:
		 */
		k = bch2_btree_iter_peek_max(iter, end_pos);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (ret)
			continue;

		bkey_init(&delete.k);
		delete.k.p = iter->pos;

		/* create the biggest key we can */
		bch2_key_resize(&delete.k, max_sectors);
		bch2_cut_back(end_pos, &delete);

		ret = bch2_extent_update(trans, inum, iter, &delete,
					 &disk_res, 0, i_sectors_delta, false);
		bch2_disk_reservation_put(c, &disk_res);
	}

	return ret ?: ret2;
}

int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
		s64 *i_sectors_delta)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, start),
			     BTREE_ITER_intent);

	ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

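	/*
	 * A transaction restart from bch2_fpunch_at() just means we dropped
	 * locks partway through; the punch itself has completed:
	 */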
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;

	return ret;
}

/* truncate: */

void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
}

static int truncate_set_isize(struct btree_trans *trans,
			      subvol_inum inum,
			      u64 new_i_size,
			      bool warn)
{
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;

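	/*
	 * Peek the inode, set the new size, then write it back; the comma
	 * expression evaluates to 0 so the whole sequence chains with ?:
	 */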
	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn) ?:
		(inode_u.bi_size = new_i_size, 0) ?:
		bch2_inode_write(trans, &iter, &inode_u);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
					    struct bkey_i *op_k,
					    u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter fpunch_iter;
	struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	u64 new_i_size = le64_to_cpu(op->v.new_i_size);
	bool warn_errors = i_sectors_delta != NULL;
	int ret;

	ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			truncate_set_isize(trans, inum, new_i_size, i_sectors_delta != NULL));
	if (ret)
		goto err;

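	/*
	 * i_size is updated; now punch everything from the new EOF, rounded
	 * up to a filesystem block, through the end of the inode:
	 */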
	bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
			     POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
			     BTREE_ITER_intent);
	ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
	bch2_trans_iter_exit(trans, &fpunch_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = 0;
err:
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
}

int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
{
	struct bkey_i_logged_op_truncate op;

	bkey_logged_op_truncate_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.new_i_size = cpu_to_le64(new_i_size);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
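	/*
	 * The actual work runs through the resume path, so a fresh truncate
	 * and one replayed after a crash share the same code; finish is
	 * called even on error so the logged-op key is removed either way:
	 */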
	ret = __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}

/* finsert/fcollapse: */

void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);

	prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
	prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
	prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
	prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
}

static int adjust_i_size(struct btree_trans *trans, subvol_inum inum,
			 u64 offset, s64 len, bool warn)
{
	struct btree_iter iter;
	struct bch_inode_unpacked inode_u;
	int ret;

	offset <<= 9;
	len <<= 9;

	ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn);
	if (ret)
		return ret;

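	/*
	 * When growing (len > 0), refuse to exceed MAX_LFS_FILESIZE, and
	 * require the insert offset to land within the current file size:
	 */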
	if (len > 0) {
		if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
			ret = -EFBIG;
			goto err;
		}

		if (offset >= inode_u.bi_size) {
			ret = -EINVAL;
			goto err;
		}
	}

	inode_u.bi_size += len;
	inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);

	ret = bch2_inode_write(trans, &iter, &inode_u);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
					   struct bkey_i *op_k,
					   u64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
	subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
	struct bch_io_opts opts;
	u64 dst_offset = le64_to_cpu(op->v.dst_offset);
	u64 src_offset = le64_to_cpu(op->v.src_offset);
	s64 shift = dst_offset - src_offset;
	u64 len = abs(shift);
	u64 pos = le64_to_cpu(op->v.pos);
	bool insert = shift > 0;
	u32 snapshot;
	bool warn_errors = i_sectors_delta != NULL;
	int ret = 0;

	ret = bch2_inum_opts_get(trans, inum, &opts);
	if (ret)
		return ret;

	/*
	 * check for missing subvolume before fpunch, as in resume we don't want
	 * it to be a fatal error
	 */
	ret = lockrestart_do(trans, __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn_errors));
	if (ret)
		return ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, 0),
			     BTREE_ITER_intent);

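	/*
	 * Logged-op state machine: "start" grows i_size (insert) or punches
	 * the range being collapsed, "shift_extents" moves one extent per
	 * transaction commit with progress persisted in op->v.pos, and
	 * "finish" is the terminal state. A resume after a crash re-enters
	 * here at whatever state was last committed:
	 */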
	switch (op->v.state) {
	case LOGGED_OP_FINSERT_start:
		op->v.state = LOGGED_OP_FINSERT_shift_extents;

		if (insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, len, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
			if (ret)
				goto err;
		} else {
			bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));

			ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
			if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
				goto err;

			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					bch2_logged_op_update(trans, &op->k_i));
		}

		fallthrough;
	case LOGGED_OP_FINSERT_shift_extents:
		while (1) {
			struct disk_reservation disk_res =
				bch2_disk_reservation_init(c, 0);
			struct bkey_i delete, *copy;
			struct bkey_s_c k;
			struct bpos src_pos = POS(inum.inum, src_offset);

			bch2_trans_begin(trans);

			ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot,
							    warn_errors);
			if (ret)
				goto btree_err;

			bch2_btree_iter_set_snapshot(&iter, snapshot);
			bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));

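			/*
			 * Insert walks backwards from the end of the file so
			 * that shifting an extent right never lands on one
			 * that hasn't moved yet; collapse walks forwards for
			 * the same reason:
			 */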
			k = insert
				? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
				: bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
			if ((ret = bkey_err(k)))
				goto btree_err;

			if (!k.k ||
			    k.k->p.inode != inum.inum ||
			    bkey_le(k.k->p, POS(inum.inum, src_offset)))
				break;

			copy = bch2_bkey_make_mut_noupdate(trans, k);
			if ((ret = PTR_ERR_OR_ZERO(copy)))
				goto btree_err;

			if (insert &&
			    bkey_lt(bkey_start_pos(k.k), src_pos)) {
				bch2_cut_front(src_pos, copy);

				/* Splitting compressed extent? */
				bch2_disk_reservation_add(c, &disk_res,
						copy->k.size *
						bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
						BCH_DISK_RESERVATION_NOFAIL);
			}

			bkey_init(&delete.k);
			delete.k.p = copy->k.p;
			delete.k.p.snapshot = snapshot;
			delete.k.size = copy->k.size;

			copy->k.p.offset += shift;
			copy->k.p.snapshot = snapshot;

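			/*
			 * Persist our position in the logged op so that a
			 * resume after a crash picks up at the next extent
			 * instead of re-shifting ones already moved:
			 */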
			op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);

			ret = bch2_bkey_set_needs_rebalance(c, &opts, copy) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
				bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
				bch2_logged_op_update(trans, &op->k_i) ?:
				bch2_trans_commit(trans, &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc);
btree_err:
			bch2_disk_reservation_put(c, &disk_res);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;

			pos = le64_to_cpu(op->v.pos);
		}

		op->v.state = LOGGED_OP_FINSERT_finish;

		if (!insert) {
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, src_offset, shift, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		} else {
			/* We need an inode update to update bi_journal_seq for fsync: */
			ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					adjust_i_size(trans, inum, 0, 0, warn_errors) ?:
					bch2_logged_op_update(trans, &op->k_i));
		}

		break;
	case LOGGED_OP_FINSERT_finish:
		break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	if (warn_errors)
		bch_err_fn(c, ret);
	return ret;
}

int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
{
	return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
}

int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
			   u64 offset, u64 len, bool insert,
			   s64 *i_sectors_delta)
{
	struct bkey_i_logged_op_finsert op;
	s64 shift = insert ? len : -len;

	bkey_logged_op_finsert_init(&op.k_i);
	op.v.subvol = cpu_to_le32(inum.subvol);
	op.v.inum = cpu_to_le64(inum.inum);
	op.v.dst_offset = cpu_to_le64(offset + shift);
	op.v.src_offset = cpu_to_le64(offset);
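	/*
	 * Starting scan position for the shift: inserts walk backwards from
	 * the end of the keyspace, collapses walk forwards from the region
	 * being removed:
	 */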
	op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);

	/*
	 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
	 * snapshot while they're in progress, then crashing, will result in the
	 * resume only proceeding in one of the snapshots
	 */
	down_read(&c->snapshot_create_lock);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = bch2_logged_op_start(trans, &op.k_i);
	if (ret)
		goto out;
	ret = __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta);
	ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
out:
	bch2_trans_put(trans);
	up_read(&c->snapshot_create_lock);

	return ret;
}