xref: /linux/fs/bcachefs/io_write.c (revision 7fffcb5cceea5cec643da76671607c6cc5c8e8be)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
4  * Copyright 2012 Google, Inc.
5  */
6 
7 #include "bcachefs.h"
8 #include "alloc_foreground.h"
9 #include "bkey_buf.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "clock.h"
15 #include "compress.h"
16 #include "debug.h"
17 #include "ec.h"
18 #include "error.h"
19 #include "extent_update.h"
20 #include "inode.h"
21 #include "io_write.h"
22 #include "journal.h"
23 #include "keylist.h"
24 #include "move.h"
25 #include "nocow_locking.h"
26 #include "rebalance.h"
27 #include "subvolume.h"
28 #include "super.h"
29 #include "super-io.h"
30 #include "trace.h"
31 
32 #include <linux/blkdev.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/sched/mm.h>
36 
37 #ifdef CONFIG_BCACHEFS_DEBUG
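/*
 * Debug-only fault injection knob: judging by how the value is consumed by
 * bch2_maybe_corrupt_bio() in bch2_write_extent() below, a nonzero ratio
 * corrupts roughly one out of every write_corrupt_ratio written bios, so the
 * read-side checksum and retry paths can be exercised.
 */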
38 static unsigned bch2_write_corrupt_ratio;
39 module_param_named(write_corrupt_ratio, bch2_write_corrupt_ratio, uint, 0644);
40 MODULE_PARM_DESC(write_corrupt_ratio, "");
41 #endif
42 
43 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
44 
45 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
46 				       u64 now, int rw)
47 {
48 	u64 latency_capable =
49 		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
50 	/* ideally we'd be taking into account the device's variance here: */
51 	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
52 	s64 latency_over = io_latency - latency_threshold;
53 
54 	if (latency_threshold && latency_over > 0) {
55 		/*
56 		 * bump up congested by approximately latency_over * 4 /
57 		 * latency_threshold - we don't need much accuracy here so don't
58 		 * bother with the divide:
59 		 */
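		/*
		 * e.g. with latency_threshold = 1024 (in local_clock() units),
		 * the shift below is ilog2(1024) - 2 = 8, so we add
		 * latency_over >> 8 - roughly latency_over * 4 / 1024, as
		 * described above.
		 */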
60 		if (atomic_read(&ca->congested) < CONGESTED_MAX)
61 			atomic_add(latency_over >>
62 				   max_t(int, ilog2(latency_threshold) - 2, 0),
63 				   &ca->congested);
64 
65 		ca->congested_last = now;
66 	} else if (atomic_read(&ca->congested) > 0) {
67 		atomic_dec(&ca->congested);
68 	}
69 }
70 
71 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
72 {
73 	atomic64_t *latency = &ca->cur_latency[rw];
74 	u64 now = local_clock();
75 	u64 io_latency = time_after64(now, submit_time)
76 		? now - submit_time
77 		: 0;
78 	u64 old, new;
79 
80 	old = atomic64_read(latency);
81 	do {
82 		/*
83 		 * If the io latency was reasonably close to the current
84 		 * latency, skip doing the update and atomic operation - most of
85 		 * the time:
86 		 */
87 		if (abs((int) (old - io_latency)) < (old >> 1) &&
88 		    now & ~(~0U << 5))
89 			break;
90 
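		/*
		 * Fold the new sample into an exponentially weighted moving
		 * average with weight 2^5 - assuming the usual bcachefs
		 * ewma_add() macro, new = (old * 31 + io_latency) / 32, so a
		 * single outlier only moves the tracked latency by ~3%.
		 */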
91 		new = ewma_add(old, io_latency, 5);
92 	} while (!atomic64_try_cmpxchg(latency, &old, new));
93 
94 	bch2_congested_acct(ca, io_latency, now, rw);
95 
96 	__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
97 }
98 
99 #endif
100 
101 /* Allocate, free from mempool: */
102 
103 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
104 {
105 	struct bvec_iter_all iter;
106 	struct bio_vec *bv;
107 
108 	bio_for_each_segment_all(bv, bio, iter)
109 		if (bv->bv_page != ZERO_PAGE(0))
110 			mempool_free(bv->bv_page, &c->bio_bounce_pages);
111 	bio->bi_vcnt = 0;
112 }
113 
114 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
115 {
116 	struct page *page;
117 
118 	if (likely(!*using_mempool)) {
119 		page = alloc_page(GFP_NOFS);
120 		if (unlikely(!page)) {
121 			mutex_lock(&c->bio_bounce_pages_lock);
122 			*using_mempool = true;
123 			goto pool_alloc;
124 
125 		}
126 	} else {
127 pool_alloc:
128 		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
129 	}
130 
131 	return page;
132 }
133 
134 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
135 			       size_t size)
136 {
137 	bool using_mempool = false;
138 
139 	while (size) {
140 		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
141 		unsigned len = min_t(size_t, PAGE_SIZE, size);
142 
143 		BUG_ON(!bio_add_page(bio, page, len, 0));
144 		size -= len;
145 	}
146 
147 	if (using_mempool)
148 		mutex_unlock(&c->bio_bounce_pages_lock);
149 }
150 
151 /* Extent update path: */
152 
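/*
 * bch2_sum_sector_overwrites() - given an extent about to be inserted, walk
 * the existing extents it overlaps and compute how the accounting will change:
 *
 * @usage_increasing:	set if the new key may consume more space than what it
 *			overwrites (different snapshot, more replicas, or
 *			replacing compressed data with uncompressed)
 * @i_sectors_delta:	change in the inode's i_sectors (allocated sectors)
 * @disk_sectors_delta:	change in on-disk sectors, counting all replicas
 */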
153 int bch2_sum_sector_overwrites(struct btree_trans *trans,
154 			       struct btree_iter *extent_iter,
155 			       struct bkey_i *new,
156 			       bool *usage_increasing,
157 			       s64 *i_sectors_delta,
158 			       s64 *disk_sectors_delta)
159 {
160 	struct bch_fs *c = trans->c;
161 	struct btree_iter iter;
162 	struct bkey_s_c old;
163 	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
164 	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
165 	int ret = 0;
166 
167 	*usage_increasing	= false;
168 	*i_sectors_delta	= 0;
169 	*disk_sectors_delta	= 0;
170 
171 	bch2_trans_copy_iter(trans, &iter, extent_iter);
172 
173 	for_each_btree_key_max_continue_norestart(trans, iter,
174 				new->k.p, BTREE_ITER_slots, old, ret) {
175 		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
176 			max(bkey_start_offset(&new->k),
177 			    bkey_start_offset(old.k));
178 
179 		*i_sectors_delta += sectors *
180 			(bkey_extent_is_allocation(&new->k) -
181 			 bkey_extent_is_allocation(old.k));
182 
183 		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
184 		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
185 			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
186 			: 0;
187 
188 		if (!*usage_increasing &&
189 		    (new->k.p.snapshot != old.k->p.snapshot ||
190 		     new_replicas > bch2_bkey_replicas(c, old) ||
191 		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
192 			*usage_increasing = true;
193 
194 		if (bkey_ge(old.k->p, new->k.p))
195 			break;
196 	}
197 
198 	bch2_trans_iter_exit(trans, &iter);
199 	return ret;
200 }
201 
202 static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
203 						    struct btree_iter *extent_iter,
204 						    u64 new_i_size,
205 						    s64 i_sectors_delta)
206 {
207 	/*
208 	 * Crazy performance optimization:
209 	 * Every extent update needs to also update the inode: the inode trigger
210 	 * will set bi->journal_seq to the journal sequence number of this
211 	 * transaction - for fsync.
212 	 *
213 	 * But if that's the only reason we're updating the inode (we're not
214 	 * updating bi_size or bi_sectors), then we don't need the inode update
215 	 * to be journalled - if we crash, the bi_journal_seq update will be
216 	 * lost, but that's fine.
217 	 */
218 	unsigned inode_update_flags = BTREE_UPDATE_nojournal;
219 
220 	struct btree_iter iter;
221 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
222 			      SPOS(0,
223 				   extent_iter->pos.inode,
224 				   extent_iter->snapshot),
225 			      BTREE_ITER_intent|
226 			      BTREE_ITER_cached);
227 	int ret = bkey_err(k);
228 	if (unlikely(ret))
229 		return ret;
230 
231 	/*
232 	 * varint_decode_fast(), in the inode .invalid method, reads up to 7
233 	 * bytes past the end of the buffer:
234 	 */
235 	struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
236 	ret = PTR_ERR_OR_ZERO(k_mut);
237 	if (unlikely(ret))
238 		goto err;
239 
240 	bkey_reassemble(k_mut, k);
241 
242 	if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
243 		k_mut = bch2_inode_to_v3(trans, k_mut);
244 		ret = PTR_ERR_OR_ZERO(k_mut);
245 		if (unlikely(ret))
246 			goto err;
247 	}
248 
249 	struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);
250 
251 	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
252 	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
253 		inode->v.bi_size = cpu_to_le64(new_i_size);
254 		inode_update_flags = 0;
255 	}
256 
257 	if (i_sectors_delta) {
258 		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
259 		inode_update_flags = 0;
260 	}
261 
262 	if (inode->k.p.snapshot != iter.snapshot) {
263 		inode->k.p.snapshot = iter.snapshot;
264 		inode_update_flags = 0;
265 	}
266 
267 	ret = bch2_trans_update(trans, &iter, &inode->k_i,
268 				BTREE_UPDATE_internal_snapshot_node|
269 				inode_update_flags);
270 err:
271 	bch2_trans_iter_exit(trans, &iter);
272 	return ret;
273 }
274 
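/*
 * bch2_extent_update() - insert one extent and do the associated bookkeeping
 * in a single transaction commit: trim @k so the update can be done
 * atomically, sum up what it overwrites, grow @disk_res if the overwrite
 * needs more space (failing for lack of space only when usage is actually
 * increasing and @check_enospc is set), update the inode's i_size/i_sectors,
 * and commit. On success the iterator position is advanced to the end of the
 * inserted extent so the caller can loop.
 */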
275 int bch2_extent_update(struct btree_trans *trans,
276 		       subvol_inum inum,
277 		       struct btree_iter *iter,
278 		       struct bkey_i *k,
279 		       struct disk_reservation *disk_res,
280 		       u64 new_i_size,
281 		       s64 *i_sectors_delta_total,
282 		       bool check_enospc)
283 {
284 	struct bpos next_pos;
285 	bool usage_increasing;
286 	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
287 	int ret;
288 
289 	/*
290 	 * This traverses the iterator for us without changing iter->path->pos to
291 	 * search_key() (which is pos + 1 for extents): we want there to be a
292 	 * path already traversed at iter->pos because
293 	 * bch2_trans_extent_update() will use it to attempt extent merging
294 	 */
295 	ret = __bch2_btree_iter_traverse(trans, iter);
296 	if (ret)
297 		return ret;
298 
299 	ret = bch2_extent_trim_atomic(trans, iter, k);
300 	if (ret)
301 		return ret;
302 
303 	next_pos = k->k.p;
304 
305 	ret = bch2_sum_sector_overwrites(trans, iter, k,
306 			&usage_increasing,
307 			&i_sectors_delta,
308 			&disk_sectors_delta);
309 	if (ret)
310 		return ret;
311 
312 	if (disk_res &&
313 	    disk_sectors_delta > (s64) disk_res->sectors) {
314 		ret = bch2_disk_reservation_add(trans->c, disk_res,
315 					disk_sectors_delta - disk_res->sectors,
316 					!check_enospc || !usage_increasing
317 					? BCH_DISK_RESERVATION_NOFAIL : 0);
318 		if (ret)
319 			return ret;
320 	}
321 
322 	/*
323 	 * Note:
324 	 * We always have to do an inode update - even when i_size/i_sectors
325 	 * aren't changing - for fsync to work properly; fsync relies on
326 	 * inode->bi_journal_seq which is updated by the trigger code:
327 	 */
328 	ret =   bch2_extent_update_i_size_sectors(trans, iter,
329 						  min(k->k.p.offset << 9, new_i_size),
330 						  i_sectors_delta) ?:
331 		bch2_trans_update(trans, iter, k, 0) ?:
332 		bch2_trans_commit(trans, disk_res, NULL,
333 				BCH_TRANS_COMMIT_no_check_rw|
334 				BCH_TRANS_COMMIT_no_enospc);
335 	if (unlikely(ret))
336 		return ret;
337 
338 	if (i_sectors_delta_total)
339 		*i_sectors_delta_total += i_sectors_delta;
340 	bch2_btree_iter_set_pos(trans, iter, next_pos);
341 	return 0;
342 }
343 
344 static int bch2_write_index_default(struct bch_write_op *op)
345 {
346 	struct bch_fs *c = op->c;
347 	struct bkey_buf sk;
348 	struct keylist *keys = &op->insert_keys;
349 	struct bkey_i *k = bch2_keylist_front(keys);
350 	struct btree_trans *trans = bch2_trans_get(c);
351 	struct btree_iter iter;
352 	subvol_inum inum = {
353 		.subvol = op->subvol,
354 		.inum	= k->k.p.inode,
355 	};
356 	int ret;
357 
358 	BUG_ON(!inum.subvol);
359 
360 	bch2_bkey_buf_init(&sk);
361 
362 	do {
363 		bch2_trans_begin(trans);
364 
365 		k = bch2_keylist_front(keys);
366 		bch2_bkey_buf_copy(&sk, c, k);
367 
368 		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
369 						  &sk.k->k.p.snapshot);
370 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
371 			continue;
372 		if (ret)
373 			break;
374 
375 		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
376 				     bkey_start_pos(&sk.k->k),
377 				     BTREE_ITER_slots|BTREE_ITER_intent);
378 
379 		ret =   bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?:
380 			bch2_extent_update(trans, inum, &iter, sk.k,
381 					&op->res,
382 					op->new_i_size, &op->i_sectors_delta,
383 					op->flags & BCH_WRITE_check_enospc);
384 		bch2_trans_iter_exit(trans, &iter);
385 
386 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
387 			continue;
388 		if (ret)
389 			break;
390 
391 		if (bkey_ge(iter.pos, k->k.p))
392 			bch2_keylist_pop_front(&op->insert_keys);
393 		else
394 			bch2_cut_front(iter.pos, k);
395 	} while (!bch2_keylist_empty(keys));
396 
397 	bch2_trans_put(trans);
398 	bch2_bkey_buf_exit(&sk, c);
399 
400 	return ret;
401 }
402 
403 /* Writes */
404 
405 void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...)
406 {
407 	struct printbuf buf = PRINTBUF;
408 
409 	if (op->subvol) {
410 		bch2_inum_offset_err_msg(op->c, &buf,
411 					 (subvol_inum) { op->subvol, op->pos.inode, },
412 					 offset << 9);
413 	} else {
414 		struct bpos pos = op->pos;
415 		pos.offset = offset;
416 		bch2_inum_snap_offset_err_msg(op->c, &buf, pos);
417 	}
418 
419 	prt_str(&buf, "write error: ");
420 
421 	va_list args;
422 	va_start(args, fmt);
423 	prt_vprintf(&buf, fmt, args);
424 	va_end(args);
425 
426 	if (op->flags & BCH_WRITE_move) {
427 		struct data_update *u = container_of(op, struct data_update, op);
428 
429 		prt_printf(&buf, "\n  from internal move ");
430 		bch2_bkey_val_to_text(&buf, op->c, bkey_i_to_s_c(u->k.k));
431 	}
432 
433 	bch_err_ratelimited(op->c, "%s", buf.buf);
434 	printbuf_exit(&buf);
435 }
436 
437 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
438 			       enum bch_data_type type,
439 			       const struct bkey_i *k,
440 			       bool nocow)
441 {
442 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
443 	struct bch_write_bio *n;
444 
445 	BUG_ON(c->opts.nochanges);
446 
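	/*
	 * Submit one bio per pointer (i.e. per replica): every pointer except
	 * the last gets a clone of @wbio, the last one reuses @wbio itself;
	 * bio_inc_remaining() keeps the parent bio alive until all of the
	 * splits have completed.
	 */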
447 	bkey_for_each_ptr(ptrs, ptr) {
448 		/*
449 		 * XXX: btree writes should be using io_ref[WRITE], but we
450 		 * aren't retrying failed btree writes yet (due to device
451 		 * removal/ro):
452 		 */
453 		struct bch_dev *ca = nocow
454 			? bch2_dev_have_ref(c, ptr->dev)
455 			: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
456 
457 		if (to_entry(ptr + 1) < ptrs.end) {
458 			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
459 
460 			n->bio.bi_end_io	= wbio->bio.bi_end_io;
461 			n->bio.bi_private	= wbio->bio.bi_private;
462 			n->parent		= wbio;
463 			n->split		= true;
464 			n->bounce		= false;
465 			n->put_bio		= true;
466 			n->bio.bi_opf		= wbio->bio.bi_opf;
467 			bio_inc_remaining(&wbio->bio);
468 		} else {
469 			n = wbio;
470 			n->split		= false;
471 		}
472 
473 		n->c			= c;
474 		n->dev			= ptr->dev;
475 		n->have_ioref		= ca != NULL;
476 		n->nocow		= nocow;
477 		n->submit_time		= local_clock();
478 		n->inode_offset		= bkey_start_offset(&k->k);
479 		if (nocow)
480 			n->nocow_bucket	= PTR_BUCKET_NR(ca, ptr);
481 		n->bio.bi_iter.bi_sector = ptr->offset;
482 
483 		if (likely(n->have_ioref)) {
484 			this_cpu_add(ca->io_done->sectors[WRITE][type],
485 				     bio_sectors(&n->bio));
486 
487 			bio_set_dev(&n->bio, ca->disk_sb.bdev);
488 
489 			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
490 				bio_endio(&n->bio);
491 				continue;
492 			}
493 
494 			submit_bio(&n->bio);
495 		} else {
496 			n->bio.bi_status	= BLK_STS_REMOVED;
497 			bio_endio(&n->bio);
498 		}
499 	}
500 }
501 
502 static void __bch2_write(struct bch_write_op *);
503 
504 static void bch2_write_done(struct closure *cl)
505 {
506 	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
507 	struct bch_fs *c = op->c;
508 
509 	EBUG_ON(op->open_buckets.nr);
510 
511 	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
512 	bch2_disk_reservation_put(c, &op->res);
513 
514 	if (!(op->flags & BCH_WRITE_move))
515 		bch2_write_ref_put(c, BCH_WRITE_REF_write);
516 	bch2_keylist_free(&op->insert_keys, op->inline_keys);
517 
518 	EBUG_ON(cl->parent);
519 	closure_debug_destroy(cl);
520 	if (op->end_io)
521 		op->end_io(op);
522 }
523 
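/*
 * Data write error handling: drop pointers to the devices that reported IO
 * errors from each key we were about to insert; if a key loses all of its
 * pointers that way, the data for that key is lost and the whole write op
 * fails with -BCH_ERR_data_write_io.
 */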
524 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
525 {
526 	struct keylist *keys = &op->insert_keys;
527 	struct bkey_i *src, *dst = keys->keys, *n;
528 
529 	for (src = keys->keys; src != keys->top; src = n) {
530 		n = bkey_next(src);
531 
532 		if (bkey_extent_is_direct_data(&src->k)) {
533 			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
534 					    test_bit(ptr->dev, op->failed.d));
535 
536 			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
537 				return -BCH_ERR_data_write_io;
538 		}
539 
540 		if (dst != src)
541 			memmove_u64s_down(dst, src, src->k.u64s);
542 		dst = bkey_next(dst);
543 	}
544 
545 	keys->top = dst;
546 	return 0;
547 }
548 
549 /**
550  * __bch2_write_index - after a write, update index to point to new data
551  * @op:		bch_write_op to process
552  */
553 static void __bch2_write_index(struct bch_write_op *op)
554 {
555 	struct bch_fs *c = op->c;
556 	struct keylist *keys = &op->insert_keys;
557 	unsigned dev;
558 	int ret = 0;
559 
560 	if (unlikely(op->flags & BCH_WRITE_io_error)) {
561 		ret = bch2_write_drop_io_error_ptrs(op);
562 		if (ret)
563 			goto err;
564 	}
565 
566 	if (!bch2_keylist_empty(keys)) {
567 		u64 sectors_start = keylist_sectors(keys);
568 
569 		ret = !(op->flags & BCH_WRITE_move)
570 			? bch2_write_index_default(op)
571 			: bch2_data_update_index_update(op);
572 
573 		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
574 		BUG_ON(keylist_sectors(keys) && !ret);
575 
576 		op->written += sectors_start - keylist_sectors(keys);
577 
578 		if (unlikely(ret && !bch2_err_matches(ret, EROFS))) {
579 			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
580 
581 			bch2_write_op_error(op, bkey_start_offset(&insert->k),
582 					    "btree update error: %s", bch2_err_str(ret));
583 		}
584 
585 		if (ret)
586 			goto err;
587 	}
588 out:
589 	/* If a bucket wasn't written, we can't erasure code it: */
590 	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
591 		bch2_open_bucket_write_error(c, &op->open_buckets, dev, -BCH_ERR_data_write_io);
592 
593 	bch2_open_buckets_put(c, &op->open_buckets);
594 	return;
595 err:
596 	keys->top = keys->keys;
597 	op->error = ret;
598 	op->flags |= BCH_WRITE_submitted;
599 	goto out;
600 }
601 
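/*
 * Write point state accounting: track how long each write point spends
 * runnable, waiting on data IO, or waiting on the index update worker; this
 * is used for statistics/debugging, not for any scheduling decisions in this
 * file.
 */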
602 static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
603 {
604 	if (state != wp->state) {
605 		struct task_struct *p = current;
606 		u64 now = ktime_get_ns();
607 		u64 runtime = p->se.sum_exec_runtime +
608 			(now - p->se.exec_start);
609 
610 		if (state == WRITE_POINT_runnable)
611 			wp->last_runtime = runtime;
612 		else if (wp->state == WRITE_POINT_runnable)
613 			wp->time[WRITE_POINT_running] += runtime - wp->last_runtime;
614 
615 		if (wp->last_state_change &&
616 		    time_after64(now, wp->last_state_change))
617 			wp->time[wp->state] += now - wp->last_state_change;
618 		wp->state = state;
619 		wp->last_state_change = now;
620 	}
621 }
622 
623 static inline void wp_update_state(struct write_point *wp, bool running)
624 {
625 	enum write_point_state state;
626 
627 	state = running			 ? WRITE_POINT_runnable:
628 		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
629 					 : WRITE_POINT_stopped;
630 
631 	__wp_update_state(wp, state);
632 }
633 
634 static CLOSURE_CALLBACK(bch2_write_index)
635 {
636 	closure_type(op, struct bch_write_op, cl);
637 	struct write_point *wp = op->wp;
638 	struct workqueue_struct *wq = index_update_wq(op);
639 	unsigned long flags;
640 
641 	if ((op->flags & BCH_WRITE_submitted) &&
642 	    (op->flags & BCH_WRITE_move))
643 		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
644 
645 	spin_lock_irqsave(&wp->writes_lock, flags);
646 	if (wp->state == WRITE_POINT_waiting_io)
647 		__wp_update_state(wp, WRITE_POINT_waiting_work);
648 	list_add_tail(&op->wp_list, &wp->writes);
649 	spin_unlock_irqrestore(&wp->writes_lock, flags);
650 
651 	queue_work(wq, &wp->index_update_work);
652 }
653 
654 static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
655 {
656 	op->wp = wp;
657 
658 	if (wp->state == WRITE_POINT_stopped) {
659 		spin_lock_irq(&wp->writes_lock);
660 		__wp_update_state(wp, WRITE_POINT_waiting_io);
661 		spin_unlock_irq(&wp->writes_lock);
662 	}
663 }
664 
665 void bch2_write_point_do_index_updates(struct work_struct *work)
666 {
667 	struct write_point *wp =
668 		container_of(work, struct write_point, index_update_work);
669 	struct bch_write_op *op;
670 
671 	while (1) {
672 		spin_lock_irq(&wp->writes_lock);
673 		op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list);
674 		wp_update_state(wp, op != NULL);
675 		spin_unlock_irq(&wp->writes_lock);
676 
677 		if (!op)
678 			break;
679 
680 		op->flags |= BCH_WRITE_in_worker;
681 
682 		__bch2_write_index(op);
683 
684 		if (!(op->flags & BCH_WRITE_submitted))
685 			__bch2_write(op);
686 		else
687 			bch2_write_done(&op->cl);
688 	}
689 }
690 
691 static void bch2_write_endio(struct bio *bio)
692 {
693 	struct closure *cl		= bio->bi_private;
694 	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
695 	struct bch_write_bio *wbio	= to_wbio(bio);
696 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
697 	struct bch_fs *c		= wbio->c;
698 	struct bch_dev *ca		= wbio->have_ioref
699 		? bch2_dev_have_ref(c, wbio->dev)
700 		: NULL;
701 
702 	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
703 				   wbio->submit_time, !bio->bi_status);
704 
705 	if (unlikely(bio->bi_status)) {
706 		if (ca)
707 			bch_err_inum_offset_ratelimited(ca,
708 					    op->pos.inode,
709 					    wbio->inode_offset << 9,
710 					    "data write error: %s",
711 					    bch2_blk_status_to_str(bio->bi_status));
712 		else
713 			bch_err_inum_offset_ratelimited(c,
714 					    op->pos.inode,
715 					    wbio->inode_offset << 9,
716 					    "data write error: %s",
717 					    bch2_blk_status_to_str(bio->bi_status));
718 		set_bit(wbio->dev, op->failed.d);
719 		op->flags |= BCH_WRITE_io_error;
720 	}
721 
722 	if (wbio->nocow) {
723 		bch2_bucket_nocow_unlock(&c->nocow_locks,
724 					 POS(ca->dev_idx, wbio->nocow_bucket),
725 					 BUCKET_NOCOW_LOCK_UPDATE);
726 		set_bit(wbio->dev, op->devs_need_flush->d);
727 	}
728 
729 	if (wbio->have_ioref)
730 		percpu_ref_put(&ca->io_ref[WRITE]);
731 
732 	if (wbio->bounce)
733 		bch2_bio_free_pages_pool(c, bio);
734 
735 	if (wbio->put_bio)
736 		bio_put(bio);
737 
738 	if (parent)
739 		bio_endio(&parent->bio);
740 	else
741 		closure_put(cl);
742 }
743 
744 static void init_append_extent(struct bch_write_op *op,
745 			       struct write_point *wp,
746 			       struct bversion version,
747 			       struct bch_extent_crc_unpacked crc)
748 {
749 	struct bkey_i_extent *e;
750 
751 	op->pos.offset += crc.uncompressed_size;
752 
753 	e = bkey_extent_init(op->insert_keys.top);
754 	e->k.p		= op->pos;
755 	e->k.size	= crc.uncompressed_size;
756 	e->k.bversion	= version;
757 
758 	if (crc.csum_type ||
759 	    crc.compression_type ||
760 	    crc.nonce)
761 		bch2_extent_crc_append(&e->k_i, crc);
762 
763 	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
764 				       op->flags & BCH_WRITE_cached);
765 
766 	bch2_keylist_push(&op->insert_keys);
767 }
768 
769 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
770 					struct write_point *wp,
771 					struct bio *src,
772 					bool *page_alloc_failed,
773 					void *buf)
774 {
775 	struct bch_write_bio *wbio;
776 	struct bio *bio;
777 	unsigned output_available =
778 		min(wp->sectors_free << 9, src->bi_iter.bi_size);
779 	unsigned pages = DIV_ROUND_UP(output_available +
780 				      (buf
781 				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
782 				       : 0), PAGE_SIZE);
783 
784 	pages = min(pages, BIO_MAX_VECS);
785 
786 	bio = bio_alloc_bioset(NULL, pages, 0,
787 			       GFP_NOFS, &c->bio_write);
788 	wbio			= wbio_init(bio);
789 	wbio->put_bio		= true;
790 	/* copy WRITE_SYNC flag */
791 	wbio->bio.bi_opf	= src->bi_opf;
792 
793 	if (buf) {
794 		bch2_bio_map(bio, buf, output_available);
795 		return bio;
796 	}
797 
798 	wbio->bounce		= true;
799 
800 	/*
801 	 * We can't use mempool for more than c->sb.encoded_extent_max
802 	 * worth of pages, but we'd like to allocate more if we can:
803 	 */
804 	bch2_bio_alloc_pages_pool(c, bio,
805 				  min_t(unsigned, output_available,
806 					c->opts.encoded_extent_max));
807 
808 	if (bio->bi_iter.bi_size < output_available)
809 		*page_alloc_failed =
810 			bch2_bio_alloc_pages(bio,
811 					     output_available -
812 					     bio->bi_iter.bi_size,
813 					     GFP_NOFS) != 0;
814 
815 	return bio;
816 }
817 
818 static int bch2_write_rechecksum(struct bch_fs *c,
819 				 struct bch_write_op *op,
820 				 unsigned new_csum_type)
821 {
822 	struct bio *bio = &op->wbio.bio;
823 	struct bch_extent_crc_unpacked new_crc;
824 
825 	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */
826 
827 	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
828 	    bch2_csum_type_is_encryption(new_csum_type))
829 		new_csum_type = op->crc.csum_type;
830 
831 	int ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
832 				      NULL, &new_crc,
833 				      op->crc.offset, op->crc.live_size,
834 				      new_csum_type);
835 	if (ret)
836 		return ret;
837 
838 	bio_advance(bio, op->crc.offset << 9);
839 	bio->bi_iter.bi_size = op->crc.live_size << 9;
840 	op->crc = new_crc;
841 	return 0;
842 }
843 
844 static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
845 {
846 	struct bch_fs *c = op->c;
847 	struct bio *bio = &op->wbio.bio;
848 	struct bch_csum csum;
849 	int ret = 0;
850 
851 	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
852 
853 	/* Can we just write the entire extent as is? */
854 	if (op->crc.uncompressed_size == op->crc.live_size &&
855 	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
856 	    op->crc.compressed_size <= wp->sectors_free &&
857 	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
858 	     op->incompressible)) {
859 		if (!crc_is_compressed(op->crc) &&
860 		    op->csum_type != op->crc.csum_type) {
861 			ret = bch2_write_rechecksum(c, op, op->csum_type);
862 			if (ret)
863 				return ret;
864 		}
865 
866 		return 1;
867 	}
868 
869 	/*
870 	 * If the data is compressed and we couldn't write the entire extent as
871 	 * is, we have to decompress it:
872 	 */
873 	if (crc_is_compressed(op->crc)) {
874 		/* Last point we can still verify checksum: */
875 		struct nonce nonce = extent_nonce(op->version, op->crc);
876 		csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
877 		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
878 			goto csum_err;
879 
880 		if (bch2_csum_type_is_encryption(op->crc.csum_type)) {
881 			ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
882 			if (ret)
883 				return ret;
884 
885 			op->crc.csum_type = 0;
886 			op->crc.csum = (struct bch_csum) { 0, 0 };
887 		}
888 
889 		ret = bch2_bio_uncompress_inplace(op, bio);
890 		if (ret)
891 			return ret;
892 	}
893 
894 	/*
895 	 * No longer have compressed data after this point - data might be
896 	 * encrypted:
897 	 */
898 
899 	/*
900 	 * If the data is checksummed and we're only writing a subset,
901 	 * rechecksum and adjust bio to point to currently live data:
902 	 */
903 	if (op->crc.live_size != op->crc.uncompressed_size ||
904 	    op->crc.csum_type != op->csum_type) {
905 		ret = bch2_write_rechecksum(c, op, op->csum_type);
906 		if (ret)
907 			return ret;
908 	}
909 
910 	/*
911 	 * If we want to compress the data, it has to be decrypted:
912 	 */
913 	if (bch2_csum_type_is_encryption(op->crc.csum_type) &&
914 	    (op->compression_opt || op->crc.csum_type != op->csum_type)) {
915 		struct nonce nonce = extent_nonce(op->version, op->crc);
916 		csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
917 		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
918 			goto csum_err;
919 
920 		ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
921 		if (ret)
922 			return ret;
923 
924 		op->crc.csum_type = 0;
925 		op->crc.csum = (struct bch_csum) { 0, 0 };
926 	}
927 
928 	return 0;
929 csum_err:
930 	bch2_write_op_error(op, op->pos.offset,
931 		"error verifying existing checksum while moving existing data (memory corruption?)\n"
932 		"  expected %0llx:%0llx got %0llx:%0llx type %s",
933 		op->crc.csum.hi,
934 		op->crc.csum.lo,
935 		csum.hi,
936 		csum.lo,
937 		op->crc.csum_type < BCH_CSUM_NR
938 		? __bch2_csum_types[op->crc.csum_type]
939 		: "(unknown)");
940 	return -BCH_ERR_data_write_csum;
941 }
942 
943 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
944 			     struct bio **_dst)
945 {
946 	struct bch_fs *c = op->c;
947 	struct bio *src = &op->wbio.bio, *dst = src;
948 	struct bvec_iter saved_iter;
949 	void *ec_buf;
950 	unsigned total_output = 0, total_input = 0;
951 	bool bounce = false;
952 	bool page_alloc_failed = false;
953 	int ret, more = 0;
954 
955 	if (op->incompressible)
956 		op->compression_opt = 0;
957 
958 	BUG_ON(!bio_sectors(src));
959 
960 	ec_buf = bch2_writepoint_ec_buf(c, wp);
961 
962 	if (unlikely(op->flags & BCH_WRITE_data_encoded)) {
963 		ret = bch2_write_prep_encoded_data(op, wp);
964 		if (ret < 0)
965 			goto err;
966 		if (ret) {
967 			if (ec_buf) {
968 				dst = bch2_write_bio_alloc(c, wp, src,
969 							   &page_alloc_failed,
970 							   ec_buf);
971 				bio_copy_data(dst, src);
972 				bounce = true;
973 			}
974 			init_append_extent(op, wp, op->version, op->crc);
975 			goto do_write;
976 		}
977 	}
978 
979 	if (ec_buf ||
980 	    op->compression_opt ||
981 	    (op->csum_type &&
982 	     !(op->flags & BCH_WRITE_pages_stable)) ||
983 	    (bch2_csum_type_is_encryption(op->csum_type) &&
984 	     !(op->flags & BCH_WRITE_pages_owned))) {
985 		dst = bch2_write_bio_alloc(c, wp, src,
986 					   &page_alloc_failed,
987 					   ec_buf);
988 		bounce = true;
989 	}
990 
991 #ifdef CONFIG_BCACHEFS_DEBUG
992 	unsigned write_corrupt_ratio = READ_ONCE(bch2_write_corrupt_ratio);
993 	if (!bounce && write_corrupt_ratio) {
994 		dst = bch2_write_bio_alloc(c, wp, src,
995 					   &page_alloc_failed,
996 					   ec_buf);
997 		bounce = true;
998 	}
999 #endif
1000 	saved_iter = dst->bi_iter;
1001 
1002 	do {
1003 		struct bch_extent_crc_unpacked crc = { 0 };
1004 		struct bversion version = op->version;
1005 		size_t dst_len = 0, src_len = 0;
1006 
1007 		if (page_alloc_failed &&
1008 		    dst->bi_iter.bi_size  < (wp->sectors_free << 9) &&
1009 		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
1010 			break;
1011 
1012 		BUG_ON(op->compression_opt &&
1013 		       (op->flags & BCH_WRITE_data_encoded) &&
1014 		       bch2_csum_type_is_encryption(op->crc.csum_type));
1015 		BUG_ON(op->compression_opt && !bounce);
1016 
1017 		crc.compression_type = op->incompressible
1018 			? BCH_COMPRESSION_TYPE_incompressible
1019 			: op->compression_opt
1020 			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
1021 					    op->compression_opt)
1022 			: 0;
1023 		if (!crc_is_compressed(crc)) {
1024 			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
1025 			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
1026 
1027 			if (op->csum_type)
1028 				dst_len = min_t(unsigned, dst_len,
1029 						c->opts.encoded_extent_max);
1030 
1031 			if (bounce) {
1032 				swap(dst->bi_iter.bi_size, dst_len);
1033 				bio_copy_data(dst, src);
1034 				swap(dst->bi_iter.bi_size, dst_len);
1035 			}
1036 
1037 			src_len = dst_len;
1038 		}
1039 
1040 		BUG_ON(!src_len || !dst_len);
1041 
1042 		if (bch2_csum_type_is_encryption(op->csum_type)) {
1043 			if (bversion_zero(version)) {
1044 				version.lo = atomic64_inc_return(&c->key_version);
1045 			} else {
1046 				crc.nonce = op->nonce;
1047 				op->nonce += src_len >> 9;
1048 			}
1049 		}
1050 
1051 		if ((op->flags & BCH_WRITE_data_encoded) &&
1052 		    !crc_is_compressed(crc) &&
1053 		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
1054 		    bch2_csum_type_is_encryption(op->csum_type)) {
1055 			u8 compression_type = crc.compression_type;
1056 			u16 nonce = crc.nonce;
1057 			/*
1058 			 * Note: when we're using rechecksum(), we need to be
1059 			 * checksumming @src because it has all the data our
1060 			 * existing checksum covers - if we bounced (because we
1061 			 * were trying to compress), @dst will only have the
1062 			 * part of the data the new checksum will cover.
1063 			 *
1064 			 * But normally we want to be checksumming post bounce,
1065 			 * because part of the reason for bouncing is so the
1066 			 * data can't be modified (by userspace) while it's in
1067 			 * flight.
1068 			 */
1069 			ret = bch2_rechecksum_bio(c, src, version, op->crc,
1070 					&crc, &op->crc,
1071 					src_len >> 9,
1072 					bio_sectors(src) - (src_len >> 9),
1073 					op->csum_type);
1074 			if (ret)
1075 				goto err;
1076 			/*
1077 			 * bch2_rechecksum_bio() sets compression_type on crc from op->crc;
1078 			 * this isn't always correct, as sometimes we're changing
1079 			 * an extent from uncompressed to incompressible.
1080 			 */
1081 			crc.compression_type = compression_type;
1082 			crc.nonce = nonce;
1083 		} else {
1084 			if ((op->flags & BCH_WRITE_data_encoded) &&
1085 			    (ret = bch2_rechecksum_bio(c, src, version, op->crc,
1086 					NULL, &op->crc,
1087 					src_len >> 9,
1088 					bio_sectors(src) - (src_len >> 9),
1089 					op->crc.csum_type)))
1090 				goto err;
1091 
1092 			crc.compressed_size	= dst_len >> 9;
1093 			crc.uncompressed_size	= src_len >> 9;
1094 			crc.live_size		= src_len >> 9;
1095 
1096 			swap(dst->bi_iter.bi_size, dst_len);
1097 			ret = bch2_encrypt_bio(c, op->csum_type,
1098 					       extent_nonce(version, crc), dst);
1099 			if (ret)
1100 				goto err;
1101 
1102 			crc.csum = bch2_checksum_bio(c, op->csum_type,
1103 					 extent_nonce(version, crc), dst);
1104 			crc.csum_type = op->csum_type;
1105 			swap(dst->bi_iter.bi_size, dst_len);
1106 		}
1107 
1108 		init_append_extent(op, wp, version, crc);
1109 
1110 #ifdef CONFIG_BCACHEFS_DEBUG
1111 		if (write_corrupt_ratio) {
1112 			swap(dst->bi_iter.bi_size, dst_len);
1113 			bch2_maybe_corrupt_bio(dst, write_corrupt_ratio);
1114 			swap(dst->bi_iter.bi_size, dst_len);
1115 		}
1116 #endif
1117 
1118 		if (dst != src)
1119 			bio_advance(dst, dst_len);
1120 		bio_advance(src, src_len);
1121 		total_output	+= dst_len;
1122 		total_input	+= src_len;
1123 	} while (dst->bi_iter.bi_size &&
1124 		 src->bi_iter.bi_size &&
1125 		 wp->sectors_free &&
1126 		 !bch2_keylist_realloc(&op->insert_keys,
1127 				      op->inline_keys,
1128 				      ARRAY_SIZE(op->inline_keys),
1129 				      BKEY_EXTENT_U64s_MAX));
1130 
1131 	more = src->bi_iter.bi_size != 0;
1132 
1133 	dst->bi_iter = saved_iter;
1134 
1135 	if (dst == src && more) {
1136 		BUG_ON(total_output != total_input);
1137 
1138 		dst = bio_split(src, total_input >> 9,
1139 				GFP_NOFS, &c->bio_write);
1140 		wbio_init(dst)->put_bio	= true;
1141 		/* copy WRITE_SYNC flag */
1142 		dst->bi_opf		= src->bi_opf;
1143 	}
1144 
1145 	dst->bi_iter.bi_size = total_output;
1146 do_write:
1147 	*_dst = dst;
1148 	return more;
1149 err:
1150 	if (to_wbio(dst)->bounce)
1151 		bch2_bio_free_pages_pool(c, dst);
1152 	if (to_wbio(dst)->put_bio)
1153 		bio_put(dst);
1154 
1155 	return ret;
1156 }
1157 
1158 static bool bch2_extent_is_writeable(struct bch_write_op *op,
1159 				     struct bkey_s_c k)
1160 {
1161 	struct bch_fs *c = op->c;
1162 	struct bkey_s_c_extent e;
1163 	struct extent_ptr_decoded p;
1164 	const union bch_extent_entry *entry;
1165 	unsigned replicas = 0;
1166 
1167 	if (k.k->type != KEY_TYPE_extent)
1168 		return false;
1169 
1170 	e = bkey_s_c_to_extent(k);
1171 
1172 	rcu_read_lock();
1173 	extent_for_each_ptr_decode(e, p, entry) {
1174 		if (crc_is_encoded(p.crc) || p.has_ec) {
1175 			rcu_read_unlock();
1176 			return false;
1177 		}
1178 
1179 		replicas += bch2_extent_ptr_durability(c, &p);
1180 	}
1181 	rcu_read_unlock();
1182 
1183 	return replicas >= op->opts.data_replicas;
1184 }
1185 
1186 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
1187 						  struct btree_iter *iter,
1188 						  struct bkey_i *orig,
1189 						  struct bkey_s_c k,
1190 						  u64 new_i_size)
1191 {
1192 	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
1193 		/* trace this */
1194 		return 0;
1195 	}
1196 
1197 	struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1198 	int ret = PTR_ERR_OR_ZERO(new);
1199 	if (ret)
1200 		return ret;
1201 
1202 	bch2_cut_front(bkey_start_pos(&orig->k), new);
1203 	bch2_cut_back(orig->k.p, new);
1204 
1205 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
1206 	bkey_for_each_ptr(ptrs, ptr)
1207 		ptr->unwritten = 0;
1208 
1209 	/*
1210 	 * Note that we're not calling bch2_subvolume_get_snapshot() in this path -
1211 	 * that was done when we kicked off the write, and here it's important
1212 	 * that we update the extent that we wrote to - even if a snapshot has
1213 	 * since been created. The write is still outstanding, so we're ok
1214 	 * w.r.t. snapshot atomicity:
1215 	 */
1216 	return  bch2_extent_update_i_size_sectors(trans, iter,
1217 					min(new->k.p.offset << 9, new_i_size), 0) ?:
1218 		bch2_trans_update(trans, iter, new,
1219 				  BTREE_UPDATE_internal_snapshot_node);
1220 }
1221 
1222 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
1223 {
1224 	struct bch_fs *c = op->c;
1225 	struct btree_trans *trans = bch2_trans_get(c);
1226 	int ret = 0;
1227 
1228 	for_each_keylist_key(&op->insert_keys, orig) {
1229 		ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
1230 				     bkey_start_pos(&orig->k), orig->k.p,
1231 				     BTREE_ITER_intent, k,
1232 				     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
1233 			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
1234 		}));
1235 		if (ret)
1236 			break;
1237 	}
1238 
1239 	bch2_trans_put(trans);
1240 
1241 	if (ret && !bch2_err_matches(ret, EROFS)) {
1242 		struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
1243 		bch2_write_op_error(op, bkey_start_offset(&insert->k),
1244 				    "btree update error: %s", bch2_err_str(ret));
1245 	}
1246 
1247 	if (ret)
1248 		op->error = ret;
1249 }
1250 
1251 static void __bch2_nocow_write_done(struct bch_write_op *op)
1252 {
1253 	if (unlikely(op->flags & BCH_WRITE_io_error)) {
1254 		op->error = -BCH_ERR_data_write_io;
1255 	} else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
1256 		bch2_nocow_write_convert_unwritten(op);
1257 }
1258 
1259 static CLOSURE_CALLBACK(bch2_nocow_write_done)
1260 {
1261 	closure_type(op, struct bch_write_op, cl);
1262 
1263 	__bch2_nocow_write_done(op);
1264 	bch2_write_done(cl);
1265 }
1266 
1267 struct bucket_to_lock {
1268 	struct bpos		b;
1269 	unsigned		gen;
1270 	struct nocow_lock_bucket *l;
1271 };
1272 
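/*
 * Nocow writes overwrite already-allocated extents in place instead of
 * allocating new space: look up the existing extent, grab per-device io refs
 * and per-bucket nocow locks, then submit the write directly to the existing
 * pointers. Unwritten extents are converted to written afterwards. If the
 * extent isn't suitable (snapshot mismatch, encoded or erasure coded data,
 * not enough replicas, stale bucket gens), we fall back to the normal COW
 * write path.
 */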
1273 static void bch2_nocow_write(struct bch_write_op *op)
1274 {
1275 	struct bch_fs *c = op->c;
1276 	struct btree_trans *trans;
1277 	struct btree_iter iter;
1278 	struct bkey_s_c k;
1279 	DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
1280 	u32 snapshot;
1281 	struct bucket_to_lock *stale_at;
1282 	int stale, ret;
1283 
1284 	if (op->flags & BCH_WRITE_move)
1285 		return;
1286 
1287 	darray_init(&buckets);
1288 	trans = bch2_trans_get(c);
1289 retry:
1290 	bch2_trans_begin(trans);
1291 
1292 	ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
1293 	if (unlikely(ret))
1294 		goto err;
1295 
1296 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1297 			     SPOS(op->pos.inode, op->pos.offset, snapshot),
1298 			     BTREE_ITER_slots);
1299 	while (1) {
1300 		struct bio *bio = &op->wbio.bio;
1301 
1302 		buckets.nr = 0;
1303 
1304 		ret = bch2_trans_relock(trans);
1305 		if (ret)
1306 			break;
1307 
1308 		k = bch2_btree_iter_peek_slot(trans, &iter);
1309 		ret = bkey_err(k);
1310 		if (ret)
1311 			break;
1312 
1313 		/* fall back to normal cow write path? */
1314 		if (unlikely(k.k->p.snapshot != snapshot ||
1315 			     !bch2_extent_is_writeable(op, k)))
1316 			break;
1317 
1318 		if (bch2_keylist_realloc(&op->insert_keys,
1319 					 op->inline_keys,
1320 					 ARRAY_SIZE(op->inline_keys),
1321 					 k.k->u64s))
1322 			break;
1323 
1324 		/* Get iorefs before dropping btree locks: */
1325 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1326 		bkey_for_each_ptr(ptrs, ptr) {
1327 			struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
1328 			if (unlikely(!ca))
1329 				goto err_get_ioref;
1330 
1331 			struct bpos b = PTR_BUCKET_POS(ca, ptr);
1332 			struct nocow_lock_bucket *l =
1333 				bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
1334 			prefetch(l);
1335 
1336 			/* XXX allocating memory with btree locks held - rare */
1337 			darray_push_gfp(&buckets, ((struct bucket_to_lock) {
1338 						   .b = b, .gen = ptr->gen, .l = l,
1339 						   }), GFP_KERNEL|__GFP_NOFAIL);
1340 
1341 			if (ptr->unwritten)
1342 				op->flags |= BCH_WRITE_convert_unwritten;
1343 		}
1344 
1345 		/* Unlock before taking nocow locks, doing IO: */
1346 		bkey_reassemble(op->insert_keys.top, k);
1347 		bch2_trans_unlock(trans);
1348 
1349 		bch2_cut_front(op->pos, op->insert_keys.top);
1350 		if (op->flags & BCH_WRITE_convert_unwritten)
1351 			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
1352 
1353 		darray_for_each(buckets, i) {
1354 			struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
1355 
1356 			__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
1357 						 bucket_to_u64(i->b),
1358 						 BUCKET_NOCOW_LOCK_UPDATE);
1359 
1360 			int gen = bucket_gen_get(ca, i->b.offset);
1361 			stale = gen < 0 ? gen : gen_after(gen, i->gen);
1362 			if (unlikely(stale)) {
1363 				stale_at = i;
1364 				goto err_bucket_stale;
1365 			}
1366 		}
1367 
1368 		bio = &op->wbio.bio;
1369 		if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
1370 			bio = bio_split(bio, k.k->p.offset - op->pos.offset,
1371 					GFP_KERNEL, &c->bio_write);
1372 			wbio_init(bio)->put_bio = true;
1373 			bio->bi_opf = op->wbio.bio.bi_opf;
1374 		} else {
1375 			op->flags |= BCH_WRITE_submitted;
1376 		}
1377 
1378 		op->pos.offset += bio_sectors(bio);
1379 		op->written += bio_sectors(bio);
1380 
1381 		bio->bi_end_io	= bch2_write_endio;
1382 		bio->bi_private	= &op->cl;
1383 		bio->bi_opf |= REQ_OP_WRITE;
1384 		closure_get(&op->cl);
1385 
1386 		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1387 					  op->insert_keys.top, true);
1388 
1389 		bch2_keylist_push(&op->insert_keys);
1390 		if (op->flags & BCH_WRITE_submitted)
1391 			break;
1392 		bch2_btree_iter_advance(trans, &iter);
1393 	}
1394 out:
1395 	bch2_trans_iter_exit(trans, &iter);
1396 err:
1397 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1398 		goto retry;
1399 
1400 	bch2_trans_put(trans);
1401 	darray_exit(&buckets);
1402 
1403 	if (ret) {
1404 		bch2_write_op_error(op, op->pos.offset,
1405 				    "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
1406 		op->error = ret;
1407 		op->flags |= BCH_WRITE_submitted;
1408 	}
1409 
1410 	/* fallback to cow write path? */
1411 	if (!(op->flags & BCH_WRITE_submitted)) {
1412 		closure_sync(&op->cl);
1413 		__bch2_nocow_write_done(op);
1414 		op->insert_keys.top = op->insert_keys.keys;
1415 	} else if (op->flags & BCH_WRITE_sync) {
1416 		closure_sync(&op->cl);
1417 		bch2_nocow_write_done(&op->cl.work);
1418 	} else {
1419 		/*
1420 		 * XXX
1421 		 * needs to run out of process context because ei_quota_lock is
1422 		 * a mutex
1423 		 */
1424 		continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
1425 	}
1426 	return;
1427 err_get_ioref:
1428 	darray_for_each(buckets, i)
1429 		percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
1430 
1431 	/* Fall back to COW path: */
1432 	goto out;
1433 err_bucket_stale:
1434 	darray_for_each(buckets, i) {
1435 		bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
1436 		if (i == stale_at)
1437 			break;
1438 	}
1439 
1440 	struct printbuf buf = PRINTBUF;
1441 	if (bch2_fs_inconsistent_on(stale < 0, c,
1442 				    "pointer to invalid bucket in nocow path on device %llu\n  %s",
1443 				    stale_at->b.inode,
1444 				    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1445 		ret = -BCH_ERR_data_write_invalid_ptr;
1446 	} else {
1447 		/* We can retry this: */
1448 		ret = -BCH_ERR_transaction_restart;
1449 	}
1450 	printbuf_exit(&buf);
1451 
1452 	goto err_get_ioref;
1453 }
1454 
1455 static void __bch2_write(struct bch_write_op *op)
1456 {
1457 	struct bch_fs *c = op->c;
1458 	struct write_point *wp = NULL;
1459 	struct bio *bio = NULL;
1460 	unsigned nofs_flags;
1461 	int ret;
1462 
1463 	nofs_flags = memalloc_nofs_save();
1464 
1465 	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
1466 		bch2_nocow_write(op);
1467 		if (op->flags & BCH_WRITE_submitted)
1468 			goto out_nofs_restore;
1469 	}
1470 again:
1471 	memset(&op->failed, 0, sizeof(op->failed));
1472 
1473 	do {
1474 		struct bkey_i *key_to_write;
1475 		unsigned key_to_write_offset = op->insert_keys.top_p -
1476 			op->insert_keys.keys_p;
1477 
1478 		/* +1 for possible cache device: */
1479 		if (op->open_buckets.nr + op->nr_replicas + 1 >
1480 		    ARRAY_SIZE(op->open_buckets.v))
1481 			break;
1482 
1483 		if (bch2_keylist_realloc(&op->insert_keys,
1484 					op->inline_keys,
1485 					ARRAY_SIZE(op->inline_keys),
1486 					BKEY_EXTENT_U64s_MAX))
1487 			break;
1488 
1489 		/*
1490 		 * The copygc thread is now global, which means it's no longer
1491 		 * freeing up space on specific disks, which means that
1492 		 * allocations for specific disks may hang arbitrarily long:
1493 		 */
1494 		ret = bch2_trans_run(c, lockrestart_do(trans,
1495 			bch2_alloc_sectors_start_trans(trans,
1496 				op->target,
1497 				op->opts.erasure_code && !(op->flags & BCH_WRITE_cached),
1498 				op->write_point,
1499 				&op->devs_have,
1500 				op->nr_replicas,
1501 				op->nr_replicas_required,
1502 				op->watermark,
1503 				op->flags,
1504 				&op->cl, &wp)));
1505 		if (unlikely(ret)) {
1506 			if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
1507 				break;
1508 
1509 			goto err;
1510 		}
1511 
1512 		EBUG_ON(!wp);
1513 
1514 		bch2_open_bucket_get(c, wp, &op->open_buckets);
1515 		ret = bch2_write_extent(op, wp, &bio);
1516 
1517 		bch2_alloc_sectors_done_inlined(c, wp);
1518 err:
1519 		if (ret <= 0) {
1520 			op->flags |= BCH_WRITE_submitted;
1521 
1522 			if (unlikely(ret < 0)) {
1523 				if (!(op->flags & BCH_WRITE_alloc_nowait))
1524 					bch2_write_op_error(op, op->pos.offset,
1525 							    "%s(): %s", __func__, bch2_err_str(ret));
1526 				op->error = ret;
1527 				break;
1528 			}
1529 		}
1530 
1531 		bio->bi_end_io	= bch2_write_endio;
1532 		bio->bi_private	= &op->cl;
1533 		bio->bi_opf |= REQ_OP_WRITE;
1534 
1535 		closure_get(bio->bi_private);
1536 
1537 		key_to_write = (void *) (op->insert_keys.keys_p +
1538 					 key_to_write_offset);
1539 
1540 		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1541 					  key_to_write, false);
1542 	} while (ret);
1543 
1544 	/*
1545 	 * Sync or no?
1546 	 *
1547 	 * If we're running asynchronously, we may still want to block
1548 	 * synchronously here if we weren't able to submit all of the IO at
1549 	 * once, as that signals backpressure to the caller.
1550 	 */
1551 	if ((op->flags & BCH_WRITE_sync) ||
1552 	    (!(op->flags & BCH_WRITE_submitted) &&
1553 	     !(op->flags & BCH_WRITE_in_worker))) {
1554 		bch2_wait_on_allocator(c, &op->cl);
1555 
1556 		__bch2_write_index(op);
1557 
1558 		if (!(op->flags & BCH_WRITE_submitted))
1559 			goto again;
1560 		bch2_write_done(&op->cl);
1561 	} else {
1562 		bch2_write_queue(op, wp);
1563 		continue_at(&op->cl, bch2_write_index, NULL);
1564 	}
1565 out_nofs_restore:
1566 	memalloc_nofs_restore(nofs_flags);
1567 }
1568 
1569 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1570 {
1571 	struct bio *bio = &op->wbio.bio;
1572 	struct bvec_iter iter;
1573 	struct bkey_i_inline_data *id;
1574 	unsigned sectors;
1575 	int ret;
1576 
1577 	memset(&op->failed, 0, sizeof(op->failed));
1578 
1579 	op->flags |= BCH_WRITE_wrote_data_inline;
1580 	op->flags |= BCH_WRITE_submitted;
1581 
1582 	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1583 
1584 	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1585 				   ARRAY_SIZE(op->inline_keys),
1586 				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1587 	if (ret) {
1588 		op->error = ret;
1589 		goto err;
1590 	}
1591 
1592 	sectors = bio_sectors(bio);
1593 	op->pos.offset += sectors;
1594 
1595 	id = bkey_inline_data_init(op->insert_keys.top);
1596 	id->k.p		= op->pos;
1597 	id->k.bversion	= op->version;
1598 	id->k.size	= sectors;
1599 
1600 	iter = bio->bi_iter;
1601 	iter.bi_size = data_len;
1602 	memcpy_from_bio(id->v.data, bio, iter);
1603 
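	/* zero-pad the inline data out to a whole number of u64s: */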
1604 	while (data_len & 7)
1605 		id->v.data[data_len++] = '\0';
1606 	set_bkey_val_bytes(&id->k, data_len);
1607 	bch2_keylist_push(&op->insert_keys);
1608 
1609 	__bch2_write_index(op);
1610 err:
1611 	bch2_write_done(&op->cl);
1612 }
1613 
1614 /**
1615  * bch2_write() - handle a write to a cache device or flash only volume
1616  * @cl:		&bch_write_op->cl
1617  *
1618  * This is the starting point for any data to end up in a cache device; it could
1619  * be from a normal write, or a writeback write, or a write to a flash only
1620  * volume - it's also used by the moving garbage collector to compact data in
1621  * mostly empty buckets.
1622  *
1623  * It first writes the data to the cache, creating a list of keys to be inserted
1624  * (if the data won't fit in a single open bucket, there will be multiple keys);
1625  * after the data is written it calls bch_journal, and after the keys have been
1626  * added to the next journal write they're inserted into the btree.
1627  *
1628  * If op->discard is true, instead of inserting the data it invalidates the
1629  * region of the cache represented by op->bio and op->inode.
1630  */
1631 CLOSURE_CALLBACK(bch2_write)
1632 {
1633 	closure_type(op, struct bch_write_op, cl);
1634 	struct bio *bio = &op->wbio.bio;
1635 	struct bch_fs *c = op->c;
1636 	unsigned data_len;
1637 
1638 	EBUG_ON(op->cl.parent);
1639 	BUG_ON(!op->nr_replicas);
1640 	BUG_ON(!op->write_point.v);
1641 	BUG_ON(bkey_eq(op->pos, POS_MAX));
1642 
1643 	if (op->flags & BCH_WRITE_only_specified_devs)
1644 		op->flags |= BCH_WRITE_alloc_nowait;
1645 
1646 	op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
1647 	op->start_time = local_clock();
1648 	bch2_keylist_init(&op->insert_keys, op->inline_keys);
1649 	wbio_init(bio)->put_bio = false;
1650 
1651 	if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
1652 		bch2_write_op_error(op, op->pos.offset, "misaligned write");
1653 		op->error = -BCH_ERR_data_write_misaligned;
1654 		goto err;
1655 	}
1656 
1657 	if (c->opts.nochanges) {
1658 		op->error = -BCH_ERR_erofs_no_writes;
1659 		goto err;
1660 	}
1661 
1662 	if (!(op->flags & BCH_WRITE_move) &&
1663 	    !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
1664 		op->error = -BCH_ERR_erofs_no_writes;
1665 		goto err;
1666 	}
1667 
1668 	if (!(op->flags & BCH_WRITE_move))
1669 		this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1670 	bch2_increment_clock(c, bio_sectors(bio), WRITE);
1671 
1672 	data_len = min_t(u64, bio->bi_iter.bi_size,
1673 			 op->new_i_size - (op->pos.offset << 9));
1674 
1675 	if (c->opts.inline_data &&
1676 	    data_len <= min(block_bytes(c) / 2, 1024U)) {
1677 		bch2_write_data_inline(op, data_len);
1678 		return;
1679 	}
1680 
1681 	__bch2_write(op);
1682 	return;
1683 err:
1684 	bch2_disk_reservation_put(c, &op->res);
1685 
1686 	closure_debug_destroy(&op->cl);
1687 	if (op->end_io)
1688 		op->end_io(op);
1689 }
1690 
1691 static const char * const bch2_write_flags[] = {
1692 #define x(f)	#f,
1693 	BCH_WRITE_FLAGS()
1694 #undef x
1695 	NULL
1696 };
1697 
1698 void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
1699 {
1700 	if (!out->nr_tabstops)
1701 		printbuf_tabstop_push(out, 32);
1702 
1703 	prt_printf(out, "pos:\t");
1704 	bch2_bpos_to_text(out, op->pos);
1705 	prt_newline(out);
1706 	printbuf_indent_add(out, 2);
1707 
1708 	prt_printf(out, "started:\t");
1709 	bch2_pr_time_units(out, local_clock() - op->start_time);
1710 	prt_newline(out);
1711 
1712 	prt_printf(out, "flags:\t");
1713 	prt_bitflags(out, bch2_write_flags, op->flags);
1714 	prt_newline(out);
1715 
1716 	prt_printf(out, "nr_replicas:\t%u\n", op->nr_replicas);
1717 	prt_printf(out, "nr_replicas_required:\t%u\n", op->nr_replicas_required);
1718 
1719 	prt_printf(out, "ref:\t%u\n", closure_nr_remaining(&op->cl));
1720 
1721 	printbuf_indent_sub(out, 2);
1722 }
1723 
1724 void bch2_fs_io_write_exit(struct bch_fs *c)
1725 {
1726 	mempool_exit(&c->bio_bounce_pages);
1727 	bioset_exit(&c->replica_set);
1728 	bioset_exit(&c->bio_write);
1729 }
1730 
1731 int bch2_fs_io_write_init(struct bch_fs *c)
1732 {
1733 	if (bioset_init(&c->bio_write,   1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
1734 	    bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
1735 		return -BCH_ERR_ENOMEM_bio_write_init;
1736 
1737 	if (mempool_init_page_pool(&c->bio_bounce_pages,
1738 				   max_t(unsigned,
1739 					 c->opts.btree_node_size,
1740 					 c->opts.encoded_extent_max) /
1741 				   PAGE_SIZE, 0))
1742 		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
1743 
1744 	return 0;
1745 }
1746