xref: /linux/fs/bcachefs/fs-io-direct.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>
#include <linux/task_io_accounting_ops.h>

/* O_DIRECT reads */

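/*
 * The first read bio is embedded in struct dio_read via c->dio_read_bioset
 * (see bch2_fs_fs_io_direct_init()), so container_of() on the bio recovers
 * the dio, and putting the last bio frees the whole structure:
 */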
struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	bool				should_dirty;
	struct bch_read_bio		rbio;
};

static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static CLOSURE_CALLBACK(bch2_dio_read_complete)
{
	closure_type(dio, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	/* bios must be 512 byte aligned: */
	if ((offset|iter->count) & (SECTOR_SIZE - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_READ,
			       GFP_KERNEL,
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
		dio->cl.closure_get_happened = true;
	}
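	/*
	 * What the refcount gymnastics above buy us: in the async case the
	 * closure starts out not-running, with one ref and the destructor
	 * flag set, so the final closure_put() from the last bio's endio
	 * runs bch2_dio_read_complete() directly - no extra atomic sub. In
	 * the sync case we hold an extra ref so that closure_sync() below
	 * can wait for all the split bios to complete.
	 */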

	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to
	 * skip dirtying pages for requests that originate within the kernel
	 * (e.g. from the loopback driver), because we'd deadlock on the page
	 * lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

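	/*
	 * The first bio is the one embedded in the dio, already allocated
	 * above; jump into the middle of the loop to submit it, and allocate
	 * plain bios from c->bio_read for any further splits:
	 */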
	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				       REQ_OP_READ,
				       GFP_KERNEL,
				       &c->bio_read);
		bio->bi_end_io		= bch2_direct_IO_read_split_endio;
start:
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC;
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}

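/*
 * Entry point for reads: O_DIRECT reads flush any overlapping dirty
 * pagecache first so they see current data, then go through
 * bch2_direct_IO_read(); buffered reads take the pagecache add lock to
 * exclude concurrent pagecache shootdowns by the direct IO write path.
 */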
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		if (unlikely(mapping->nrpages)) {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				goto out;
		}

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(inode);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(inode);
	}
out:
	return bch2_err_class(ret);
}

/* O_DIRECT writes */

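/*
 * Write state for one O_DIRECT write. Allocated from c->dio_write_bioset:
 * op.wbio.bio is the embedded bio, which is why op must stay the last
 * member. inline_vecs holds a stashed copy of small iovecs when we go
 * async - see bch2_dio_write_copy_iov():
 */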
struct dio_write {
	struct kiocb			*req;
	struct address_space		*mapping;
	struct bch_inode_info		*inode;
	struct mm_struct		*mm;
	const struct iovec		*iov;
	unsigned			loop:1,
					extending:1,
					sync:1,
					flush:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

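/*
 * Called when a disk reservation can't be obtained: if every extent in the
 * range is already allocated with at least as many replicas as we're
 * writing, and isn't compressed when the new write won't be, then
 * overwriting it in place can't consume new space and the write may
 * proceed without a reservation:
 */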
static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
				       u64 offset, u64 size,
				       unsigned nr_replicas, bool compressed)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 end = offset + size;
	u32 snapshot;
	bool ret = true;
	int err;
retry:
	bch2_trans_begin(trans);

	err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (err)
		goto err;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
			break;

		if (k.k->p.snapshot != snapshot ||
		    nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_put(trans);

	return err ? false : ret;
}

static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	return bch2_check_range_allocated(c, inode_inum(inode),
				dio->op.pos.offset, bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0);
}

static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, and we're not guaranteed that it will live for the
 * duration of the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/*
	 * A ubuf iov_iter points at a single user buffer, not an iovec
	 * array - nothing to stash:
	 */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
				    GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
	}

	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}

static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
{
	closure_type(dio, struct dio_write, op.cl);
	struct bch_fs *c = dio->op.c;

	closure_debug_destroy(cl);

	dio->op.error = bch2_journal_error(&c->journal);

	bch2_dio_write_done(dio);
}

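/*
 * For O_DSYNC writes: after the data write has completed, flush the journal
 * up to the sequence number covering this inode's last update, plus any
 * outstanding nocow writes to devices that still need a flush. In the async
 * case, journal errors are picked up in bch2_dio_write_flush_done():
 */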
static noinline void bch2_dio_write_flush(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_unpacked inode;
	int ret;

	dio->flush = 0;

	closure_init(&dio->op.cl, NULL);

	if (!dio->op.error) {
		ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
		if (ret) {
			dio->op.error = ret;
		} else {
			bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
						     &dio->op.cl);
			bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
		}
	}

	if (dio->sync) {
		closure_sync(&dio->op.cl);
		closure_debug_destroy(&dio->op.cl);
	} else {
		continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
	}
}

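/*
 * Final teardown, common to the sync and async paths: drop the pagecache
 * block and the inode's dio count, free the stashed iovec and the bio, and
 * either return the byte count/error (sync) or complete the kiocb and
 * return -EIOCBQUEUED (async):
 */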
static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	bool sync = dio->sync;
	long ret;

	if (unlikely(dio->flush)) {
		bch2_dio_write_flush(dio);
		if (!sync)
			return -EIOCBQUEUED;
	}

	bch2_pagecache_block_put(inode);

	kfree(dio->iov);

	ret = dio->op.error ?: ((long) dio->written << 9);
	bio_put(&dio->op.wbio.bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (ret < 0)
		ret = bch2_err_class(ret);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

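/*
 * Per-iteration bookkeeping after one bch_write_op completes: advance
 * ki_pos, fold the sectors written into the running total, update i_size
 * for extending writes, and settle quota/sector accounting:
 */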
static __always_inline void bch2_dio_write_end(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	req->ki_pos	+= (u64) dio->op.written << 9;
	dio->written	+= dio->op.written;

	if (dio->extending) {
		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);
	}

	if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
		__bch2_quota_reservation_put(c, inode, &dio->quota_res);
		mutex_unlock(&inode->ei_quota_lock);
	}

	bio_release_pages(bio, false);

	if (unlikely(dio->op.error))
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
}

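/*
 * Main write loop. Note the faults_disabled_mapping dance around
 * bio_iov_iter_get_pages(): if userspace passes a buffer that is mmapped
 * from the file we're writing to, faulting those pages in would deadlock
 * against the pagecache block we hold. The fault handler detects this via
 * current->faults_disabled_mapping, drops and retakes the pagecache lock,
 * and signals us through fdm_dropped_locks() so we can re-shoot down the
 * page cache and retry:
 */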
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = dio->mapping;
	struct bch_inode_info *inode = dio->inode;
	struct bch_io_opts opts;
	struct bio *bio = &dio->op.wbio.bio;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	while (1) {
		iter_count = dio->iter.count;

		EBUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, opts);
		dio->op.end_io		= sync
			? NULL
			: bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
		dio->op.devs_need_flush	= &inode->ei_devs_need_flush;

		if (sync)
			dio->op.flags |= BCH_WRITE_SYNC;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
						 bio_sectors(bio), true);
		if (unlikely(ret))
			goto err;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_dio_write_check_allocated(dio))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (unlikely(dio->iter.count) &&
		    !dio->sync &&
		    !dio->loop &&
		    bch2_dio_write_copy_iov(dio))
			dio->sync = sync = true;

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (!sync)
			return -EIOCBQUEUED;

		bch2_dio_write_end(dio);

		if (likely(!dio->iter.count) || dio->op.error)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
	}
out:
	return bch2_dio_write_done(dio);
err:
	dio->op.error = ret;

	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
}

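/*
 * Async continuation: this runs from write completion context, which may
 * be a kernel worker thread, so we have to adopt the submitter's mm before
 * pinning the next batch of user pages:
 */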
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);

	if (mm)
		kthread_use_mm(mm);
	bch2_dio_write_loop(dio);
	if (mm)
		kthread_unuse_mm(mm);
}

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	bch2_dio_write_end(dio);

	if (likely(!dio->iter.count) || dio->op.error)
		bch2_dio_write_done(dio);
	else
		bch2_dio_write_continue(dio);
}

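/*
 * Entry point for O_DIRECT writes: i_rwsem is held across the write only
 * when we're extending i_size; pagecache_block is taken to keep page
 * faults from adding pages over the range, and inode_dio_begin() pins the
 * inode (and thus the filesystem) until the last bio completes:
 */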
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	/*
	 * Direct writes must be block aligned; without setting ret here, a
	 * misaligned write would reach err with ret still 0 from
	 * file_update_time():
	 */
	ret = -EINVAL;
	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(inode);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	dio->req		= req;
	dio->mapping		= mapping;
	dio->inode		= inode;
	dio->mm			= current->mm;
	dio->iov		= NULL;
	dio->loop		= false;
	dio->extending		= extending;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->flush		= iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;
	dio->op.c		= c;

	if (unlikely(mapping->nrpages)) {
		ret = bch2_write_invalidate_inode_pages_range(mapping,
						req->ki_pos,
						req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(inode);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}

void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
}

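/*
 * The offsets passed to bioset_init() are what make the container_of()
 * tricks above work: each bioset allocates the bio embedded at the tail of
 * its containing struct (dio_read / dio_write), with a small mempool so
 * forward progress is guaranteed under memory pressure:
 */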
int bch2_fs_fs_io_direct_init(struct bch_fs *c)
{
	if (bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_read_bioset_init;

	if (bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_write_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */