// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2025 Christoph Hellwig.
 */
#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fserror.h>
#include "internal.h"
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_NO_INVALIDATE	(1U << 26)
#define IOMAP_DIO_COMP_WORK	(1U << 27)
#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
#define IOMAP_DIO_NEED_SYNC	(1U << 29)
#define IOMAP_DIO_WRITE		(1U << 30)
#define IOMAP_DIO_USER_BACKED	(1U << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

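/*
 * Allocate a bio from the bio_set supplied in the dio_ops if the caller
 * provided one, otherwise from the global bio pool.
 */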
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

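/*
 * Take a dio reference for the bio and submit it, either through the
 * filesystem's ->submit_io hook or directly to the block layer.
 */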
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	struct kiocb *iocb = dio->iocb;

	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
		bio_set_polled(bio, iocb);
		WRITE_ONCE(iocb->private, bio);
	}

	if (dio->dops && dio->dops->submit_io) {
		dio->dops->submit_io(iter, bio, pos);
	} else {
		WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
		blk_crypto_submit_bio(bio);
	}
}

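/* Map the dio direction to the fserror type used for error reporting. */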
static inline enum fserror_type iomap_dio_err_type(const struct iomap_dio *dio)
{
	if (dio->flags & IOMAP_DIO_WRITE)
		return FSERR_DIRECTIO_WRITE;
	return FSERR_DIRECTIO_READ;
}

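/*
 * Filter out successful completions and the magic retry/fallback codes so
 * that only real I/O errors get reported.
 */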
static inline bool should_report_dio_fserror(const struct iomap_dio *dio)
{
	switch (dio->error) {
	case 0:
	case -EAGAIN:
	case -ENOTBLK:
		/* don't send fsnotify for success or magic retry codes */
		return false;
	default:
		return true;
	}
}

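/*
 * Finish a direct I/O request once all bios have completed: run the optional
 * ->end_io handler, report errors, invalidate the page cache for writes and
 * update the file position.  Returns the number of bytes transferred or a
 * negative errno.
 */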
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);
	if (should_report_dio_fserror(dio))
		fserror_report_io(file_inode(iocb->ki_filp),
				iomap_dio_err_type(dio), offset, dio->size,
				dio->error, GFP_NOFS);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
	    !(dio->flags & IOMAP_DIO_NO_INVALIDATE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));

	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

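/*
 * Deferred dio completion, run from the s_dio_done_wq workqueue so the
 * completion work can sleep.
 */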
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

/*
 * Called when dio->ref reaches zero from an I/O completion.
 */
static void iomap_dio_done(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;

	if (dio->wait_for_completion) {
		/*
		 * Synchronous I/O, the submitting task will handle any
		 * completion work that is needed after the I/O.  All we need
		 * to do is wake it.
		 */
		struct task_struct *waiter = dio->submit.waiter;

		WRITE_ONCE(dio->submit.waiter, NULL);
		blk_wake_io_task(waiter);
		return;
	}

	/*
	 * Always run error completions in user context.  These are not
	 * performance critical and some code relies on taking sleeping locks
	 * for error handling.
	 */
	if (dio->error)
		dio->flags |= IOMAP_DIO_COMP_WORK;

	/*
	 * Never invalidate pages from this context to avoid deadlocks with
	 * buffered I/O completions when called from the ioend workqueue,
	 * and to avoid sleeping when called directly from ->bi_end_io.
	 * Tough luck if you hit the tiny race with someone dirtying the range
	 * right between this check and the actual completion.
	 */
	if ((dio->flags & IOMAP_DIO_WRITE) &&
	    !(dio->flags & IOMAP_DIO_COMP_WORK)) {
		if (dio->iocb->ki_filp->f_mapping->nrpages)
			dio->flags |= IOMAP_DIO_COMP_WORK;
		else
			dio->flags |= IOMAP_DIO_NO_INVALIDATE;
	}

	if (dio->flags & IOMAP_DIO_COMP_WORK) {
		struct inode *inode = file_inode(iocb->ki_filp);

		/*
		 * Async DIO completion that requires filesystem level
		 * completion work gets punted to a work queue to complete as
		 * the operation may require more IO to be issued to finalise
		 * filesystem metadata changes or guarantee data integrity.
		 */
		INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
		queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		return;
	}

	WRITE_ONCE(iocb->private, NULL);
	iomap_dio_complete_work(&dio->aio.work);
}

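/*
 * Common bio completion handling: release the pages or bounce buffer and drop
 * our reference to the dio, finishing the dio when this was the last bio.
 */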
static void __iomap_dio_bio_end_io(struct bio *bio, bool inline_completion)
{
	struct iomap_dio *dio = bio->bi_private;

	if (bio_integrity(bio))
		fs_bio_integrity_free(bio);

	if (dio->flags & IOMAP_DIO_BOUNCE) {
		bio_iov_iter_unbounce(bio, !!dio->error,
				dio->flags & IOMAP_DIO_USER_BACKED);
		bio_put(bio);
	} else if (dio->flags & IOMAP_DIO_USER_BACKED) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}

	/* Do not touch bio below, we just gave up our reference. */

	if (atomic_dec_and_test(&dio->ref)) {
		/*
		 * Avoid another context switch for the completion when already
		 * called from the ioend completion workqueue.
		 */
		if (inline_completion)
			dio->flags &= ~IOMAP_DIO_COMP_WORK;
		iomap_dio_done(dio);
	}
}

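/*
 * ->bi_end_io handler for bios submitted by iomap direct I/O.
 */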
void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
	__iomap_dio_bio_end_io(bio, false);
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

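/*
 * Complete a direct I/O ioend.  Called from the ioend completion workqueue,
 * so the final completion is allowed to run inline without another context
 * switch.
 */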
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
{
	struct iomap_dio *dio = ioend->io_bio.bi_private;
	u32 vec_count = ioend->io_bio.bi_vcnt;

	if (ioend->io_error)
		iomap_dio_set_error(dio, ioend->io_error);
	__iomap_dio_bio_end_io(&ioend->io_bio, true);

	/*
	 * Return the number of bvecs completed as even direct I/O completions
	 * do significant per-folio work and we'll still want to give up the
	 * CPU after a lot of completions.
	 */
	return vec_count;
}

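/*
 * Write zeroes to the range [pos, pos + len) using the shared zero folio,
 * e.g. to fill the sub-block head or tail of a newly allocated block.
 */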
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct bio *bio;
	struct folio *zero_folio = largest_zero_folio();
	int nr_vecs = max(1, i_blocksize(inode) / folio_size(zero_folio));

	if (!len)
		return 0;

	/*
	 * This limit shall never be reached as most filesystems have a
	 * maximum blocksize of 64k.
	 */
	if (WARN_ON_ONCE(nr_vecs > BIO_MAX_VECS))
		return -EINVAL;

	bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
			REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	while (len > 0) {
		unsigned int io_len = min(len, folio_size(zero_folio));

		bio_add_folio_nofail(bio, zero_folio, io_len, 0);
		len -= io_len;
	}
	iomap_dio_submit_bio(iter, dio, bio, pos);

	return 0;
}

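/*
 * Build and submit a single bio for the current mapping, transferring as much
 * of the remaining iter as fits.  Returns the number of bytes submitted or a
 * negative errno.
 */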
static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
		struct iomap_dio *dio, loff_t pos, unsigned int alignment,
		blk_opf_t op)
{
	unsigned int nr_vecs;
	struct bio *bio;
	ssize_t ret;

	if (dio->flags & IOMAP_DIO_BOUNCE)
		nr_vecs = bio_iov_bounce_nr_vecs(dio->submit.iter, op);
	else
		nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);

	bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
	fscrypt_set_bio_crypt_ctx(bio, iter->inode, pos, GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_write_hint = iter->inode->i_write_hint;
	bio->bi_ioprio = dio->iocb->ki_ioprio;
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	if (dio->flags & IOMAP_DIO_BOUNCE)
		ret = bio_iov_iter_bounce(bio, dio->submit.iter,
				iomap_max_bio_size(&iter->iomap));
	else
		ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
				alignment - 1);
	if (unlikely(ret))
		goto out_put_bio;
	ret = bio->bi_iter.bi_size;

	/*
	 * An atomic write bio must cover the complete length.  If it doesn't,
	 * error out.
	 */
	if ((op & REQ_ATOMIC) && WARN_ON_ONCE(ret != iomap_length(iter))) {
		ret = -EINVAL;
		goto out_put_bio;
	}

	if (iter->iomap.flags & IOMAP_F_INTEGRITY) {
		if (dio->flags & IOMAP_DIO_WRITE)
			fs_bio_integrity_generate(bio);
		else
			fs_bio_integrity_alloc(bio);
	}

	if (dio->flags & IOMAP_DIO_WRITE)
		task_io_account_write(ret);
	else if ((dio->flags & IOMAP_DIO_USER_BACKED) &&
		 !(dio->flags & IOMAP_DIO_BOUNCE))
		bio_set_pages_dirty(bio);

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (iov_iter_count(dio->submit.iter))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;
	iomap_dio_submit_bio(iter, dio, bio, pos);
	return ret;

out_put_bio:
	bio_put(bio);
	return ret;
}

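/*
 * Submit bios for the current mapping.  Handles sub-block zeroing of newly
 * allocated and unwritten extents as well as the write-through (FUA) and
 * atomic write decisions for this mapping.
 */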
static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	const loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
	bool need_zeroout = false;
	u64 copied = 0;
	size_t orig_count;
	unsigned int alignment;
	ssize_t ret = 0;

	/*
	 * File systems that write out of place and always allocate new blocks
	 * need each bio to be block aligned as that's the unit of allocation.
	 */
	if (dio->flags & IOMAP_DIO_FSBLOCK_ALIGNED)
		alignment = fs_block_size;
	else
		alignment = bdev_logical_block_size(iomap->bdev);

	if ((pos | length) & (alignment - 1))
		return -EINVAL;

	if (dio->flags & IOMAP_DIO_WRITE) {
		bool need_completion_work = true;

		switch (iomap->type) {
		case IOMAP_MAPPED:
			/*
			 * Directly mapped I/O does not inherently need to do
			 * work at I/O completion time.  But there are various
			 * cases below where this will get set again.
			 */
			need_completion_work = false;
			break;
		case IOMAP_UNWRITTEN:
			dio->flags |= IOMAP_DIO_UNWRITTEN;
			need_zeroout = true;
			break;
		default:
			break;
		}

		if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
			/*
			 * Ensure that the mapping covers the full write
			 * length, otherwise it won't be submitted as a single
			 * bio, which is required to use hardware atomics.
			 */
			if (length != iter->len)
				return -EINVAL;
			bio_opf |= REQ_ATOMIC;
		}

		if (iomap->flags & IOMAP_F_SHARED) {
			/*
			 * Unsharing of shared extents needs to update metadata
			 * at I/O completion time.
			 */
			need_completion_work = true;
			dio->flags |= IOMAP_DIO_COW;
		}

		if (iomap->flags & IOMAP_F_NEW) {
			/*
			 * Newly allocated blocks might need recording in
			 * metadata at I/O completion time.
			 */
			need_completion_work = true;
			need_zeroout = true;
		}

		/*
		 * Use a FUA write if we need datasync semantics and this is a
		 * pure overwrite that doesn't require any metadata updates.
		 *
		 * This allows us to avoid cache flushes on I/O completion.
		 */
		if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
			if (!need_completion_work &&
			    !(iomap->flags & IOMAP_F_DIRTY) &&
			    (!bdev_write_cache(iomap->bdev) ||
			     bdev_fua(iomap->bdev)))
				bio_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * We can only do inline completion for pure overwrites that
		 * don't require additional I/O at completion time.
		 *
		 * This rules out writes that need zeroing or metadata updates
		 * to convert unwritten or shared extents.
		 *
		 * Writes that extend i_size are also not supported, but this
		 * is handled in __iomap_dio_rw().
		 */
		if (need_completion_work)
			dio->flags |= IOMAP_DIO_COMP_WORK;

		bio_opf |= REQ_OP_WRITE;
	} else {
		bio_opf |= REQ_OP_READ;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * The rules for polled IO completions follow the same guidelines as
	 * the ones we set for inline and deferred completions.  If none of
	 * those are available for this IO, clear the polled flag.
	 */
	if (dio->flags & IOMAP_DIO_COMP_WORK)
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);

		ret = iomap_dio_zero(iter, dio, pos - pad, pad);
		if (ret)
			goto out;
	}

	do {
		/*
		 * If completions already occurred and reported errors, give up
		 * now and don't bother submitting more bios.
		 */
		if (unlikely(data_race(dio->error)))
			goto out;

		ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
		if (unlikely(ret < 0)) {
			/*
			 * We have to stop part way through an IO.  We must
			 * fall through to the sub-block tail zeroing here,
			 * otherwise this short IO may expose stale data in the
			 * tail of the block we haven't written data to.
			 */
			break;
		}
		dio->size += ret;
		copied += ret;
		pos += ret;
		ret = 0;
	} while (iov_iter_count(dio->submit.iter));

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			ret = iomap_dio_zero(iter, dio, pos,
					fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return iomap_iter_advance(iter, copied);
	return ret;
}

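/*
 * Direct I/O reads from a hole return zeroes without touching the device.
 */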
static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return iomap_iter_advance(iter, length);
}

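/*
 * Copy data directly to or from an inline (in-inode) extent, bypassing the
 * block layer entirely.
 */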
static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	u64 copied;

	if (WARN_ON_ONCE(!inline_data))
		return -EIO;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return iomap_iter_advance(iomi, copied);
}

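/*
 * Dispatch the current mapping to the appropriate direct I/O handler based on
 * the extent type.
 */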
static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc_obj(*dio);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = dio_flags & (IOMAP_DIO_FSBLOCK_ALIGNED | IOMAP_DIO_BOUNCE);
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_USER_BACKED;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}

		if (iocb->ki_flags & IOCB_ATOMIC)
			iomi.flags |= IOMAP_ATOMIC;

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using WRITE_THROUGH for this IO.  This flag requires
			 * either FUA writes through the device's write cache,
			 * or a normal write to a device without a volatile
			 * write cache.  For the former, any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * i_size updates must happen from process context.
		 */
		if (iomi.pos + iomi.len > dio->i_size)
			dio->flags |= IOMAP_DIO_COMP_WORK;

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				if (iocb->ki_flags & IOCB_ATOMIC) {
					/*
					 * folio invalidation failed, maybe
					 * this is transient, unlock and see if
					 * the caller tries again.
					 */
					ret = -EAGAIN;
				} else {
					/* fall back to buffered write */
					ret = -ENOTBLK;
				}
			}
			goto out_free_dio;
		}
	}

	if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.status = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were already written through to the
	 * media, we don't need to flush the cache on IO completion.  Clear the
	 * sync flag for this case.
	 *
	 * Otherwise defer completion to the workqueue if any sync work is
	 * needed, as that has to be performed from process context.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
	else if (dio->flags & IOMAP_DIO_NEED_SYNC)
		dio->flags |= IOMAP_DIO_COMP_WORK;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

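/*
 * Convenience wrapper around __iomap_dio_rw() that runs iomap_dio_complete()
 * for the caller and converts the result to a ssize_t return value.
 */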
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);