// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/blk-crypto.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

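/*
 * Create the slab caches used for ext4_io_end and ext4_io_end_vec
 * allocations. ext4_exit_pageio() below tears them down again.
 */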
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

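/*
 * Allocate a new ext4_io_end_vec and link it at the tail of the io_end's
 * list_vec list. Each vector describes one range that may need unwritten
 * extent conversion once the IO completes.
 */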
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

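/* Release every ext4_io_end_vec still queued on the io_end. */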
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

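/*
 * Finish page writeback for every folio covered by this bio: clear the
 * async_write flag on the buffers that were part of the bio, report any
 * IO error, and end folio writeback (freeing the fscrypt bounce page, if
 * one was used) once no other async write is still pending on the folio.
 */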
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);
			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

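/*
 * Drop the final resources held by an io_end once its refcount has gone:
 * finish writeback on any bios queued on it, free its extent vectors and
 * return the structure to the slab cache.
 */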
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * On successful IO, check a range of space and convert unwritten extents to
 * written. On IO failure, check if journal abort is needed. Note that
 * we are protected from truncate touching same part of extent tree by the
 * fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	/*
	 * Do not convert the unwritten extents if data writeback fails,
	 * or stale data may be exposed.
	 */
	io_end->handle = NULL;	/* Following call will use up the handle */
	if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
		ret = -EIO;
		if (handle)
			jbd2_journal_free_reserved(handle);

		if (test_opt(sb, DATA_ERR_ABORT))
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, ret);
	} else {
		ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	}
	if (ret < 0 && !ext4_emergency_state(sb) &&
	    io_end->flag & EXT4_IO_END_UNWRITTEN) {
		ext4_msg(sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}

	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

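/*
 * Decide whether completion work for this io_end must be deferred to the
 * per-sb rsv_conversion workqueue: either unwritten extents still need to
 * be converted, or a failed IO has to abort the journal (data_err=abort).
 */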
static bool ext4_io_end_defer_completion(ext4_io_end_t *io_end)
{
	if (io_end->flag & EXT4_IO_END_UNWRITTEN &&
	    !list_empty(&io_end->list_vec))
		return true;
	if (test_opt(io_end->inode->i_sb, DATA_ERR_ABORT) &&
	    io_end->flag & EXT4_IO_END_FAILED &&
	    !ext4_emergency_state(io_end->inode->i_sb))
		return true;
	return false;
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions or pending IO errors will enter here. */
	WARN_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
	WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN &&
		!io_end->handle && sbi->s_journal);
	WARN_ON(!io_end->bio);

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

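/*
 * Detach the inode's completed io_end list under i_completed_io_lock and
 * process every entry, returning the first error encountered (if any).
 */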
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Used to convert unwritten extents to written extents upon IO completion,
 * or used to abort the journal upon IO errors.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

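/* Allocate a new io_end structure for writeback on the inode, holding one reference. */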
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

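/*
 * Drop a reference to the io_end. On the final put, either queue the io_end
 * for deferred completion on the conversion workqueue or release it directly
 * when no conversion/abort work is required. This is the variant used from
 * bio completion (see ext4_end_bio() below).
 */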
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (ext4_io_end_defer_completion(io_end))
			return ext4_add_complete_io(io_end);

		ext4_release_io_end(io_end);
	}
}

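/*
 * Drop a reference to the io_end and, on the final put, perform any pending
 * completion work (unwritten extent conversion or journal abort) synchronously
 * instead of deferring it to the workqueue.
 */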
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (ext4_io_end_defer_completion(io_end))
			return ext4_end_io_end(io_end);

		ext4_release_io_end(io_end);
	}
	return 0;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		io_end->flag |= EXT4_IO_END_FAILED;
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (ext4_io_end_defer_completion(io_end)) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

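/*
 * Submit the bio accumulated in the ext4_io_submit context (if any), marking
 * it REQ_SYNC for WB_SYNC_ALL writeback, and reset io_bio so that a new bio
 * is allocated for the next buffer.
 */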
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		blk_crypto_submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

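/*
 * Allocate a fresh write bio starting at the buffer's block, attach the
 * fscrypt context and the end_io handler, and take a reference on the
 * current io_end for the bio to hold.
 */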
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

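/*
 * Append the buffer to the bio being built. If the buffer is not contiguous
 * with the previous block, cannot share the bio's encryption context, or no
 * longer fits in the bio, submit the current bio and start a new one.
 */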
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
	io->io_next_block++;
}

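/*
 * Write out the first len bytes of a locked folio: mark the dirty, mapped
 * buffers async_write, encrypt the data into a bounce folio when fs-layer
 * crypto is in use, and add each buffer to the bio(s) under construction.
 */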
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	/*
	 * Comments copied from block_write_full_folio:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	if (!nr_to_submit) {
		/*
		 * We have nothing to submit. Just cycle the folio through
		 * writeback state to properly update xarray tags.
		 */
		__folio_start_writeback(folio, keep_towrite);
		folio_end_writeback(folio);
		return 0;
	}

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}