// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2025 Christoph Hellwig.
 */
#include <linux/bio-integrity.h>
#include <linux/iomap.h>
#include <linux/list_sort.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/fserror.h>
#include "internal.h"
#include "trace.h"

struct bio_set iomap_ioend_bioset;
EXPORT_SYMBOL_GPL(iomap_ioend_bioset);

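/*
 * Initialise the ioend embedded after @bio.  The new ioend starts with a
 * single reference; its offset is @file_offset and its size and start
 * sector are taken from the current bio state.
 */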
struct iomap_ioend *iomap_init_ioend(struct inode *inode,
                struct bio *bio, loff_t file_offset, u16 ioend_flags)
{
        struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_parent = NULL;
        INIT_LIST_HEAD(&ioend->io_list);
        ioend->io_flags = ioend_flags;
        ioend->io_inode = inode;
        ioend->io_offset = file_offset;
        ioend->io_size = bio->bi_iter.bi_size;
        ioend->io_sector = bio->bi_iter.bi_sector;
        ioend->io_private = NULL;
        return ioend;
}
EXPORT_SYMBOL_GPL(iomap_init_ioend);

/*
 * We're now finished for good with this ioend structure. Update the folio
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 */
static u32 iomap_finish_ioend_buffered_write(struct iomap_ioend *ioend)
{
        struct inode *inode = ioend->io_inode;
        struct bio *bio = &ioend->io_bio;
        struct folio_iter fi;
        u32 folio_count = 0;

        if (ioend->io_error) {
                mapping_set_error(inode->i_mapping, ioend->io_error);
                if (!bio_flagged(bio, BIO_QUIET)) {
                        pr_err_ratelimited(
                "%s: writeback error on inode %lu, offset %lld, sector %llu",
                                inode->i_sb->s_id, inode->i_ino,
                                ioend->io_offset, ioend->io_sector);
                }
        }

        /* walk all folios in bio, ending page IO on them */
        bio_for_each_folio_all(fi, bio) {
                if (ioend->io_error)
                        fserror_report_io(inode, FSERR_BUFFERED_WRITE,
                                        folio_pos(fi.folio) + fi.offset,
                                        fi.length, ioend->io_error,
                                        GFP_ATOMIC);
                iomap_finish_folio_write(inode, fi.folio, fi.length);
                folio_count++;
        }

        if (bio_integrity(bio))
                fs_bio_integrity_free(bio);
        bio_put(bio);   /* frees the ioend */
        return folio_count;
}

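/*
 * Writeback ioends that completed with an error are parked on this list and
 * finished from process context by failed_ioend_work; see
 * iomap_fail_ioend_buffered() below for why they cannot be completed
 * directly from the bio end_io handler.
 */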
static DEFINE_SPINLOCK(failed_ioend_lock);
static LIST_HEAD(failed_ioend_list);

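/* Work item: drain the failed ioend list and finish each ioend. */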
static void iomap_fail_ioends(struct work_struct *work)
{
        struct iomap_ioend *ioend;
        struct list_head tmp;
        unsigned long flags;

        spin_lock_irqsave(&failed_ioend_lock, flags);
        list_replace_init(&failed_ioend_list, &tmp);
        spin_unlock_irqrestore(&failed_ioend_lock, flags);

        while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
                        io_list))) {
                list_del_init(&ioend->io_list);
                iomap_finish_ioend_buffered_write(ioend);
                cond_resched();
        }
}

static DECLARE_WORK(failed_ioend_work, iomap_fail_ioends);

static void iomap_fail_ioend_buffered(struct iomap_ioend *ioend)
{
        unsigned long flags;

        /*
         * Bounce I/O errors to a workqueue to avoid nested i_lock
         * acquisitions in the fserror code.  The caller no longer owns the
         * ioend reference after the spinlock drops.
         */
        spin_lock_irqsave(&failed_ioend_lock, flags);
        if (list_empty(&failed_ioend_list))
                WARN_ON_ONCE(!schedule_work(&failed_ioend_work));
        list_add_tail(&ioend->io_list, &failed_ioend_list);
        spin_unlock_irqrestore(&failed_ioend_lock, flags);
}

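/*
 * Writeback bio completion handler: finish successful ioends immediately,
 * defer failed ones to the failed_ioend_work worker.
 */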
static void ioend_writeback_end_bio(struct bio *bio)
{
        struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

        ioend->io_error = blk_status_to_errno(bio->bi_status);
        if (ioend->io_error) {
                iomap_fail_ioend_buffered(ioend);
                return;
        }

        iomap_finish_ioend_buffered_write(ioend);
}

/*
 * We cannot cancel the ioend directly in case of an error, so call the bio
 * end I/O handler with the error status here to run the normal I/O
 * completion handler.
 */
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;

        if (!ioend->io_bio.bi_end_io)
                ioend->io_bio.bi_end_io = ioend_writeback_end_bio;

        if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
                error = -EIO;

        if (error) {
                ioend->io_bio.bi_status = errno_to_blk_status(error);
                bio_endio(&ioend->io_bio);
                return error;
        }

        if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
                fs_bio_integrity_generate(&ioend->io_bio);
        submit_bio(&ioend->io_bio);
        return 0;
}
EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);

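/*
 * Start a new writeback ioend at @pos, backed by a freshly allocated bio
 * targeting the block device of the current mapping.
 */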
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
                loff_t pos, u16 ioend_flags)
{
        struct bio *bio;

        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
                        REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
                        GFP_NOFS, &iomap_ioend_bioset);
        bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
        bio->bi_write_hint = wpc->inode->i_write_hint;
        wbc_init_bio(wpc->wbc, bio);
        wpc->nr_folios = 0;
        return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
}

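/*
 * Check whether the folio range starting at @pos can be appended to the
 * cached ioend: it must fit into the bio size limit, carry compatible ioend
 * flags, be logically contiguous with the ioend, be physically contiguous
 * unless this is an anonymous write, not start at an I/O boundary, and not
 * exceed the per-ioend folio batch limit.
 */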
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
                unsigned int map_len, u16 ioend_flags)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;

        if (ioend->io_bio.bi_iter.bi_size >
            iomap_max_bio_size(&wpc->iomap) - map_len)
                return false;
        if (ioend_flags & IOMAP_IOEND_BOUNDARY)
                return false;
        if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
            (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
                return false;
        if (pos != ioend->io_offset + ioend->io_size)
                return false;
        if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
            iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
                return false;
        /*
         * Limit ioend bio chain lengths to minimise IO completion latency.
         * This also prevents long tight loops ending page writeback on all
         * the folios in the ioend.
         */
        if (wpc->nr_folios >= IOEND_BATCH_SIZE)
                return false;
        return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the
 * block layer instantly.  Batching optimisations are provided by higher
 * level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
                loff_t pos, loff_t end_pos, unsigned int dirty_len)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;
        size_t poff = offset_in_folio(folio, pos);
        unsigned int ioend_flags = 0;
        unsigned int map_len = min_t(u64, dirty_len,
                wpc->iomap.offset + wpc->iomap.length - pos);
        int error;

        trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);

        WARN_ON_ONCE(!folio->private && map_len < dirty_len);

        switch (wpc->iomap.type) {
        case IOMAP_UNWRITTEN:
                ioend_flags |= IOMAP_IOEND_UNWRITTEN;
                break;
        case IOMAP_MAPPED:
                break;
        case IOMAP_HOLE:
                return map_len;
        default:
                WARN_ON_ONCE(1);
                return -EIO;
        }

        if (wpc->iomap.flags & IOMAP_F_SHARED)
                ioend_flags |= IOMAP_IOEND_SHARED;
        if (folio_test_dropbehind(folio))
                ioend_flags |= IOMAP_IOEND_DONTCACHE;
        if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
                ioend_flags |= IOMAP_IOEND_BOUNDARY;

        if (!ioend || !iomap_can_add_to_ioend(wpc, pos, map_len, ioend_flags)) {
new_ioend:
                if (ioend) {
                        error = wpc->ops->writeback_submit(wpc, 0);
                        if (error)
                                return error;
                }
                wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
        }

        if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
                goto new_ioend;

        /*
         * Clamp io_offset and io_size to the incore EOF so that ondisk
         * file size updates in the ioend completion are byte-accurate.
         * This avoids recovering files with zeroed tail regions when
         * writeback races with appending writes:
         *
         *    Thread 1:                  Thread 2:
         *    ------------               -----------
         *    write [A, A+B]
         *    update inode size to A+B
         *    submit I/O [A, A+BS]
         *                               write [A+B, A+B+C]
         *                               update inode size to A+B+C
         *    <I/O completes, updates disk size to min(A+B+C, A+BS)>
         *    <power failure>
         *
         * After reboot:
         *    1) with A+B+C < A+BS, the file has zero padding in range
         *       [A+B, A+B+C]
         *
         *       |<     Block Size (BS)   >|
         *       |DDDDDDDDDDDD0000000000000|
         *       ^           ^        ^
         *       A          A+B     A+B+C
         *                          (EOF)
         *
         *    2) with A+B+C > A+BS, the file has zero padding in range
         *       [A+B, A+BS]
         *
         *       |<     Block Size (BS)   >|<     Block Size (BS)    >|
         *       |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
         *       ^           ^             ^            ^
         *       A          A+B           A+BS        A+B+C
         *                                (EOF)
         *
         *       D = Valid Data
         *       0 = Zero Padding
         *
         * Note that this defeats the ability to chain the ioends of
         * appending writes.
         */
        ioend->io_size += map_len;
        if (ioend->io_offset + ioend->io_size > end_pos)
                ioend->io_size = end_pos - ioend->io_offset;

        wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
        return map_len;
}
EXPORT_SYMBOL_GPL(iomap_add_to_ioend);

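/*
 * Drop one reference on @ioend (or on its parent, for a split child),
 * recording @error as the first error seen, and once the last reference is
 * gone run the completion handler matching the I/O type.  Returns the
 * number of folios completed, which callers use for completion batching.
 */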
static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
        if (ioend->io_parent) {
                struct bio *bio = &ioend->io_bio;

                ioend = ioend->io_parent;
                bio_put(bio);
        }

        if (error)
                cmpxchg(&ioend->io_error, 0, error);

        if (!atomic_dec_and_test(&ioend->io_remaining))
                return 0;

        if (!ioend->io_error &&
            bio_integrity(&ioend->io_bio) &&
            bio_op(&ioend->io_bio) == REQ_OP_READ) {
                ioend->io_error = fs_bio_integrity_verify(&ioend->io_bio,
                                ioend->io_sector, ioend->io_size);
        }

        if (ioend->io_flags & IOMAP_IOEND_DIRECT)
                return iomap_finish_ioend_direct(ioend);
        if (bio_op(&ioend->io_bio) == REQ_OP_READ)
                return iomap_finish_ioend_buffered_read(ioend);
        return iomap_finish_ioend_buffered_write(ioend);
}

/*
 * Ioend completion routine for merged bios. This can only be called from
 * task contexts as merged ioends can be of unbound length. Hence we have to
 * break up the writeback completions into manageable chunks to avoid long
 * scheduler holdoffs. We aim to keep scheduler holdoffs down below 10ms so
 * that we get good batch processing throughput without creating adverse
 * scheduler latency conditions.
 */
void iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
        struct list_head tmp;
        u32 completions;

        might_sleep();

        list_replace_init(&ioend->io_list, &tmp);
        completions = iomap_finish_ioend(ioend, error);

        while (!list_empty(&tmp)) {
                if (completions > IOEND_BATCH_SIZE * 8) {
                        cond_resched();
                        completions = 0;
                }
                ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
                list_del_init(&ioend->io_list);
                completions += iomap_finish_ioend(ioend, error);
        }
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool iomap_ioend_can_merge(struct iomap_ioend *ioend,
                struct iomap_ioend *next)
{
        /*
         * There is no point in merging reads as there is no completion
         * processing that can be easily batched up for them.
         */
        if (bio_op(&ioend->io_bio) == REQ_OP_READ ||
            bio_op(&next->io_bio) == REQ_OP_READ)
                return false;

        if (ioend->io_bio.bi_status != next->io_bio.bi_status)
                return false;
        if (next->io_flags & IOMAP_IOEND_BOUNDARY)
                return false;
        if ((ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
            (next->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
                return false;
        if (ioend->io_offset + ioend->io_size != next->io_offset)
                return false;
        /*
         * Do not merge physically discontiguous ioends. The filesystem
         * completion functions will have to iterate the physical
         * discontiguities even if we merge the ioends at a logical level, so
         * we don't gain anything by merging physical discontiguities here.
         *
         * We cannot use bio->bi_iter.bi_sector here as it is modified during
         * submission so does not point to the start sector of the bio at
         * completion.
         */
        if (ioend->io_sector + (ioend->io_size >> SECTOR_SHIFT) !=
            next->io_sector)
                return false;
        return true;
}

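/*
 * Merge as many logically and physically contiguous ioends from
 * @more_ioends into @ioend as possible, typically after the list has been
 * sorted with iomap_sort_ioends().  Merged ioends are moved onto
 * ioend->io_list and are finished together by iomap_finish_ioends().
 */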
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
                struct list_head *more_ioends)
{
        struct iomap_ioend *next;

        INIT_LIST_HEAD(&ioend->io_list);

        while ((next = list_first_entry_or_null(more_ioends,
                        struct iomap_ioend, io_list))) {
                if (!iomap_ioend_can_merge(ioend, next))
                        break;
                list_move_tail(&next->io_list, &ioend->io_list);
                ioend->io_size += next->io_size;
        }
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

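/* list_sort() comparison callback: order ioends by ascending file offset. */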
static int iomap_ioend_compare(void *priv, const struct list_head *a,
                const struct list_head *b)
{
        struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
        struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

        if (ia->io_offset < ib->io_offset)
                return -1;
        if (ia->io_offset > ib->io_offset)
                return 1;
        return 0;
}

void iomap_sort_ioends(struct list_head *ioend_list)
{
        list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);

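/*
 * Sketch of how a filesystem completion path might combine the helpers
 * above (the local list and the surrounding context are the caller's own,
 * not part of this API):
 *
 *	LIST_HEAD(ioends);
 *
 *	... collect completed ioends onto &ioends ...
 *
 *	iomap_sort_ioends(&ioends);
 *	while ((ioend = list_first_entry_or_null(&ioends,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &ioends);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */
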
/*
 * Split up to the first @max_len bytes from @ioend if the ioend covers more
 * than @max_len bytes.
 *
 * If @is_append is set, the split will be based on the hardware limits for
 * REQ_OP_ZONE_APPEND commands and can be less than @max_len if the hardware
 * limits don't allow the entire @max_len length.
 *
 * The bio embedded into @ioend must be a REQ_OP_WRITE because the block
 * layer does not allow splitting REQ_OP_ZONE_APPEND bios.  The file system
 * has to switch the operation after this call, but before submitting the
 * bio.
 */
struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
                unsigned int max_len, bool is_append)
{
        struct bio *bio = &ioend->io_bio;
        struct iomap_ioend *split_ioend;
        unsigned int nr_segs;
        int sector_offset;
        struct bio *split;

        if (is_append) {
                struct queue_limits *lim = bdev_limits(bio->bi_bdev);

                max_len = min(max_len,
                        lim->max_zone_append_sectors << SECTOR_SHIFT);

                sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
                if (unlikely(sector_offset < 0))
                        return ERR_PTR(sector_offset);
                if (!sector_offset)
                        return NULL;
        } else {
                if (bio->bi_iter.bi_size <= max_len)
                        return NULL;
                sector_offset = max_len >> SECTOR_SHIFT;
        }

        /* ensure the split ioend is still block size aligned */
        sector_offset = ALIGN_DOWN(sector_offset << SECTOR_SHIFT,
                        i_blocksize(ioend->io_inode)) >> SECTOR_SHIFT;

        split = bio_split(bio, sector_offset, GFP_NOFS, &iomap_ioend_bioset);
        if (IS_ERR(split))
                return ERR_CAST(split);
        split->bi_private = bio->bi_private;
        split->bi_end_io = bio->bi_end_io;

        split_ioend = iomap_init_ioend(ioend->io_inode, split,
                        ioend->io_offset, ioend->io_flags);
        split_ioend->io_parent = ioend;

        atomic_inc(&ioend->io_remaining);
        ioend->io_offset += split_ioend->io_size;
        ioend->io_size -= split_ioend->io_size;

        split_ioend->io_sector = ioend->io_sector;
        if (!is_append)
                ioend->io_sector += (split_ioend->io_size >> SECTOR_SHIFT);
        return split_ioend;
}
EXPORT_SYMBOL_GPL(iomap_split_ioend);

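/*
 * The bioset backs all ioend allocations; the pool is sized to keep
 * writeback making forward progress under memory pressure, and the offsetof
 * front pad places struct iomap_ioend around the embedded bio.
 */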
static int __init iomap_ioend_init(void)
{
        return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
                           offsetof(struct iomap_ioend, io_bio),
                           BIOSET_NEED_BVECS);
}
fs_initcall(iomap_ioend_init);