xref: /linux/include/linux/iomap.h (revision 1885cdbfbb51ede3637166c895d0b8040c9899cc)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE	0	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC	1	/* delayed allocation blocks */
#define IOMAP_MAPPED	2	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	3	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE	4	/* data inline in the inode */

/*
 * Flags reported by the file system from iomap_begin:
 *
 * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
 * zeroing for areas to which no data is copied.
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit it to persistent storage.
 * This needs to take into account metadata changes that *may* be made at IO
 * completion, such as file size updates from direct IO.
 *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
 *
 * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
 * mappings.
 *
 * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
 * buffer heads for this mapping.
 *
 * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
 * rather than a file data extent.
 *
 * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must
 * never be merged with the mapping before it.
 *
 * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
 * assigned to it yet and the file system will do that in the bio submission
 * handler, splitting the I/O as needed.
 *
 * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
 * bio, i.e. set REQ_ATOMIC.
 */
#define IOMAP_F_NEW		(1U << 0)
#define IOMAP_F_DIRTY		(1U << 1)
#define IOMAP_F_SHARED		(1U << 2)
#define IOMAP_F_MERGED		(1U << 3)
#ifdef CONFIG_BUFFER_HEAD
#define IOMAP_F_BUFFER_HEAD	(1U << 4)
#else
#define IOMAP_F_BUFFER_HEAD	0
#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR		(1U << 5)
#define IOMAP_F_BOUNDARY	(1U << 6)
#define IOMAP_F_ANON_WRITE	(1U << 7)
#define IOMAP_F_ATOMIC_BIO	(1U << 8)

/*
 * Flag reserved for file system specific usage
 */
#define IOMAP_F_PRIVATE		(1U << 12)

/*
 * Flags set by the core iomap code during operations:
 *
 * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
 * has changed as the result of this write operation.
 *
 * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
 * range it covers needs to be remapped by the high level before the operation
 * can proceed.
 */
#define IOMAP_F_SIZE_CHANGED	(1U << 14)
#define IOMAP_F_STALE		(1U << 15)

/*
 * Magic value for addr:
 */
#define IOMAP_NULL_ADDR -1ULL	/* addr is not valid */

struct iomap {
	u64			addr; /* disk offset of mapping, bytes */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* type of mapping */
	u16			flags;	/* flags for mapping */
	struct block_device	*bdev;	/* block device for I/O */
	struct dax_device	*dax_dev; /* dax_dev for dax operations */
	void			*inline_data;
	void			*private; /* filesystem private */
	u64			validity_cookie; /* used with .iomap_valid() */
};

static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
	if (iomap->flags & IOMAP_F_ANON_WRITE)
		return U64_MAX; /* invalid */
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
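
/*
 * Worked example (values illustrative): for a mapping with addr = 8192 and
 * offset = 4096, the sector for pos = 6144 is
 * (8192 + 6144 - 4096) >> 9 = 10240 >> 9 = 20.
 */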

/*
 * Returns the inline data pointer for logical offset @pos.
 */
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
	return iomap->inline_data + pos - iomap->offset;
}

/*
 * Check if the mapping's length is within the valid range for inline data.
 * This is used to guard against accessing data beyond the page inline_data
 * points at.
 */
static inline bool iomap_inline_data_valid(const struct iomap *iomap)
{
	return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
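
/*
 * Illustrative sketch of how a caller might guard an inline data copy with
 * the helper above (mirroring the pattern used by the buffered I/O code);
 * @dst, @pos and @len are assumed to come from the surrounding context:
 *
 *	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
 *		return -EIO;
 *	memcpy(dst, iomap_inline_data(iomap, pos), len);
 */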

/*
 * When get_folio succeeds, put_folio will always be called to do any
 * cleanup work necessary.  put_folio is responsible for unlocking and putting
 * @folio.
 */
struct iomap_write_ops {
	struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
			unsigned len);
	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
			struct folio *folio);

	/*
	 * Check that the cached iomap still maps correctly to the filesystem's
	 * internal extent map. FS internal extent maps can change while iomap
	 * is iterating a cached iomap, so this hook allows iomap to detect that
	 * the iomap needs to be refreshed during a long running write
	 * operation.
	 *
	 * The filesystem can store internal state (e.g. a sequence number) in
	 * iomap->validity_cookie when the iomap is first mapped to be able to
	 * detect changes between mapping time and whenever .iomap_valid() is
	 * called.
	 *
	 * This is called with the folio over the specified file position held
	 * locked by the iomap code.
	 */
	bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);

	/*
	 * Optional.  Allows the filesystem to provide a custom handler for
	 * reading in the contents of a folio; otherwise iomap defaults to
	 * submitting a bio read request.
	 *
	 * The read must be done synchronously.
	 */
	int (*read_folio_range)(const struct iomap_iter *iter,
			struct folio *folio, loff_t pos, size_t len);
};
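
/*
 * Minimal ->iomap_valid sketch, assuming a hypothetical filesystem that
 * bumps a per-inode extent map sequence number on every mapping change and
 * stores it in iomap->validity_cookie from ->iomap_begin; MYFS_I() and
 * extent_seq are illustrative, not part of this API:
 *
 *	static bool myfs_iomap_valid(struct inode *inode,
 *			const struct iomap *iomap)
 *	{
 *		return iomap->validity_cookie ==
 *			READ_ONCE(MYFS_I(inode)->extent_seq);
 *	}
 */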

/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
#define IOMAP_NOWAIT		(1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY	(1 << 6) /* only pure overwrites allowed */
#define IOMAP_UNSHARE		(1 << 7) /* unshare_file_range */
#ifdef CONFIG_FS_DAX
#define IOMAP_DAX		(1 << 8) /* DAX mapping */
#else
#define IOMAP_DAX		0
#endif /* CONFIG_FS_DAX */
#define IOMAP_ATOMIC		(1 << 9) /* torn-write protection */
#define IOMAP_DONTCACHE		(1 << 10)

struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as we can do it as a single mapping.
	 * The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap,
			struct iomap *srcmap);

	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin.  Written indicates the length of the successful write
	 * operation which needs to be committed, while the rest needs to be
	 * unreserved.  Written might be zero if no data was written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);
};
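
/*
 * Sketch of a simple ->iomap_begin for a filesystem with block-sized
 * extents; myfs_lookup_block() is a hypothetical helper returning the disk
 * block backing @pos or a negative errno for a hole:
 *
 *	static int myfs_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap,
 *			struct iomap *srcmap)
 *	{
 *		s64 block = myfs_lookup_block(inode, pos);
 *
 *		iomap->offset = round_down(pos, i_blocksize(inode));
 *		iomap->length = i_blocksize(inode);
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		if (block < 0) {
 *			iomap->type = IOMAP_HOLE;
 *			iomap->addr = IOMAP_NULL_ADDR;
 *		} else {
 *			iomap->type = IOMAP_MAPPED;
 *			iomap->addr = (u64)block << inode->i_blkbits;
 *		}
 *		return 0;
 *	}
 */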

/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on.  It is updated by
 *	calls to iomap_iter().  Treat as read-only in the body.
 * @len: The remaining length of the file segment we're operating on.
 *	It is updated at the same time as @pos.
 * @iter_start_pos: The original start pos for the current iomap. Used for
 *	incremental iter advance.
 * @status: Status of the most recent iteration. Zero on success or a negative
 *	errno on error.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration
 * @srcmap: Source map for COW operations
 * @fbatch: Optional batch of folios to operate on, e.g. as populated by
 *	iomap_fill_dirty_folios()
 * @private: Optional caller-provided data, opaque to the core iterator
 */
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	loff_t iter_start_pos;
	int status;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
	struct folio_batch *fbatch;
	void *private;
};

int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
int iomap_iter_advance(struct iomap_iter *iter, u64 count);
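
/*
 * The usual calling convention, as used by the iomap library code itself:
 * loop until iomap_iter() returns <= 0, processing one mapping per pass and
 * recording progress in iter.status.  do_something() stands in for the
 * per-mapping operation, which typically calls iomap_iter_advance():
 *
 *	struct iomap_iter iter = {
 *		.inode	= inode,
 *		.pos	= pos,
 *		.len	= len,
 *		.flags	= IOMAP_WRITE,
 *	};
 *	int ret;
 *
 *	while ((ret = iomap_iter(&iter, ops)) > 0)
 *		iter.status = do_something(&iter);
 *	return ret;
 */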

/**
 * iomap_length_trim - trimmed length of the current iomap iteration
 * @iter: iteration structure
 * @pos: File position to trim from.
 * @len: Length of the mapping to trim to.
 *
 * Returns a trimmed length that the operation applies to for the current
 * iteration.
 */
static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
		u64 len)
{
	u64 end = iter->iomap.offset + iter->iomap.length;

	if (iter->srcmap.type != IOMAP_HOLE)
		end = min(end, iter->srcmap.offset + iter->srcmap.length);
	return min(len, end - pos);
}

/**
 * iomap_length - length of the current iomap iteration
 * @iter: iteration structure
 *
 * Returns the length that the operation applies to for the current iteration.
 */
static inline u64 iomap_length(const struct iomap_iter *iter)
{
	return iomap_length_trim(iter, iter->pos, iter->len);
}

/**
 * iomap_iter_advance_full - advance by the full length of current map
 */
static inline int iomap_iter_advance_full(struct iomap_iter *iter)
{
	return iomap_iter_advance(iter, iomap_length(iter));
}

/**
 * iomap_iter_srcmap - return the source map for the current iomap iteration
 * @i: iteration structure
 *
 * Write operations on file systems with reflink support might require a
 * source and a destination map.  This function returns the source map
 * for a given operation, which may or may not be identical to the destination
 * map in &i->iomap.
 */
static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
{
	if (i->srcmap.type != IOMAP_HOLE)
		return &i->srcmap;
	return &i->iomap;
}

/*
 * Return the file offset for the first unchanged block after a short write.
 *
 * If nothing was written, round @pos down to point at the first block in
 * the range, else round up to include the partially written block.
 */
static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
		ssize_t written)
{
	if (unlikely(!written))
		return round_down(pos, i_blocksize(inode));
	return round_up(pos + written, i_blocksize(inode));
}
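
/*
 * Worked example with a 4096-byte block size: for pos = 10000 and
 * written = 3000 the result is round_up(13000, 4096) = 16384, while
 * written = 0 yields round_down(10000, 4096) = 8192.
 */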

/*
 * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
 * operation.
 *
 * Don't bother with blocks that are not shared to start with; or mappings that
 * cannot be shared, such as inline data, delalloc reservations, holes or
 * unwritten extents.
 *
 * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
 * requires providing a separate source map, and the presence of one is a good
 * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
 * for any data that goes into the COW fork for XFS.
 */
static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
{
	return (iter->iomap.flags & IOMAP_F_SHARED) &&
		iter->srcmap.type == IOMAP_MAPPED;
}

ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
void iomap_read_folio(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx);
void iomap_readahead(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops);
loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
		loff_t length);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
		void *private);
typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
		struct iomap *iomap);
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch);

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops);
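
/*
 * Sketch of a ->write_iter that feeds buffered writes through iomap, loosely
 * modeled on existing iomap users; myfs_iomap_ops is assumed to be the
 * filesystem's struct iomap_ops, and no custom write_ops or private data are
 * passed:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops, NULL, NULL);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */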

/*
 * Flags for iomap_ioend->io_flags.
 */
/* shared COW extent */
#define IOMAP_IOEND_SHARED		(1U << 0)
/* unwritten extent */
#define IOMAP_IOEND_UNWRITTEN		(1U << 1)
/* don't merge into previous ioend */
#define IOMAP_IOEND_BOUNDARY		(1U << 2)
/* is direct I/O */
#define IOMAP_IOEND_DIRECT		(1U << 3)
/* is DONTCACHE I/O */
#define IOMAP_IOEND_DONTCACHE		(1U << 4)

/*
 * Flags that, if set on either ioend, prevent two ioends from being merged.
 * (IOMAP_IOEND_BOUNDARY also prevents merges, but only one-way)
 */
#define IOMAP_IOEND_NOMERGE_FLAGS \
	(IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT | \
	 IOMAP_IOEND_DONTCACHE)

/*
 * Structure for writeback I/O completions.
 *
 * File systems can split a bio generated by iomap.  In that case the parent
 * ioend it was split from is recorded in ioend->io_parent.
 */
struct iomap_ioend {
	struct list_head	io_list;	/* next ioend in chain */
	u16			io_flags;	/* IOMAP_IOEND_* */
	struct inode		*io_inode;	/* file being written to */
	size_t			io_size;	/* size of the extent */
	atomic_t		io_remaining;	/* completion defer count */
	int			io_error;	/* stashed away status */
	struct iomap_ioend	*io_parent;	/* parent for completions */
	loff_t			io_offset;	/* offset in the file */
	sector_t		io_sector;	/* start sector of ioend */
	void			*io_private;	/* file system private data */
	struct bio		io_bio;		/* MUST BE LAST! */
};

static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
{
	return container_of(bio, struct iomap_ioend, io_bio);
}
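
/*
 * Illustrative sketch of a custom writeback bio completion that recovers the
 * ioend and finishes it directly; a filesystem that cannot complete ioends
 * in bio completion context would instead queue the ioend to a workqueue
 * here:
 *
 *	static void myfs_writeback_end_bio(struct bio *bio)
 *	{
 *		struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
 *
 *		iomap_finish_ioends(ioend,
 *				blk_status_to_errno(bio->bi_status));
 *	}
 */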

struct iomap_writeback_ops {
	/*
	 * Performs writeback on the passed-in range.
	 *
	 * Can map arbitrarily large regions, but we need to call into it at
	 * least once per folio to allow the file systems to synchronize with
	 * the write path that could be invalidating mappings.
	 *
	 * An existing mapping from a previous call to this method can be reused
	 * by the file system if it is still valid.
	 *
	 * If this succeeds, iomap_finish_folio_write() must be called once
	 * writeback completes for the range, regardless of whether the
	 * writeback succeeded or failed.
	 *
	 * Returns the number of bytes processed or a negative errno.
	 */
	ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
			struct folio *folio, u64 pos, unsigned int len,
			u64 end_pos);

	/*
	 * Submit a writeback context previously built up by ->writeback_range.
	 *
	 * Returns 0 if the context was successfully submitted, or a negative
	 * error code if not.  If @error is non-zero a failure occurred, and
	 * the writeback context should be completed with an error.
	 */
	int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
};

struct iomap_writepage_ctx {
	struct iomap		iomap;
	struct inode		*inode;
	struct writeback_control *wbc;
	const struct iomap_writeback_ops *ops;
	u32			nr_folios;	/* folios added to the ioend */
	void			*wb_ctx;	/* pending writeback context */
};
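
/*
 * Typical ->writepages wiring, as seen in existing iomap users, assuming
 * myfs_writeback_ops implements struct iomap_writeback_ops above:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &myfs_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 */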

struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
		loff_t file_offset, u16 ioend_flags);
struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
		unsigned int max_len, bool is_append);
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
		struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
		loff_t pos, loff_t end_pos, unsigned int dirty_len);
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);

void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
		int error);
void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len);

int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
int iomap_writepages(struct iomap_writepage_ctx *wpc);

struct iomap_read_folio_ctx {
	const struct iomap_read_ops *ops;
	struct folio		*cur_folio;
	struct readahead_control *rac;
	void			*read_ctx;
};

struct iomap_read_ops {
	/*
	 * Read in a folio range.
	 *
	 * If this succeeds, iomap_finish_folio_read() must be called after the
	 * range is read in, regardless of whether the read succeeded or failed.
	 *
	 * Returns 0 on success or a negative error on failure.
	 */
	int (*read_folio_range)(const struct iomap_iter *iter,
			struct iomap_read_folio_ctx *ctx, size_t len);

	/*
	 * Submit any pending read requests.
	 *
	 * This is optional.
	 */
	void (*submit_read)(struct iomap_read_folio_ctx *ctx);
};

/*
 * Flags for direct I/O ->end_io:
 */
#define IOMAP_DIO_UNWRITTEN	(1 << 0)	/* covers unwritten extent(s) */
#define IOMAP_DIO_COW		(1 << 1)	/* covers COW extent(s) */

struct iomap_dio_ops {
	int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
		      unsigned flags);
	void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
		          loff_t file_offset);

	/*
	 * Filesystems wishing to attach private information to a direct io bio
	 * must provide a ->submit_io method that attaches the additional
	 * information to the bio and changes the ->bi_end_io callback to a
	 * custom function.  This function should, at a minimum, perform any
	 * relevant post-processing of the bio and end with a call to
	 * iomap_dio_bio_end_io.
	 */
	struct bio_set *bio_set;
};
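
/*
 * Sketch of an ->end_io handler that converts unwritten extents once the
 * data has safely landed; myfs_convert_unwritten() is a hypothetical helper,
 * and iocb->ki_pos has not yet been advanced when ->end_io runs:
 *
 *	static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned flags)
 *	{
 *		if (error)
 *			return error;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return myfs_convert_unwritten(
 *					file_inode(iocb->ki_filp),
 *					iocb->ki_pos, size);
 *		return 0;
 *	}
 */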

/*
 * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
 * synchronous.
 */
#define IOMAP_DIO_FORCE_WAIT	(1 << 0)

/*
 * Do not allocate blocks or zero partial blocks, but instead fall back to
 * the caller by returning -EAGAIN.  Used to optimize direct I/O writes that
 * are not aligned to the file system block size.
 */
#define IOMAP_DIO_OVERWRITE_ONLY	(1 << 1)

/*
 * When a page fault occurs, return a partial synchronous result and allow
 * the caller to retry the rest of the operation after dealing with the page
 * fault.
 */
#define IOMAP_DIO_PARTIAL		(1 << 2)

/*
 * Ensure each bio is aligned to fs block size.
 *
 * For filesystems which need to calculate/verify the checksum of each fs
 * block. Otherwise they may not be able to handle unaligned bios.
 */
#define IOMAP_DIO_FSBLOCK_ALIGNED	(1 << 3)

ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
void iomap_dio_bio_end_io(struct bio *bio);
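
/*
 * Direct I/O is typically driven from ->read_iter/->write_iter.  A minimal
 * read-side call, with no dio ops, no flags, no private data and nothing
 * done before, might look like this, where myfs_iomap_ops is the
 * filesystem's struct iomap_ops and @to the destination iov_iter:
 *
 *	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL, 0, NULL, 0);
 */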

#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)	(-EIO)
#endif /* CONFIG_SWAP */
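
/*
 * A ->swap_activate implementation can usually just forward to the helper
 * above, as existing iomap users do:
 *
 *	static int myfs_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&myfs_iomap_ops);
 *	}
 */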

extern struct bio_set iomap_ioend_bioset;

#ifdef CONFIG_BLOCK
extern const struct iomap_read_ops iomap_bio_read_ops;

static inline void iomap_bio_read_folio(struct folio *folio,
		const struct iomap_ops *ops)
{
	struct iomap_read_folio_ctx ctx = {
		.ops		= &iomap_bio_read_ops,
		.cur_folio	= folio,
	};

	iomap_read_folio(ops, &ctx);
}

static inline void iomap_bio_readahead(struct readahead_control *rac,
		const struct iomap_ops *ops)
{
	struct iomap_read_folio_ctx ctx = {
		.ops		= &iomap_bio_read_ops,
		.rac		= rac,
	};

	iomap_readahead(ops, &ctx);
}
#endif /* CONFIG_BLOCK */

#endif /* LINUX_IOMAP_H */