xref: /linux/fs/xfs/xfs_buf.h (revision b1a54551dd9ed5ef1763b97b35a0999ca002b95c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 *	Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16) /* inode buffer */
#define _XBF_DQUOTS	 (1u << 17) /* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18) /* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20) /* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22) /* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29) /* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30) /* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31) /* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
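
/*
 * Illustrative sketch (not part of this header): lookup flags are OR'ed
 * together and passed to the access routines declared below.  A hypothetical
 * caller that wants a cached, verified buffer without blocking on the buffer
 * lock might do:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(btp, blkno, numblks, XBF_TRYLOCK, &bp, ops);
 *	if (error == -EAGAIN)
 *		return error;
 *
 * Here "btp", "blkno", "numblks" and "ops" are assumed to come from the
 * caller, and -EAGAIN is the assumed lock-contention result of the trylock
 * path.  Only the XBF_* values are valid interface flags; the _XBF_* values
 * describe internal state.
 */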

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of I/O performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct I/O
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct bdev_handle	*bt_bdev_handle;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
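
/*
 * Illustrative sketch (an inference from the field names, not a guarantee):
 * the *_sectormask fields are "size - 1" masks, so alignment checks reduce
 * to a bitwise AND.  A byte offset "pos" is suitably aligned for direct I/O
 * to the device when:
 *
 *	bool aligned = (pos & btp->bt_logical_sectormask) == 0;
 *
 * and metadata I/O must satisfy the (usually stricter) bt_meta_sectormask
 * in the same way.
 */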

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN		(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
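
/*
 * Illustrative sketch: a discontiguous ("compound") buffer is described by
 * an array of maps, one entry per extent.  A hypothetical two-extent buffer
 * read might look like:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_map(btp, map, 2, 0, &bp, ops, fa);
 *
 * (xfs_buf_read_map() is declared below.)  The single-map case is common
 * enough that DEFINE_SINGLE_BUF_MAP and the inline wrappers further down
 * hide the array entirely.
 */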

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
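
/*
 * Illustrative sketch of a verifier (a hypothetical "foo" structure, loosely
 * modelled on the real verifiers in libxfs): verify_read runs after read
 * completion, verify_write runs before writeback, and both typically share
 * the structural check:
 *
 *	static xfs_failaddr_t
 *	xfs_foo_verify_struct(struct xfs_buf *bp)
 *	{
 *		struct xfs_foo_hdr *hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->magic))
 *			return __this_address;
 *		return NULL;
 *	}
 *
 *	static void
 *	xfs_foo_verify_read(struct xfs_buf *bp)
 *	{
 *		xfs_failaddr_t fa = xfs_foo_verify_struct(bp);
 *
 *		if (fa)
 *			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
 *	}
 *
 * xfs_foo_hdr is invented for illustration; xfs_verify_magic() is declared
 * at the bottom of this header, and xfs_verifier_error() lives in
 * xfs_error.h.
 */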

struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only part that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains buffer hash table */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered to have failed
	 * permanently and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the time (in jiffies) of the first
	 * failure. This means that we can change the retry timeout for buffers
	 * already under I/O and thus avoid getting stuck in a retry loop with
	 * a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
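
/*
 * Illustrative sketch: a typical synchronous metadata read through the
 * wrapper above.  "mp" is an xfs_mount (m_ddev_targp is its data device
 * buftarg) and "xfs_foo_buf_ops" is a hypothetical verifier:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			&xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *
 * On success the buffer is returned locked and held with verified contents;
 * access them through bp->b_addr, then release the buffer with
 * xfs_buf_relse() (declared below), which unlocks it and drops the hold.
 */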

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
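
/*
 * Illustrative sketch: contexts that must not sleep take the buffer lock
 * with the trylock variant and back off on contention:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	ASSERT(xfs_buf_islocked(bp));
 *	... modify the buffer ...
 *	xfs_buf_relse(bp);
 *
 * xfs_buf_trylock() returns non-zero if the lock was acquired.  The -EAGAIN
 * return and the ASSERT() are an assumed caller convention for this example,
 * not something mandated by this header.
 */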

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
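
/*
 * Illustrative sketch: delayed writes are batched onto a caller-owned list
 * and submitted together.  A hypothetical writeback loop:
 *
 *	LIST_HEAD(buffer_list);
 *	int		error;
 *
 *	... for each dirty buffer bp, with the buffer lock held:
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_queue() returns false if the buffer was already queued.
 * xfs_buf_delwri_submit() writes the list back and waits for completion;
 * the _nowait variant issues the I/O without waiting.
 */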

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
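
/*
 * Illustrative sketch: the checksum helpers above are typically called from
 * verifiers, with the CRC offset taken from the on-disk structure layout.
 * Using a hypothetical XFS_FOO_CRC_OFF offsetof() constant (the real
 * equivalents are the XFS_*_CRC_OFF macros in the libxfs headers):
 *
 *	read side, before the structural checks:
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			... fail the read with -EFSBADCRC ...
 *
 *	write side, after the structural checks pass:
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 */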

/*
 *	Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct bdev_handle *bdev_handle);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */