// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 *	Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */


typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }
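
/*
 * XFS_BUF_FLAGS is a flag/name table for decoding b_flags in tracepoints.
 * A sketch of how a TP_printk() clause consumes it (modelled on the usage
 * in fs/xfs/xfs_trace.h, simplified here):
 *
 *	TP_printk("... flags %s ...",
 *		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
 */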

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

struct xfs_buf_cache {
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct file		*bt_file;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_readahead_count;
	struct ratelimit_state	bt_ioerror_rl;

	/* Hardware atomic write unit values, bytes */
	unsigned int		bt_awu_min;
	unsigned int		bt_awu_max;

	/* built-in cache, if we're not using the perag one */
	struct xfs_buf_cache	bt_cache[];
};
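
/*
 * The two sectormask fields cache (sector size - 1), so alignment checks
 * reduce to a mask test.  An illustrative sketch, not a helper this header
 * provides:
 *
 *	// true if a byte offset is aligned to the device logical sector size
 *	static inline bool foo_offset_aligned(struct xfs_buftarg *btp, loff_t off)
 *	{
 *		return (off & btp->bt_logical_sectormask) == 0;
 *	}
 */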

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN		(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
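
/*
 * A discontiguous buffer is described by an array of maps.  A hedged sketch
 * of a two-extent lookup (the block numbers and lengths are made up for the
 * example):
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	struct xfs_buf *bp;
 *	int error = xfs_buf_get_map(btp, map, 2, 0, &bp);
 */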

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
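
/*
 * Each on-disk structure supplies one of these tables to wire up its
 * verifiers.  A minimal sketch for a hypothetical "foo" structure (the
 * names and magic macros below are illustrative, not a real XFS verifier):
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */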

struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	unsigned int		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	int			b_error;	/* error code on I/O */
	void			(*b_iodone)(struct xfs_buf *bp);

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to have failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffy of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};
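
/*
 * A hedged sketch of how the retry fields above interact in the async write
 * iodone path (the real logic lives in xfs_buf.c and consults the
 * configurable per-error limits; this is illustrative only):
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		// a different error: restart the retry state
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies, bp->b_first_retry_time + timeout)) {
 *		// repeated failures for too long: fail the buffer permanently
 *	}
 */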

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	xfs_buf_readahead_map(target, &map, 1, ops);
}
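
/*
 * A typical single-extent metadata read, sketched with minimal error
 * handling (xfs_foo_buf_ops is a placeholder verifier, not a real one):
 *
 *	struct xfs_buf *bp;
 *	int error;
 *
 *	error = xfs_buf_read(btp, blkno, numblks, 0, &bp, &xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *	// ... read from bp->b_addr ...
 *	xfs_buf_relse(bp);	// unlock and drop the reference
 */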

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
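
/*
 * An illustrative lock-avoidance pattern: try the lock first and only block
 * when it is contended (a sketch, not a helper defined here):
 *
 *	if (!xfs_buf_trylock(bp))
 *		xfs_buf_lock(bp);	// contended: wait for the holder
 *	ASSERT(xfs_buf_islocked(bp));
 *	// ... modify the buffer ...
 *	xfs_buf_unlock(bp);
 */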

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
static inline void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
{
	return bp->b_addr + offset;
}

static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
{
	memset(bp->b_addr + boff, 0, bsize);
}
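
/*
 * xfs_buf_offset() and xfs_buf_zero() address the buffer contents by byte
 * offset.  A small sketch (struct xfs_foo_hdr and FOO_HDR_OFFSET are
 * hypothetical):
 *
 *	struct xfs_foo_hdr *hdr = xfs_buf_offset(bp, FOO_HDR_OFFSET);
 *
 *	// wipe the header region before rewriting it
 *	xfs_buf_zero(bp, FOO_HDR_OFFSET, sizeof(*hdr));
 */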

extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
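
/*
 * Delayed-write buffers are staged on a caller-owned list and submitted as a
 * batch.  A hedged sketch of the common pattern:
 *
 *	LIST_HEAD(buffer_list);
 *	int error;
 *
 *	// stage each locked buffer; returns false if it was already queued
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);	// waits for completion
 */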

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
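
/*
 * CRC maintenance normally happens in the verifiers: verify on read, update
 * on write.  A minimal write-verifier tail, sketched with a placeholder
 * XFS_FOO_CRC_OFF for the structure's CRC field offset:
 *
 *	static void
 *	xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		// ... structural checks, xfs_buf_ioerror() on failure ...
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 *	}
 */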

/*
 *	Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize);

#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

/* for xfs_buf_mem.c only: */
int xfs_init_buftarg(struct xfs_buftarg *btp, size_t logical_sectorsize,
		const char *descr);
void xfs_destroy_buftarg(struct xfs_buftarg *btp);

#endif	/* __XFS_BUF_H__ */