// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	(1u << 0)  /* buffer intended for reading from device */
#define XBF_WRITE	(1u << 1)  /* buffer intended for writing to device */
#define XBF_READ_AHEAD	(1u << 2)  /* asynchronous read-ahead */
#define XBF_ASYNC	(1u << 4)  /* initiator will not wait for completion */
#define XBF_DONE	(1u << 5)  /* all pages in the buffer uptodate */
#define XBF_STALE	(1u << 6)  /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	(1u << 7)  /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_LOGRECOVERY	(1u << 18) /* log recovery buffer */

/* flags used only internally */
#define _XBF_KMEM	(1u << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	(1u << 22) /* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	(1u << 28)
#define XBF_INCORE	(1u << 29) /* lookup only, return if found in cache */
#define XBF_TRYLOCK	(1u << 30) /* lock requested, but do not wait */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }
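
/*
 * Illustrative sketch, not part of the interface: the lookup-only flags
 * above (XBF_LIVESCAN, XBF_INCORE, XBF_TRYLOCK) are passed to the access
 * routines declared later in this header and are never stored in b_flags.
 * A caller probing the cache without blocking might combine them like this
 * (blkno and numblks are hypothetical, error handling elided):
 *
 *	struct xfs_buf	*bp;
 *
 *	if (!xfs_buf_incore(target, blkno, numblks, XBF_TRYLOCK, &bp)) {
 *		... found, locked without sleeping; drop it when done ...
 *		xfs_buf_relse(bp);
 *	}
 */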

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

struct xfs_buf_cache {
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The former is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.  (An illustrative use of the derived masks follows
 * the structure definition below.)
 */
struct xfs_buftarg {
	dev_t			bt_dev;
	struct file		*bt_bdev_file;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct file		*bt_file;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_readahead_count;
	struct ratelimit_state	bt_ioerror_rl;

	/* Atomic write unit values */
	unsigned int		bt_bdev_awu_min;
	unsigned int		bt_bdev_awu_max;

	/* built-in cache, if we're not using the perag one */
	struct xfs_buf_cache	bt_cache[];
};
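
/*
 * Illustrative sketch (an assumption about the mask fields above, not code
 * taken from the buftarg setup path): the sector masks are the usual
 * "size - 1" masks for power-of-two sector sizes, so a hypothetical helper
 * checking whether a byte offset is suitably aligned for direct IO on this
 * target could look like:
 *
 *	static inline bool
 *	xfs_buftarg_pos_aligned(struct xfs_buftarg *btp, loff_t pos)
 *	{
 *		return (pos & btp->bt_logical_sectormask) == 0;
 *	}
 */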

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN	(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
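
/*
 * Illustrative sketch: DEFINE_SINGLE_BUF_MAP() covers the common
 * single-extent case, while a discontiguous buffer passes an array of maps
 * to the _map variants declared below.  The block numbers and lengths here
 * are hypothetical:
 *
 *	struct xfs_buf_map	map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	error = xfs_buf_read_map(target, map, 2, 0, &bp, ops,
 *			__this_address);
 */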

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
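
/*
 * Illustrative sketch of how an ops table is typically wired up; the
 * structure layout, magic value, and callback below are hypothetical and
 * only show the shape of a verifier, not a real one:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		struct xfs_foo_hdr	*hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->magic))
 *			xfs_buf_mark_corrupt(bp);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *	};
 */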

struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed.  The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	unsigned int		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	int			b_error;	/* error code on I/O */
	void			(*b_iodone)(struct xfs_buf *bp);

	/*
	 * Async write failure retry count.  It is initialised to zero on the
	 * first failure; once it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed and
	 * the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffy of the first failure.  This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated errors,
	 * not different errors.  E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.  (A sketch of
	 * how these fields drive the retry decision follows the structure
	 * definition below.)
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};
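
/*
 * Illustrative sketch (an assumption modelled on the comment above, not
 * copied from the I/O completion code): the retry fields roughly drive a
 * decision like the following, where max_retries and retry_timeout stand in
 * for the administrator-configured limits:
 *
 *	if (bp->b_error != bp->b_last_error) {
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *	}
 *	if (++bp->b_retries > max_retries ||
 *	    time_after(jiffies, bp->b_first_retry_time + retry_timeout))
 *		permanent_failure = true;
 */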

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp);
void xfs_buf_hold(struct xfs_buf *bp);
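
/*
 * Illustrative sketch: the usual pattern for reading a metadata buffer with
 * the helpers above.  The blkno, numblks and xfs_foo_buf_ops values are
 * hypothetical, and error handling is kept minimal:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(target, blkno, numblks, 0, &bp,
 *			&xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *	... use bp->b_addr while the buffer is held and locked ...
 *	xfs_buf_relse(bp);
 */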

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
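
/*
 * Illustrative sketch: contexts that must not sleep take the lock with
 * xfs_buf_trylock() and back off on contention instead of blocking in
 * xfs_buf_lock().  Assuming the caller already holds a reference:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	... operate on the locked buffer ...
 *	xfs_buf_unlock(bp);
 */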

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
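
/*
 * Illustrative sketch: xfs_buf_ioerror() records an errno value against the
 * buffer, while xfs_buf_mark_corrupt() flags structural damage; both macros
 * pass the caller's location via __this_address.  A hypothetical write
 * verifier might use them like this (xfs_foo_structure_ok is made up for
 * the example):
 *
 *	static void
 *	xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_structure_ok(bp->b_addr))
 *			xfs_buf_mark_corrupt(bp);
 *	}
 */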

/* Buffer Utility Routines */
static inline void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
{
	return bp->b_addr + offset;
}

static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
{
	memset(bp->b_addr + boff, 0, bsize);
}

extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
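
/*
 * Illustrative sketch: delayed-write buffers are batched on a caller-owned
 * list and pushed later.  A minimal user of the routines above might look
 * like this (error handling elided):
 *
 *	LIST_HEAD(buffer_list);
 *	int	error;
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	... queue further buffers on the same list ...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */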

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}
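
/*
 * Illustrative sketch: a caller that knows it will not revisit a buffer can
 * mark it one-shot before releasing it, so the cache does not keep it
 * around:
 *
 *	xfs_buf_oneshot(bp);
 *	xfs_buf_relse(bp);
 */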

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
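
/*
 * Illustrative sketch: CRC-enabled metadata typically checks the checksum in
 * its read verifier and recomputes it in its write verifier, passing the
 * byte offset of the CRC field within the on-disk structure.  The
 * XFS_FOO_CRC_OFF offset is hypothetical:
 *
 *	On the read side:
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			xfs_buf_ioerror(bp, -EFSBADCRC);
 *
 *	On the write side:
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 */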

/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

/* for xfs_buf_mem.c only: */
int xfs_init_buftarg(struct xfs_buftarg *btp, size_t logical_sectorsize,
		const char *descr);
void xfs_destroy_buftarg(struct xfs_buftarg *btp);

#endif	/* __XFS_BUF_H__ */