/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */
	BH_Migrate,	/* Buffer is being migrated (norefs) */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a folio (via a folio_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that causes
 * a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
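
/*
 * As an illustration (these helpers are generated, not separately defined),
 * BUFFER_FNS(Dirty, dirty) above expands to roughly:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &bh->b_state))
 *			set_bit(BH_Dirty, &bh->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &bh->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &bh->b_state);
 *	}
 *
 * while TAS_BUFFER_FNS(Dirty, dirty) adds test_set_buffer_dirty() and
 * test_clear_buffer_dirty() on top of that.
 */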

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
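
/*
 * Illustrative sketch only: the pairing above matters when one context
 * fills a buffer and another consumes it without holding the buffer lock.
 * A producer publishes the data before the flag:
 *
 *	memcpy(bh->b_data, src, bh->b_size);
 *	set_buffer_uptodate(bh);
 *
 * and a consumer's acquire-ordered test of the flag orders its later reads
 * of the data (process() is a hypothetical consumer):
 *
 *	if (buffer_uptodate(bh))
 *		process(bh->b_data, bh->b_size);
 */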

static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define folio_buffers(folio)		folio_get_private(folio)
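
/*
 * Illustrative sketch (assuming the caller holds whatever locks its
 * filesystem requires): the buffers attached to a folio form a circular,
 * singly linked list through b_this_page, and folio_buffers() returns the
 * first of them, so a walk over every buffer in a folio typically looks
 * like this (handle_bh() being a hypothetical per-buffer action):
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		handle_bh(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */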

void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
			sector_t block, unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
		void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
		get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct folio **foliop, get_block_t *get_block);
int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned len, unsigned copied,
				struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned len, unsigned copied,
				struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct folio **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif
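
/*
 * Illustrative sketch of how a buffer_head-backed filesystem typically wires
 * these helpers into its address_space_operations (the myfs_* names are
 * hypothetical placeholders for the filesystem's own code):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *		.migrate_folio		= buffer_migrate_folio,
 *	};
 */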

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

/**
 * brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * Decrement a buffer_head's reference count.  If @bh is NULL, this
 * function is a no-op.
 *
 * If all buffers on a folio have zero reference count, are clean
 * and unlocked, and if the folio is unlocked and not under writeback
 * then try_to_free_buffers() may strip the buffers from the folio in
 * preparation for freeing it (sometimes, rarely, buffers are removed
 * from a folio but it ends up not being freed, and buffers may later
 * be reattached).
 *
 * Context: Any context.
 */
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

/**
 * bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * Call this function instead of brelse() if the data written to a buffer
 * no longer needs to be written back.  It will clear the buffer's dirty
 * flag so writeback of this buffer will be skipped.
 *
 * Context: Any context.
 */
static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
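
/*
 * Illustrative sketch of the usual read-use-release pattern (myfs_use_block()
 * is a hypothetical consumer of the block's contents):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	myfs_use_block(bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 *
 * sb_bread() returns the buffer with an elevated refcount and its contents
 * up to date, or NULL if the block could not be read, so every successful
 * call must be paired with brelse() (or bforget() if the data should not be
 * written back).
 */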

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
{
	return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
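
/*
 * Illustrative sketch of a get_block_t implementation built on map_bh()
 * (myfs_lookup_block() and myfs_alloc_block() are hypothetical filesystem
 * helpers that translate a file-relative block number into a disk block):
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = myfs_lookup_block(inode, iblock);
 *
 *		if (!phys) {
 *			if (!create)
 *				return 0;
 *			phys = myfs_alloc_block(inode, iblock);
 *			if (!phys)
 *				return -ENOSPC;
 *			set_buffer_new(bh_result);
 *		}
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */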

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/*
 * Returns 1 if the buffer is already uptodate, 0 after a successful read,
 * and -EIO on error.
 */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}
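
/*
 * Illustrative sketch: a caller that needs a block's contents synchronously
 * can combine sb_getblk() with bh_read(), treating only negative returns as
 * failure since both 0 and 1 mean the buffer is now uptodate:
 *
 *	struct buffer_head *bh = sb_getblk(sb, block);
 *
 *	if (!bh)
 *		return -ENOMEM;
 *	if (bh_read(bh, 0) < 0) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 */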

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 *
 * Read a specified block, and return the buffer head that refers
 * to it.  The memory is allocated from the movable area so that it can
 * be migrated.  The returned buffer head has its refcount increased.
 * The caller should call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
static inline struct buffer_head *__bread(struct block_device *bdev,
		sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}
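
/*
 * Illustrative sketch: finding the buffer that covers a byte offset within a
 * folio, as described in the kernel-doc above (blkbits would come from the
 * inode or super_block in real code), and dropping the reference when done:
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = get_nth_bh(head, offset >> blkbits);
 *
 *	...
 *	put_bh(bh);
 */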

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */