/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */
	BH_Migrate,	/* Buffer is being migrated (norefs) */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a folio (via a folio_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct mapping_metadata_bhs *b_mmb; /* head of the list of metadata bhs
					     * this buffer is associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
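/*
 * For illustration, an invocation such as BUFFER_FNS(Dirty, dirty) (used
 * below) expands to roughly:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */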
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (e.g. BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * Make it consistent with folio_mark_uptodate(); pairs with the
	 * acquire in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate(); pairs with the
	 * smp_mb__before_atomic() in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
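/*
 * Illustrative sketch (not part of this header): because of the pairing
 * above, a reader which observes buffer_uptodate() is guaranteed to see
 * the data that was written before the corresponding set_buffer_uptodate().
 * A typical completion/reader pattern looks like:
 *
 *	// completion side
 *	... fill bh->b_data ...
 *	set_buffer_uptodate(bh);	// publishes the data (release side)
 *
 *	// reader side
 *	if (buffer_uptodate(bh))	// acquire side
 *		memcpy(dst, bh->b_data, bh->b_size);	// sees valid data
 */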
static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define folio_buffers(folio)		folio_get_private(folio)

void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with the metadata buffers list */
void mmb_mark_buffer_dirty(struct buffer_head *bh, struct mapping_metadata_bhs *mmb);
int mmb_fsync_noflush(struct file *file, struct mapping_metadata_bhs *mmb,
		      loff_t start, loff_t end, bool datasync);
int mmb_fsync(struct file *file, struct mapping_metadata_bhs *mmb,
	      loff_t start, loff_t end, bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
				     sector_t block, unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
				unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);
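/*
 * Illustrative sketch of the typical metadata read/modify/write cycle a
 * buffer_head-backed filesystem performs with the helpers above (the
 * surrounding logic is hypothetical; sb_bread() is defined further down
 * in this header):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	if (must_be_durable)
 *		sync_dirty_buffer(bh);	// wait for the write now, otherwise
 *					// leave it to periodic writeback
 *	brelse(bh);
 */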
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
			   void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			     get_block_t *get_block,
			     struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct folio **foliop, get_block_t *get_block);
int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *);
int generic_write_end(const struct kiocb *, struct address_space *,
		      loff_t, unsigned len, unsigned copied,
		      struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(const struct kiocb *, struct address_space *, loff_t,
		     unsigned, struct folio **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif
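/*
 * Illustrative sketch (the "myfs_" names are hypothetical, aops field names
 * as in the current struct address_space_operations): a filesystem whose
 * data path is backed by buffer_heads typically wires these generic helpers
 * into its address_space_operations and only supplies its own get_block_t:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *		.migrate_folio		= buffer_migrate_folio,
 *	};
 */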
/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

/**
 * brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * Decrement a buffer_head's reference count.  If @bh is NULL, this
 * function is a no-op.
 *
 * If all buffers on a folio have zero reference count, are clean
 * and unlocked, and if the folio is unlocked and not under writeback
 * then try_to_free_buffers() may strip the buffers from the folio in
 * preparation for freeing it (sometimes, rarely, buffers are removed
 * from a folio but it ends up not being freed, and buffers may later
 * be reattached).
 *
 * Context: Any context.
 */
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

/**
 * bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * Call this function instead of brelse() if the data written to a buffer
 * no longer needs to be written back.  It will clear the buffer's dirty
 * flag so writeback of this buffer will be skipped.
 *
 * Context: Any context.
 */
static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
{
	return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/*
 * Returns 1 if the buffer was already uptodate, 0 if it was read
 * successfully, and -EIO on error.
 */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}
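/*
 * Illustrative sketch of handling bh_read()'s three return values (the
 * caller and its error handling are hypothetical):
 *
 *	err = bh_read(bh, 0);
 *	if (err < 0)
 *		goto out_err;	// -EIO: the read failed
 *	// err == 1: buffer was already uptodate, no I/O was issued
 *	// err == 0: the block was read in and is now uptodate
 *	... use bh->b_data ...
 */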
static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 *
 * Read a specified block, and return the buffer head that refers
 * to it.  The memory is allocated from the movable area so that it can
 * be migrated.  The returned buffer head has its refcount increased.
 * The caller should call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
static inline struct buffer_head *__bread(struct block_device *bdev,
					  sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
void mmb_init(struct mapping_metadata_bhs *mmb, struct address_space *mapping);
bool mmb_has_buffers(struct mapping_metadata_bhs *mmb);
void mmb_invalidate(struct mapping_metadata_bhs *mmb);
int mmb_sync(struct mapping_metadata_bhs *mmb);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int mmb_sync(struct mapping_metadata_bhs *mmb) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */