Lines Matching refs:bh
57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
62 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
64 trace_block_touch_buffer(bh); in touch_buffer()
65 folio_mark_accessed(bh->b_folio); in touch_buffer()
69 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
71 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
75 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
77 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
79 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
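The three helpers above implement the BH_Lock discipline: __lock_buffer() sleeps on the bit, unlock_buffer() clears it and wakes waiters parked in __wait_on_buffer(). A minimal caller-side sketch (the function name is illustrative, not from this file):

#include <linux/buffer_head.h>

/* Sketch: serialize an update to a buffer's contents on BH_Lock. */
static void example_update_block(struct buffer_head *bh)
{
        lock_buffer(bh);                /* may sleep in __lock_buffer() */
        /* ... modify bh->b_data while holding the lock ... */
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);              /* clears BH_Lock, wakes __wait_on_buffer() callers */
}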
91 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
104 bh = head; in buffer_check_dirty_writeback()
106 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
109 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
112 bh = bh->b_this_page; in buffer_check_dirty_writeback()
113 } while (bh != head); in buffer_check_dirty_writeback()
121 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
127 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
129 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
132 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
146 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
149 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
151 unlock_buffer(bh); in __end_buffer_read_notouch()
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
160 put_bh(bh); in end_buffer_read_sync()
161 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
168 set_buffer_uptodate(bh); in end_buffer_write_sync()
170 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
171 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
172 clear_buffer_uptodate(bh); in end_buffer_write_sync()
174 unlock_buffer(bh); in end_buffer_write_sync()
175 put_bh(bh); in end_buffer_write_sync()
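end_buffer_read_sync() and end_buffer_write_sync() are the completion handlers for plain synchronous buffer I/O. A hedged sketch of how a caller wires up the write-side handler; it mirrors what __sync_dirty_buffer() (further down in this listing) does, and the helper name is invented:

#include <linux/buffer_head.h>

static int example_write_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (!test_clear_buffer_dirty(bh)) {
                unlock_buffer(bh);              /* nothing to write */
                return 0;
        }
        get_bh(bh);                             /* reference dropped by end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
        wait_on_buffer(bh);                     /* completion handler unlocks the buffer */
        return buffer_uptodate(bh) ? 0 : -EIO;
}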
186 struct buffer_head *bh; in __find_get_block_slow() local
219 bh = head; in __find_get_block_slow()
221 if (!buffer_mapped(bh)) in __find_get_block_slow()
223 else if (bh->b_blocknr == block) { in __find_get_block_slow()
224 ret = bh; in __find_get_block_slow()
225 get_bh(bh); in __find_get_block_slow()
228 bh = bh->b_this_page; in __find_get_block_slow()
229 } while (bh != head); in __find_get_block_slow()
242 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
243 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
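__find_get_block_slow() shows the circular-list walk used throughout this file: a folio's buffers are linked into a ring through b_this_page. A small sketch of the same pattern, assuming the caller holds the folio lock (or the mapping's private lock) so the ring cannot change underneath it; example_count_dirty() is hypothetical:

#include <linux/buffer_head.h>

static unsigned int example_count_dirty(struct folio *folio)
{
        struct buffer_head *head = folio_buffers(folio);
        struct buffer_head *bh = head;
        unsigned int nr = 0;

        if (!head)                      /* no buffers attached to this folio */
                return 0;
        do {
                if (buffer_dirty(bh))
                        nr++;
                bh = bh->b_this_page;   /* next buffer in the ring */
        } while (bh != head);
        return nr;
}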
256 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
264 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
266 folio = bh->b_folio; in end_buffer_async_read()
268 set_buffer_uptodate(bh); in end_buffer_async_read()
270 clear_buffer_uptodate(bh); in end_buffer_async_read()
271 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
281 clear_buffer_async_read(bh); in end_buffer_async_read()
282 unlock_buffer(bh); in end_buffer_async_read()
283 tmp = bh; in end_buffer_async_read()
292 } while (tmp != bh); in end_buffer_async_read()
304 struct buffer_head *bh; member
311 struct buffer_head *bh = ctx->bh; in verify_bh() local
314 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh)); in verify_bh()
315 end_buffer_async_read(bh, valid); in verify_bh()
319 static bool need_fsverity(struct buffer_head *bh) in need_fsverity() argument
321 struct folio *folio = bh->b_folio; in need_fsverity()
333 struct buffer_head *bh = ctx->bh; in decrypt_bh() local
336 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size, in decrypt_bh()
337 bh_offset(bh)); in decrypt_bh()
338 if (err == 0 && need_fsverity(bh)) { in decrypt_bh()
348 end_buffer_async_read(bh, err == 0); in decrypt_bh()
356 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io() argument
358 struct inode *inode = bh->b_folio->mapping->host; in end_buffer_async_read_io()
360 bool verify = need_fsverity(bh); in end_buffer_async_read_io()
368 ctx->bh = bh; in end_buffer_async_read_io()
380 end_buffer_async_read(bh, uptodate); in end_buffer_async_read_io()
387 static void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
394 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
396 folio = bh->b_folio; in end_buffer_async_write()
398 set_buffer_uptodate(bh); in end_buffer_async_write()
400 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
401 mark_buffer_write_io_error(bh); in end_buffer_async_write()
402 clear_buffer_uptodate(bh); in end_buffer_async_write()
408 clear_buffer_async_write(bh); in end_buffer_async_write()
409 unlock_buffer(bh); in end_buffer_async_write()
410 tmp = bh->b_this_page; in end_buffer_async_write()
411 while (tmp != bh) { in end_buffer_async_write()
447 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
449 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
450 set_buffer_async_read(bh); in mark_buffer_async_read()
453 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
456 bh->b_end_io = handler; in mark_buffer_async_write_endio()
457 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
460 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
462 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
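mark_buffer_async_write() points b_end_io at end_buffer_async_write() so that folio writeback ends when the last buffer finishes. The sketch below condenses the sequence __block_write_full_folio() (later in this listing) uses; it assumes the caller holds the folio lock, has already cleared the folio's dirty flag for writeback, and that every buffer is mapped. Error handling and the non-blocking trylock path are omitted, and the function name is illustrative:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static void example_write_folio_buffers(struct folio *folio)
{
        struct buffer_head *head = folio_buffers(folio);
        struct buffer_head *bh = head;
        unsigned int nr = 0;

        /* Pass 1: grab each dirty buffer and mark it for async completion. */
        do {
                lock_buffer(bh);
                if (test_clear_buffer_dirty(bh))
                        mark_buffer_async_write(bh);    /* stays locked until I/O ends */
                else
                        unlock_buffer(bh);
        } while ((bh = bh->b_this_page) != head);

        folio_start_writeback(folio);   /* ended by end_buffer_async_write() */
        folio_unlock(folio);

        /* Pass 2: submit the buffers picked up above. */
        do {
                struct buffer_head *next = bh->b_this_page;

                if (buffer_async_write(bh)) {
                        submit_bh(REQ_OP_WRITE, bh);
                        nr++;
                }
                bh = next;
        } while (bh != head);

        if (!nr)                        /* everything was already clean */
                folio_end_writeback(folio);
}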
519 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
521 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
522 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
523 bh->b_assoc_map = NULL; in __remove_assoc_queue()
543 struct buffer_head *bh; in osync_buffers_list() local
550 bh = BH_ENTRY(p); in osync_buffers_list()
551 if (buffer_locked(bh)) { in osync_buffers_list()
552 get_bh(bh); in osync_buffers_list()
554 wait_on_buffer(bh); in osync_buffers_list()
555 if (!buffer_uptodate(bh)) in osync_buffers_list()
557 brelse(bh); in osync_buffers_list()
668 struct buffer_head *bh; in write_boundary_block() local
670 bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize); in write_boundary_block()
671 if (bh) { in write_boundary_block()
672 if (buffer_dirty(bh)) in write_boundary_block()
673 write_dirty_buffer(bh, 0); in write_boundary_block()
674 put_bh(bh); in write_boundary_block()
678 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
681 struct address_space *buffer_mapping = bh->b_folio->mapping; in mark_buffer_dirty_inode()
683 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
689 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
691 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
693 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
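mark_buffer_dirty_inode() both dirties the buffer and queues it on the inode's associated-buffer list so a later fsync can find it (see fsync_buffers_list() below). A hypothetical filesystem helper using it:

#include <linux/buffer_head.h>

static void example_dirty_metadata(struct inode *inode, struct buffer_head *bh)
{
        lock_buffer(bh);
        /* ... update the metadata in bh->b_data ... */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty_inode(bh, inode);     /* dirty + attach to the inode's assoc list */
}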
739 struct buffer_head *bh = head; in block_dirty_folio() local
742 set_buffer_dirty(bh); in block_dirty_folio()
743 bh = bh->b_this_page; in block_dirty_folio()
744 } while (bh != head); in block_dirty_folio()
784 struct buffer_head *bh; in fsync_buffers_list() local
794 bh = BH_ENTRY(list->next); in fsync_buffers_list()
795 mapping = bh->b_assoc_map; in fsync_buffers_list()
796 __remove_assoc_queue(bh); in fsync_buffers_list()
800 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
801 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
802 bh->b_assoc_map = mapping; in fsync_buffers_list()
803 if (buffer_dirty(bh)) { in fsync_buffers_list()
804 get_bh(bh); in fsync_buffers_list()
813 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
821 brelse(bh); in fsync_buffers_list()
832 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
833 get_bh(bh); in fsync_buffers_list()
834 mapping = bh->b_assoc_map; in fsync_buffers_list()
835 __remove_assoc_queue(bh); in fsync_buffers_list()
839 if (buffer_dirty(bh)) { in fsync_buffers_list()
840 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
842 bh->b_assoc_map = mapping; in fsync_buffers_list()
845 wait_on_buffer(bh); in fsync_buffers_list()
846 if (!buffer_uptodate(bh)) in fsync_buffers_list()
848 brelse(bh); in fsync_buffers_list()
900 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
901 if (buffer_dirty(bh)) { in remove_inode_buffers()
905 __remove_assoc_queue(bh); in remove_inode_buffers()
924 struct buffer_head *bh, *head; in folio_alloc_buffers() local
935 bh = alloc_buffer_head(gfp); in folio_alloc_buffers()
936 if (!bh) in folio_alloc_buffers()
939 bh->b_this_page = head; in folio_alloc_buffers()
940 bh->b_blocknr = -1; in folio_alloc_buffers()
941 head = bh; in folio_alloc_buffers()
943 bh->b_size = size; in folio_alloc_buffers()
946 folio_set_bh(bh, folio, offset); in folio_alloc_buffers()
957 bh = head; in folio_alloc_buffers()
959 free_buffer_head(bh); in folio_alloc_buffers()
978 struct buffer_head *bh, *tail; in link_dev_buffers() local
980 bh = head; in link_dev_buffers()
982 tail = bh; in link_dev_buffers()
983 bh = bh->b_this_page; in link_dev_buffers()
984 } while (bh); in link_dev_buffers()
1008 struct buffer_head *bh = head; in folio_init_buffers() local
1014 if (!buffer_mapped(bh)) { in folio_init_buffers()
1015 bh->b_end_io = NULL; in folio_init_buffers()
1016 bh->b_private = NULL; in folio_init_buffers()
1017 bh->b_bdev = bdev; in folio_init_buffers()
1018 bh->b_blocknr = block; in folio_init_buffers()
1020 set_buffer_uptodate(bh); in folio_init_buffers()
1022 set_buffer_mapped(bh); in folio_init_buffers()
1025 bh = bh->b_this_page; in folio_init_buffers()
1026 } while (bh != head); in folio_init_buffers()
1047 struct buffer_head *bh; in grow_dev_folio() local
1055 bh = folio_buffers(folio); in grow_dev_folio()
1056 if (bh) { in grow_dev_folio()
1057 if (bh->b_size == size) { in grow_dev_folio()
1075 bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT); in grow_dev_folio()
1076 if (!bh) in grow_dev_folio()
1085 link_dev_buffers(folio, bh); in grow_dev_folio()
1132 struct buffer_head *bh; in __getblk_slow() local
1138 bh = __find_get_block_nonatomic(bdev, block, size); in __getblk_slow()
1140 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1141 if (bh) in __getblk_slow()
1142 return bh; in __getblk_slow()
1181 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1183 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1185 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1193 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1195 if (buffer_dirty(bh)) in mark_buffer_dirty()
1199 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1200 struct folio *folio = bh->b_folio; in mark_buffer_dirty()
1214 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1216 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1218 if (bh->b_folio && bh->b_folio->mapping) in mark_buffer_write_io_error()
1219 mapping_set_error(bh->b_folio->mapping, -EIO); in mark_buffer_write_io_error()
1220 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1221 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1231 void __brelse(struct buffer_head *bh) in __brelse() argument
1233 if (atomic_read(&bh->b_count)) { in __brelse()
1234 put_bh(bh); in __brelse()
1248 void __bforget(struct buffer_head *bh) in __bforget() argument
1250 clear_buffer_dirty(bh); in __bforget()
1251 if (bh->b_assoc_map) { in __bforget()
1252 struct address_space *buffer_mapping = bh->b_folio->mapping; in __bforget()
1255 list_del_init(&bh->b_assoc_buffers); in __bforget()
1256 bh->b_assoc_map = NULL; in __bforget()
1259 __brelse(bh); in __bforget()
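__brelse() and __bforget() are the slow paths behind brelse() and bforget(): the former just drops the reference a lookup took, the latter also discards dirty state, e.g. for metadata of a just-deleted file. A trivial illustrative wrapper:

#include <linux/buffer_head.h>

static void example_drop_block(struct buffer_head *bh, bool discard)
{
        if (discard)
                bforget(bh);    /* clear dirty, detach from the inode list, drop the ref */
        else
                brelse(bh);     /* only drop the reference */
}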
1263 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1265 lock_buffer(bh); in __bread_slow()
1266 if (buffer_uptodate(bh)) { in __bread_slow()
1267 unlock_buffer(bh); in __bread_slow()
1268 return bh; in __bread_slow()
1270 get_bh(bh); in __bread_slow()
1271 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1272 submit_bh(REQ_OP_READ, bh); in __bread_slow()
1273 wait_on_buffer(bh); in __bread_slow()
1274 if (buffer_uptodate(bh)) in __bread_slow()
1275 return bh; in __bread_slow()
1277 brelse(bh); in __bread_slow()
1323 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1325 struct buffer_head *evictee = bh; in bh_lru_install()
1346 if (evictee == bh) { in bh_lru_install()
1352 get_bh(bh); in bh_lru_install()
1373 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1375 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1376 bh->b_size == size) { in lookup_bh_lru()
1383 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1385 get_bh(bh); in lookup_bh_lru()
1386 ret = bh; in lookup_bh_lru()
1404 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in find_get_block_common() local
1406 if (bh == NULL) { in find_get_block_common()
1408 bh = __find_get_block_slow(bdev, block, atomic); in find_get_block_common()
1409 if (bh) in find_get_block_common()
1410 bh_lru_install(bh); in find_get_block_common()
1412 touch_buffer(bh); in find_get_block_common()
1414 return bh; in find_get_block_common()
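find_get_block_common() backs __find_get_block(): a pure cache lookup (per-CPU LRU first, then the page-cache walk in __find_get_block_slow()) that never issues I/O. A sketch of a caller that only wants to know whether a block is already cached; the helper name is invented:

#include <linux/buffer_head.h>

static bool example_block_is_cached(struct block_device *bdev, sector_t block,
                                    unsigned int size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        if (!bh)
                return false;
        brelse(bh);             /* drop the reference the lookup took */
        return true;
}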
1450 struct buffer_head *bh; in bdev_getblk() local
1453 bh = __find_get_block_nonatomic(bdev, block, size); in bdev_getblk()
1455 bh = __find_get_block(bdev, block, size); in bdev_getblk()
1458 if (bh) in bdev_getblk()
1459 return bh; in bdev_getblk()
1470 struct buffer_head *bh = bdev_getblk(bdev, block, size, in __breadahead() local
1473 if (likely(bh)) { in __breadahead()
1474 bh_readahead(bh, REQ_RAHEAD); in __breadahead()
1475 brelse(bh); in __breadahead()
1505 struct buffer_head *bh; in __bread_gfp() local
1515 bh = bdev_getblk(bdev, block, size, gfp); in __bread_gfp()
1517 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1518 bh = __bread_slow(bh); in __bread_gfp()
1519 return bh; in __bread_gfp()
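__bread_gfp() is what the sb_bread()/__bread() wrappers resolve to: return a buffer that is uptodate, or NULL on failure. The classic consumer pattern, with an invented function name and block number:

#include <linux/buffer_head.h>

static int example_read_metadata_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_bread(sb, blocknr);  /* uptodate bh or NULL */

        if (!bh)
                return -EIO;
        /* ... parse the sb->s_blocksize bytes at bh->b_data ... */
        brelse(bh);
        return 0;
}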
1578 void folio_set_bh(struct buffer_head *bh, struct folio *folio, in folio_set_bh() argument
1581 bh->b_folio = folio; in folio_set_bh()
1587 bh->b_data = (char *)(0 + offset); in folio_set_bh()
1589 bh->b_data = folio_address(folio) + offset; in folio_set_bh()
1602 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1606 lock_buffer(bh); in discard_buffer()
1607 clear_buffer_dirty(bh); in discard_buffer()
1608 bh->b_bdev = NULL; in discard_buffer()
1609 b_state = READ_ONCE(bh->b_state); in discard_buffer()
1611 } while (!try_cmpxchg_relaxed(&bh->b_state, &b_state, in discard_buffer()
1613 unlock_buffer(bh); in discard_buffer()
1633 struct buffer_head *head, *bh, *next; in block_invalidate_folio() local
1648 bh = head; in block_invalidate_folio()
1650 size_t next_off = curr_off + bh->b_size; in block_invalidate_folio()
1651 next = bh->b_this_page; in block_invalidate_folio()
1663 discard_buffer(bh); in block_invalidate_folio()
1665 bh = next; in block_invalidate_folio()
1666 } while (bh != head); in block_invalidate_folio()
1688 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1692 bh = head; in create_empty_buffers()
1694 bh->b_state |= b_state; in create_empty_buffers()
1695 tail = bh; in create_empty_buffers()
1696 bh = bh->b_this_page; in create_empty_buffers()
1697 } while (bh); in create_empty_buffers()
1702 bh = head; in create_empty_buffers()
1705 set_buffer_dirty(bh); in create_empty_buffers()
1707 set_buffer_uptodate(bh); in create_empty_buffers()
1708 bh = bh->b_this_page; in create_empty_buffers()
1709 } while (bh != head); in create_empty_buffers()
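create_empty_buffers() attaches a ring of unmapped buffers to a folio and returns the head. A sketch of the usual "make sure buffers exist" step (the same check folio_create_buffers() below wraps), assuming the folio is locked:

#include <linux/buffer_head.h>

static struct buffer_head *example_get_folio_buffers(struct folio *folio,
                                                     unsigned long blocksize)
{
        struct buffer_head *head = folio_buffers(folio);

        if (!head)
                head = create_empty_buffers(folio, blocksize, 0);
        return head;
}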
1746 struct buffer_head *bh; in clean_bdev_aliases() local
1768 bh = head; in clean_bdev_aliases()
1770 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1772 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1774 clear_buffer_dirty(bh); in clean_bdev_aliases()
1775 wait_on_buffer(bh); in clean_bdev_aliases()
1776 clear_buffer_req(bh); in clean_bdev_aliases()
1778 bh = bh->b_this_page; in clean_bdev_aliases()
1779 } while (bh != head); in clean_bdev_aliases()
1796 struct buffer_head *bh; in folio_create_buffers() local
1800 bh = folio_buffers(folio); in folio_create_buffers()
1801 if (!bh) in folio_create_buffers()
1802 bh = create_empty_buffers(folio, in folio_create_buffers()
1804 return bh; in folio_create_buffers()
1842 struct buffer_head *bh, *head; in __block_write_full_folio() local
1860 bh = head; in __block_write_full_folio()
1861 blocksize = bh->b_size; in __block_write_full_folio()
1880 clear_buffer_dirty(bh); in __block_write_full_folio()
1881 set_buffer_uptodate(bh); in __block_write_full_folio()
1882 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_folio()
1883 buffer_dirty(bh)) { in __block_write_full_folio()
1884 WARN_ON(bh->b_size != blocksize); in __block_write_full_folio()
1885 err = get_block(inode, block, bh, 1); in __block_write_full_folio()
1888 clear_buffer_delay(bh); in __block_write_full_folio()
1889 if (buffer_new(bh)) { in __block_write_full_folio()
1891 clear_buffer_new(bh); in __block_write_full_folio()
1892 clean_bdev_bh_alias(bh); in __block_write_full_folio()
1895 bh = bh->b_this_page; in __block_write_full_folio()
1897 } while (bh != head); in __block_write_full_folio()
1900 if (!buffer_mapped(bh)) in __block_write_full_folio()
1910 lock_buffer(bh); in __block_write_full_folio()
1911 } else if (!trylock_buffer(bh)) { in __block_write_full_folio()
1915 if (test_clear_buffer_dirty(bh)) { in __block_write_full_folio()
1916 mark_buffer_async_write_endio(bh, in __block_write_full_folio()
1919 unlock_buffer(bh); in __block_write_full_folio()
1921 } while ((bh = bh->b_this_page) != head); in __block_write_full_folio()
1931 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1932 if (buffer_async_write(bh)) { in __block_write_full_folio()
1933 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, in __block_write_full_folio()
1937 bh = next; in __block_write_full_folio()
1938 } while (bh != head); in __block_write_full_folio()
1965 bh = head; in __block_write_full_folio()
1968 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_folio()
1969 !buffer_delay(bh)) { in __block_write_full_folio()
1970 lock_buffer(bh); in __block_write_full_folio()
1971 mark_buffer_async_write_endio(bh, in __block_write_full_folio()
1978 clear_buffer_dirty(bh); in __block_write_full_folio()
1980 } while ((bh = bh->b_this_page) != head); in __block_write_full_folio()
1985 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1986 if (buffer_async_write(bh)) { in __block_write_full_folio()
1987 clear_buffer_dirty(bh); in __block_write_full_folio()
1988 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, in __block_write_full_folio()
1992 bh = next; in __block_write_full_folio()
1993 } while (bh != head); in __block_write_full_folio()
2007 struct buffer_head *head, *bh; in folio_zero_new_buffers() local
2014 bh = head; in folio_zero_new_buffers()
2017 block_end = block_start + bh->b_size; in folio_zero_new_buffers()
2019 if (buffer_new(bh)) { in folio_zero_new_buffers()
2028 set_buffer_uptodate(bh); in folio_zero_new_buffers()
2031 clear_buffer_new(bh); in folio_zero_new_buffers()
2032 mark_buffer_dirty(bh); in folio_zero_new_buffers()
2037 bh = bh->b_this_page; in folio_zero_new_buffers()
2038 } while (bh != head); in folio_zero_new_buffers()
2043 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
2048 bh->b_bdev = iomap->bdev; in iomap_to_bh()
2066 if (!buffer_uptodate(bh) || in iomap_to_bh()
2068 set_buffer_new(bh); in iomap_to_bh()
2071 if (!buffer_uptodate(bh) || in iomap_to_bh()
2073 set_buffer_new(bh); in iomap_to_bh()
2074 set_buffer_uptodate(bh); in iomap_to_bh()
2075 set_buffer_mapped(bh); in iomap_to_bh()
2076 set_buffer_delay(bh); in iomap_to_bh()
2084 set_buffer_new(bh); in iomap_to_bh()
2085 set_buffer_unwritten(bh); in iomap_to_bh()
2098 set_buffer_new(bh); in iomap_to_bh()
2100 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
2102 set_buffer_mapped(bh); in iomap_to_bh()
2120 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
2130 for (bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
2131 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2135 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2136 set_buffer_uptodate(bh); in __block_write_begin_int()
2140 if (buffer_new(bh)) in __block_write_begin_int()
2141 clear_buffer_new(bh); in __block_write_begin_int()
2142 if (!buffer_mapped(bh)) { in __block_write_begin_int()
2143 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2145 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
2147 err = iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
2151 if (buffer_new(bh)) { in __block_write_begin_int()
2152 clean_bdev_bh_alias(bh); in __block_write_begin_int()
2154 clear_buffer_new(bh); in __block_write_begin_int()
2155 set_buffer_uptodate(bh); in __block_write_begin_int()
2156 mark_buffer_dirty(bh); in __block_write_begin_int()
2167 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2168 set_buffer_uptodate(bh); in __block_write_begin_int()
2171 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2172 !buffer_unwritten(bh) && in __block_write_begin_int()
2174 bh_read_nowait(bh, 0); in __block_write_begin_int()
2175 *wait_bh++=bh; in __block_write_begin_int()
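__block_write_begin_int() drives everything through a get_block_t callback (or an iomap). A toy callback, purely illustrative: it maps logical block N straight to physical block N with map_bh(); a real filesystem would look up or allocate blocks here:

#include <linux/buffer_head.h>

static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
        /* Hypothetical 1:1 mapping; 'create' would normally trigger allocation. */
        map_bh(bh_result, inode->i_sb, iblock); /* sets b_bdev, b_blocknr, BH_Mapped */
        return 0;
}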
2203 struct buffer_head *bh, *head; in block_commit_write() local
2205 bh = head = folio_buffers(folio); in block_commit_write()
2206 if (!bh) in block_commit_write()
2208 blocksize = bh->b_size; in block_commit_write()
2214 if (!buffer_uptodate(bh)) in block_commit_write()
2217 set_buffer_uptodate(bh); in block_commit_write()
2218 mark_buffer_dirty(bh); in block_commit_write()
2220 if (buffer_new(bh)) in block_commit_write()
2221 clear_buffer_new(bh); in block_commit_write()
2224 bh = bh->b_this_page; in block_commit_write()
2225 } while (bh != head); in block_commit_write()
2350 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2362 bh = head; in block_is_partially_uptodate()
2367 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2375 bh = bh->b_this_page; in block_is_partially_uptodate()
2376 } while (bh != head); in block_is_partially_uptodate()
2393 struct buffer_head *bh, *head, *prev = NULL; in block_read_full_folio() local
2408 bh = head; in block_read_full_folio()
2411 if (buffer_uptodate(bh)) in block_read_full_folio()
2414 if (!buffer_mapped(bh)) { in block_read_full_folio()
2419 WARN_ON(bh->b_size != blocksize); in block_read_full_folio()
2420 err = get_block(inode, iblock, bh, 0); in block_read_full_folio()
2424 if (!buffer_mapped(bh)) { in block_read_full_folio()
2425 folio_zero_range(folio, bh_offset(bh), in block_read_full_folio()
2428 set_buffer_uptodate(bh); in block_read_full_folio()
2435 if (buffer_uptodate(bh)) in block_read_full_folio()
2439 lock_buffer(bh); in block_read_full_folio()
2440 if (buffer_uptodate(bh)) { in block_read_full_folio()
2441 unlock_buffer(bh); in block_read_full_folio()
2445 mark_buffer_async_read(bh); in block_read_full_folio()
2448 prev = bh; in block_read_full_folio()
2449 } while (iblock++, (bh = bh->b_this_page) != head); in block_read_full_folio()
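block_read_full_folio() is normally reached through a filesystem's read_folio address-space operation. A sketch of such a hookup, reusing the hypothetical example_get_block from the sketch above; the aops table is illustrative and deliberately minimal:

#include <linux/buffer_head.h>
#include <linux/fs.h>

static int example_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int example_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, example_get_block);
}

static const struct address_space_operations example_aops = {
        .read_folio       = example_read_folio,
        .dirty_folio      = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
};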
2663 struct buffer_head *bh; in block_truncate_page() local
2680 bh = folio_buffers(folio); in block_truncate_page()
2681 if (!bh) in block_truncate_page()
2682 bh = create_empty_buffers(folio, blocksize, 0); in block_truncate_page()
2688 bh = bh->b_this_page; in block_truncate_page()
2693 if (!buffer_mapped(bh)) { in block_truncate_page()
2694 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2695 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2699 if (!buffer_mapped(bh)) in block_truncate_page()
2705 set_buffer_uptodate(bh); in block_truncate_page()
2707 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2708 err = bh_read(bh, 0); in block_truncate_page()
2715 mark_buffer_dirty(bh); in block_truncate_page()
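block_truncate_page() zeroes the partial tail block when a file shrinks. A hedged sketch of a truncate path using it, again with the hypothetical example_get_block:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static int example_truncate(struct inode *inode, loff_t newsize)
{
        int err = block_truncate_page(inode->i_mapping, newsize,
                                      example_get_block);
        if (err)
                return err;
        truncate_setsize(inode, newsize);       /* update i_size, drop folios past EOF */
        /* ... the filesystem then frees on-disk blocks beyond newsize ... */
        return 0;
}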
2771 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2774 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2776 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
2780 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, in submit_bh_wbc() argument
2787 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
2788 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
2789 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
2790 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
2791 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
2796 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
2797 clear_buffer_write_io_error(bh); in submit_bh_wbc()
2799 if (buffer_meta(bh)) in submit_bh_wbc()
2801 if (buffer_prio(bh)) in submit_bh_wbc()
2804 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); in submit_bh_wbc()
2806 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); in submit_bh_wbc()
2808 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
2811 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
2814 bio->bi_private = bh; in submit_bh_wbc()
2821 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size); in submit_bh_wbc()
2827 void submit_bh(blk_opf_t opf, struct buffer_head *bh) in submit_bh() argument
2829 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); in submit_bh()
2833 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in write_dirty_buffer() argument
2835 lock_buffer(bh); in write_dirty_buffer()
2836 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
2837 unlock_buffer(bh); in write_dirty_buffer()
2840 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
2841 get_bh(bh); in write_dirty_buffer()
2842 submit_bh(REQ_OP_WRITE | op_flags, bh); in write_dirty_buffer()
2851 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in __sync_dirty_buffer() argument
2853 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
2854 lock_buffer(bh); in __sync_dirty_buffer()
2855 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
2860 if (!buffer_mapped(bh)) { in __sync_dirty_buffer()
2861 unlock_buffer(bh); in __sync_dirty_buffer()
2865 get_bh(bh); in __sync_dirty_buffer()
2866 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
2867 submit_bh(REQ_OP_WRITE | op_flags, bh); in __sync_dirty_buffer()
2868 wait_on_buffer(bh); in __sync_dirty_buffer()
2869 if (!buffer_uptodate(bh)) in __sync_dirty_buffer()
2872 unlock_buffer(bh); in __sync_dirty_buffer()
2878 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
2880 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
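sync_dirty_buffer() is the "update, dirty, force to disk now" primitive used for critical metadata such as superblocks. A minimal sketch with an invented helper name:

#include <linux/buffer_head.h>

static int example_update_and_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        /* ... modify bh->b_data ... */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);   /* REQ_SYNC write, waits, -EIO on failure */
}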
2884 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
2886 return atomic_read(&bh->b_count) | in buffer_busy()
2887 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
2894 struct buffer_head *bh; in drop_buffers() local
2896 bh = head; in drop_buffers()
2898 if (buffer_busy(bh)) in drop_buffers()
2900 bh = bh->b_this_page; in drop_buffers()
2901 } while (bh != head); in drop_buffers()
2904 struct buffer_head *next = bh->b_this_page; in drop_buffers()
2906 if (bh->b_assoc_map) in drop_buffers()
2907 __remove_assoc_queue(bh); in drop_buffers()
2908 bh = next; in drop_buffers()
2909 } while (bh != head); in drop_buffers()
2978 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
2981 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
2982 free_buffer_head(bh); in try_to_free_buffers()
2983 bh = next; in try_to_free_buffers()
2984 } while (bh != buffers_to_free); in try_to_free_buffers()
3038 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3040 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3041 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3070 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3072 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3073 lock_buffer(bh); in bh_uptodate_or_lock()
3074 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3076 unlock_buffer(bh); in bh_uptodate_or_lock()
3090 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) in __bh_read() argument
3094 BUG_ON(!buffer_locked(bh)); in __bh_read()
3096 get_bh(bh); in __bh_read()
3097 bh->b_end_io = end_buffer_read_sync; in __bh_read()
3098 submit_bh(REQ_OP_READ | op_flags, bh); in __bh_read()
3100 wait_on_buffer(bh); in __bh_read()
3101 if (!buffer_uptodate(bh)) in __bh_read()
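__bh_read() underlies the bh_read() convenience wrapper: make a mapped buffer uptodate, issuing a synchronous read only if needed. A sketch, with the return-value convention taken from the wrapper (1 = was already uptodate, 0 = read succeeded, <0 = error):

#include <linux/buffer_head.h>

static int example_ensure_uptodate(struct buffer_head *bh)
{
        int ret = bh_read(bh, 0);

        return ret < 0 ? ret : 0;       /* treat "already uptodate" as success */
}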
3124 struct buffer_head *bh = bhs[i]; in __bh_read_batch() local
3126 if (buffer_uptodate(bh)) in __bh_read_batch()
3130 lock_buffer(bh); in __bh_read_batch()
3132 if (!trylock_buffer(bh)) in __bh_read_batch()
3135 if (buffer_uptodate(bh)) { in __bh_read_batch()
3136 unlock_buffer(bh); in __bh_read_batch()
3140 bh->b_end_io = end_buffer_read_sync; in __bh_read_batch()
3141 get_bh(bh); in __bh_read_batch()
3142 submit_bh(REQ_OP_READ | op_flags, bh); in __bh_read_batch()
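__bh_read_batch() only submits the reads; it does not wait, and the bh_read_batch() wrapper keeps that behaviour, so the caller waits on each buffer itself. An illustrative two-buffer example:

#include <linux/buffer_head.h>

static int example_read_pair(struct buffer_head *a, struct buffer_head *b)
{
        struct buffer_head *bhs[] = { a, b };
        int i;

        bh_read_batch(2, bhs);          /* submit reads for whatever is not uptodate */
        for (i = 0; i < 2; i++) {
                wait_on_buffer(bhs[i]); /* end_buffer_read_sync() unlocks on completion */
                if (!buffer_uptodate(bhs[i]))
                        return -EIO;
        }
        return 0;
}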