Lines matching full:bp in fs/xfs/xfs_buf.c (identifier cross-reference: each hit shows the source line number, the matching line, and its enclosing function; declaration hits are tagged "argument" or "local")

50 static void xfs_buf_submit(struct xfs_buf *bp);
51 static int xfs_buf_iowait(struct xfs_buf *bp);
53 static inline bool xfs_buf_is_uncached(struct xfs_buf *bp) in xfs_buf_is_uncached() argument
55 return bp->b_rhash_key == XFS_BUF_DADDR_NULL; in xfs_buf_is_uncached()
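
The xfs_buf_is_uncached() hits above show how uncached buffers are told apart: they are never inserted into the buffer-cache hash, so b_rhash_key keeps the null-daddr sentinel. A minimal userspace model of that predicate (hypothetical names; in the kernel XFS_BUF_DADDR_NULL is ((xfs_daddr_t)-1)):

    #include <stdint.h>
    #include <stdbool.h>

    #define DADDR_NULL ((uint64_t)-1)   /* stand-in for XFS_BUF_DADDR_NULL */

    /* Uncached buffers never enter the rhashtable, so their hash key
     * retains the "no disk address" sentinel. */
    static bool buf_is_uncached(uint64_t rhash_key)
    {
        return rhash_key == DADDR_NULL;
    }
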
68 struct xfs_buf *bp) in xfs_buf_stale() argument
70 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
72 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
79 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
81 spin_lock(&bp->b_lock); in xfs_buf_stale()
82 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
83 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
84 (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
85 bp->b_hold--; in xfs_buf_stale()
87 ASSERT(bp->b_hold >= 1); in xfs_buf_stale()
88 spin_unlock(&bp->b_lock); in xfs_buf_stale()
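
In xfs_buf_stale() above, zeroing b_lru_ref and leaving the LRU list also drops the reference that the LRU itself held, but the hold count must never reach zero here because the caller still owns the locked buffer. A compilable userspace model of that bookkeeping (hypothetical simplified struct; b_lock elided):

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for struct xfs_buf: hold count plus LRU
     * membership. */
    struct buf {
        int  hold;      /* references held on the buffer */
        bool on_lru;    /* the LRU list owns one reference while true */
    };

    /* Model of the xfs_buf_stale() bookkeeping: leaving the LRU drops
     * the reference the LRU held, but never the last one, because the
     * caller still holds the buffer locked. */
    static void buf_stale(struct buf *bp)
    {
        if (bp->on_lru) {
            bp->on_lru = false;
            bp->hold--;
        }
        assert(bp->hold >= 1);      /* mirrors ASSERT(bp->b_hold >= 1) */
    }
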
95 struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu); in xfs_buf_free_callback() local
97 if (bp->b_maps != &bp->__b_map) in xfs_buf_free_callback()
98 kfree(bp->b_maps); in xfs_buf_free_callback()
99 kmem_cache_free(xfs_buf_cache, bp); in xfs_buf_free_callback()
104 struct xfs_buf *bp) in xfs_buf_free() argument
106 unsigned int size = BBTOB(bp->b_length); in xfs_buf_free()
109 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
111 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
113 if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE) in xfs_buf_free()
116 if (is_vmalloc_addr(bp->b_addr)) in xfs_buf_free()
117 vfree(bp->b_addr); in xfs_buf_free()
118 else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
119 kfree(bp->b_addr); in xfs_buf_free()
121 folio_put(virt_to_folio(bp->b_addr)); in xfs_buf_free()
123 call_rcu(&bp->b_rcu, xfs_buf_free_callback); in xfs_buf_free()
128 struct xfs_buf *bp, in xfs_buf_alloc_kmem() argument
135 bp->b_addr = kmalloc(size, gfp_mask | __GFP_NOFAIL); in xfs_buf_alloc_kmem()
136 if (!bp->b_addr) in xfs_buf_alloc_kmem()
144 if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)bp->b_addr, size))) { in xfs_buf_alloc_kmem()
145 kfree(bp->b_addr); in xfs_buf_alloc_kmem()
146 bp->b_addr = NULL; in xfs_buf_alloc_kmem()
149 bp->b_flags |= _XBF_KMEM; in xfs_buf_alloc_kmem()
150 trace_xfs_buf_backing_kmem(bp, _RET_IP_); in xfs_buf_alloc_kmem()
176 struct xfs_buf *bp, in xfs_buf_alloc_backing_mem() argument
179 size_t size = BBTOB(bp->b_length); in xfs_buf_alloc_backing_mem()
183 if (xfs_buftarg_is_mem(bp->b_target)) in xfs_buf_alloc_backing_mem()
184 return xmbuf_map_backing_mem(bp); in xfs_buf_alloc_backing_mem()
200 return xfs_buf_alloc_kmem(bp, size, gfp_mask); in xfs_buf_alloc_backing_mem()
230 trace_xfs_buf_backing_fallback(bp, _RET_IP_); in xfs_buf_alloc_backing_mem()
233 bp->b_addr = folio_address(folio); in xfs_buf_alloc_backing_mem()
234 trace_xfs_buf_backing_folio(bp, _RET_IP_); in xfs_buf_alloc_backing_mem()
239 bp->b_addr = __vmalloc(size, gfp_mask); in xfs_buf_alloc_backing_mem()
240 if (bp->b_addr) in xfs_buf_alloc_backing_mem()
244 XFS_STATS_INC(bp->b_mount, xb_page_retries); in xfs_buf_alloc_backing_mem()
248 trace_xfs_buf_backing_vmalloc(bp, _RET_IP_); in xfs_buf_alloc_backing_mem()
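
xfs_buf_alloc_backing_mem() above walks backing stores in cost order: the kmalloc heap for small buffers (rejecting results that are not naturally aligned, per the IS_ALIGNED check in xfs_buf_alloc_kmem()), then a single folio, then a vmalloc fallback with a retry loop. A userspace sketch of the same try-cheap-then-fall-back shape (posix_memalign/malloc standing in for the kernel allocators; assumes size is a power of two >= 512, as XFS buffer sizes are):

    #include <stdlib.h>

    /* Userspace model of the backing-memory ladder: prefer a naturally
     * aligned heap allocation for sub-page buffers (the _XBF_KMEM case,
     * which the kernel rejects if kmalloc hands back an unaligned
     * pointer), and fall back to a plain allocation standing in for
     * vmalloc (which the kernel must later free with vfree). */
    static void *alloc_backing(size_t size)
    {
        void *p = NULL;

        if (size < 4096 && posix_memalign(&p, size, size) == 0)
            return p;
        return malloc(size);
    }
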
260 struct xfs_buf *bp; in xfs_buf_alloc() local
265 bp = kmem_cache_zalloc(xfs_buf_cache, in xfs_buf_alloc()
280 bp->b_hold = 1; in xfs_buf_alloc()
281 sema_init(&bp->b_sema, 0); /* held, no waiters */ in xfs_buf_alloc()
283 spin_lock_init(&bp->b_lock); in xfs_buf_alloc()
284 atomic_set(&bp->b_lru_ref, 1); in xfs_buf_alloc()
285 init_completion(&bp->b_iowait); in xfs_buf_alloc()
286 INIT_LIST_HEAD(&bp->b_lru); in xfs_buf_alloc()
287 INIT_LIST_HEAD(&bp->b_list); in xfs_buf_alloc()
288 INIT_LIST_HEAD(&bp->b_li_list); in xfs_buf_alloc()
289 bp->b_target = target; in xfs_buf_alloc()
290 bp->b_mount = target->bt_mount; in xfs_buf_alloc()
291 bp->b_flags = flags; in xfs_buf_alloc()
292 bp->b_rhash_key = map[0].bm_bn; in xfs_buf_alloc()
293 bp->b_length = 0; in xfs_buf_alloc()
294 bp->b_map_count = nmaps; in xfs_buf_alloc()
296 bp->b_maps = &bp->__b_map; in xfs_buf_alloc()
298 bp->b_maps = kcalloc(nmaps, sizeof(struct xfs_buf_map), in xfs_buf_alloc()
301 bp->b_maps[i].bm_bn = map[i].bm_bn; in xfs_buf_alloc()
302 bp->b_maps[i].bm_len = map[i].bm_len; in xfs_buf_alloc()
303 bp->b_length += map[i].bm_len; in xfs_buf_alloc()
306 atomic_set(&bp->b_pin_count, 0); in xfs_buf_alloc()
307 init_waitqueue_head(&bp->b_waiters); in xfs_buf_alloc()
309 XFS_STATS_INC(bp->b_mount, xb_create); in xfs_buf_alloc()
310 trace_xfs_buf_init(bp, _RET_IP_); in xfs_buf_alloc()
312 error = xfs_buf_alloc_backing_mem(bp, flags); in xfs_buf_alloc()
314 xfs_buf_free(bp); in xfs_buf_alloc()
318 *bpp = bp; in xfs_buf_alloc()
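
The xfs_buf_alloc() hits show that a discontiguous buffer carries an array of (bm_bn, bm_len) maps and that b_length is accumulated as their sum; single-map buffers reuse the embedded __b_map so no extra allocation is needed. The length computation in isolation (hypothetical simplified types; lengths are in 512-byte basic blocks):

    #include <stdint.h>

    struct buf_map { uint64_t bm_bn; int bm_len; };  /* daddr + length */

    /* Mirrors the copy loop in xfs_buf_alloc(): total buffer length is
     * the sum of the extents making up the (possibly discontiguous)
     * buffer. */
    static int buf_total_length(const struct buf_map *map, int nmaps)
    {
        int len = 0;

        for (int i = 0; i < nmaps; i++)
            len += map[i].bm_len;
        return len;
    }
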
331 const struct xfs_buf *bp = obj; in _xfs_buf_obj_cmp() local
339 if (bp->b_rhash_key != map->bm_bn) in _xfs_buf_obj_cmp()
342 if (unlikely(bp->b_length != map->bm_len)) { in _xfs_buf_obj_cmp()
355 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_obj_cmp()
413 struct xfs_buf *bp, in xfs_buf_find_lock() argument
417 if (!xfs_buf_trylock(bp)) { in xfs_buf_find_lock()
418 XFS_STATS_INC(bp->b_mount, xb_busy_locked); in xfs_buf_find_lock()
422 xfs_buf_lock(bp); in xfs_buf_find_lock()
423 XFS_STATS_INC(bp->b_mount, xb_get_locked_waited); in xfs_buf_find_lock()
431 if (bp->b_flags & XBF_STALE) { in xfs_buf_find_lock()
433 xfs_buf_unlock(bp); in xfs_buf_find_lock()
436 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in xfs_buf_find_lock()
437 bp->b_flags &= _XBF_KMEM; in xfs_buf_find_lock()
438 bp->b_ops = NULL; in xfs_buf_find_lock()
445 struct xfs_buf *bp) in xfs_buf_try_hold() argument
447 spin_lock(&bp->b_lock); in xfs_buf_try_hold()
448 if (bp->b_hold == 0) { in xfs_buf_try_hold()
449 spin_unlock(&bp->b_lock); in xfs_buf_try_hold()
452 bp->b_hold++; in xfs_buf_try_hold()
453 spin_unlock(&bp->b_lock); in xfs_buf_try_hold()
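
xfs_buf_try_hold() encodes the rule that a hold count of zero means teardown has already committed: because hash removal is RCU-deferred, a racing lookup can still find the dying buffer, and it must fail to take a reference rather than resurrect it. A compilable model (pthread mutex standing in for b_lock; hypothetical struct):

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical simplified buffer; the mutex stands in for b_lock. */
    struct buf {
        pthread_mutex_t lock;
        int hold;
    };

    /* Model of xfs_buf_try_hold(): a zero hold count means the buffer
     * is already being torn down, so the lookup fails and retries. */
    static bool buf_try_hold(struct buf *bp)
    {
        bool ok;

        pthread_mutex_lock(&bp->lock);
        ok = bp->hold > 0;
        if (ok)
            bp->hold++;
        pthread_mutex_unlock(&bp->lock);
        return ok;
    }
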
464 struct xfs_buf *bp; in xfs_buf_lookup() local
468 bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params); in xfs_buf_lookup()
469 if (!bp || !xfs_buf_try_hold(bp)) { in xfs_buf_lookup()
475 error = xfs_buf_find_lock(bp, flags); in xfs_buf_lookup()
477 xfs_buf_rele(bp); in xfs_buf_lookup()
481 trace_xfs_buf_find(bp, flags, _RET_IP_); in xfs_buf_lookup()
482 *bpp = bp; in xfs_buf_lookup()
502 struct xfs_buf *bp; in xfs_buf_find_insert() local
513 bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash, in xfs_buf_find_insert()
515 if (IS_ERR(bp)) { in xfs_buf_find_insert()
517 error = PTR_ERR(bp); in xfs_buf_find_insert()
520 if (bp && xfs_buf_try_hold(bp)) { in xfs_buf_find_insert()
523 error = xfs_buf_find_lock(bp, flags); in xfs_buf_find_insert()
525 xfs_buf_rele(bp); in xfs_buf_find_insert()
527 *bpp = bp; in xfs_buf_find_insert()
580 struct xfs_buf *bp = NULL; in xfs_buf_get_map() local
597 error = xfs_buf_lookup(bch, &cmap, flags, &bp); in xfs_buf_get_map()
602 if (unlikely(!bp)) { in xfs_buf_get_map()
610 flags, &bp); in xfs_buf_get_map()
624 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
627 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
628 *bpp = bp; in xfs_buf_get_map()
639 struct xfs_buf *bp) in _xfs_buf_read() argument
641 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
643 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); in _xfs_buf_read()
644 bp->b_flags |= XBF_READ; in _xfs_buf_read()
645 xfs_buf_submit(bp); in _xfs_buf_read()
646 return xfs_buf_iowait(bp); in _xfs_buf_read()
668 struct xfs_buf *bp, in xfs_buf_reverify() argument
671 ASSERT(bp->b_flags & XBF_DONE); in xfs_buf_reverify()
672 ASSERT(bp->b_error == 0); in xfs_buf_reverify()
674 if (!ops || bp->b_ops) in xfs_buf_reverify()
677 bp->b_ops = ops; in xfs_buf_reverify()
678 bp->b_ops->verify_read(bp); in xfs_buf_reverify()
679 if (bp->b_error) in xfs_buf_reverify()
680 bp->b_flags &= ~XBF_DONE; in xfs_buf_reverify()
681 return bp->b_error; in xfs_buf_reverify()
694 struct xfs_buf *bp; in xfs_buf_read_map() local
702 error = xfs_buf_get_map(target, map, nmaps, flags, &bp); in xfs_buf_read_map()
706 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
708 if (!(bp->b_flags & XBF_DONE)) { in xfs_buf_read_map()
711 bp->b_ops = ops; in xfs_buf_read_map()
712 error = _xfs_buf_read(bp); in xfs_buf_read_map()
715 error = xfs_buf_reverify(bp, ops); in xfs_buf_read_map()
718 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
719 ASSERT(bp->b_ops != NULL || ops == NULL); in xfs_buf_read_map()
741 xfs_buf_ioerror_alert(bp, fa); in xfs_buf_read_map()
743 bp->b_flags &= ~XBF_DONE; in xfs_buf_read_map()
744 xfs_buf_stale(bp); in xfs_buf_read_map()
745 xfs_buf_relse(bp); in xfs_buf_read_map()
753 *bpp = bp; in xfs_buf_read_map()
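
Putting the xfs_buf_read_map() pieces together: the caller gets the buffer, a read is only issued when XBF_DONE is clear, a cached-but-unverified buffer is re-checked via xfs_buf_reverify(), and on error the buffer has already been staled and released. A hedged usage sketch (kernel context assumed; xfs_buf_read() is the single-map wrapper declared in xfs_buf.h, and its exact signature here is an assumption):

    /* Hypothetical helper showing the usual read-verify-release
     * pattern built on the functions above. Not standalone. */
    static int read_one_block(struct xfs_mount *mp, xfs_daddr_t daddr,
                              const struct xfs_buf_ops *ops)
    {
        struct xfs_buf  *bp;
        int             error;

        error = xfs_buf_read(mp->m_ddev_targp, daddr, 1, 0, &bp, ops);
        if (error)
            return error;   /* read_map already staled and released bp */

        /* ... interpret bp->b_addr, already checked by ops->verify_read ... */

        xfs_buf_relse(bp);  /* unlock and drop the hold from the lookup */
        return 0;
    }
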
769 struct xfs_buf *bp; in xfs_buf_readahead_map() local
778 if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp)) in xfs_buf_readahead_map()
780 trace_xfs_buf_readahead(bp, 0, _RET_IP_); in xfs_buf_readahead_map()
782 if (bp->b_flags & XBF_DONE) { in xfs_buf_readahead_map()
783 xfs_buf_reverify(bp, ops); in xfs_buf_readahead_map()
784 xfs_buf_relse(bp); in xfs_buf_readahead_map()
788 bp->b_ops = ops; in xfs_buf_readahead_map()
789 bp->b_flags &= ~(XBF_WRITE | XBF_DONE); in xfs_buf_readahead_map()
790 bp->b_flags |= flags; in xfs_buf_readahead_map()
792 xfs_buf_submit(bp); in xfs_buf_readahead_map()
809 struct xfs_buf *bp; in xfs_buf_read_uncached() local
814 error = xfs_buf_get_uncached(target, numblks, &bp); in xfs_buf_read_uncached()
819 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
820 bp->b_rhash_key = XFS_BUF_DADDR_NULL; in xfs_buf_read_uncached()
821 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
822 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
823 bp->b_ops = ops; in xfs_buf_read_uncached()
825 xfs_buf_submit(bp); in xfs_buf_read_uncached()
826 error = xfs_buf_iowait(bp); in xfs_buf_read_uncached()
828 xfs_buf_relse(bp); in xfs_buf_read_uncached()
832 *bpp = bp; in xfs_buf_read_uncached()
858 struct xfs_buf *bp) in xfs_buf_hold() argument
860 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
862 spin_lock(&bp->b_lock); in xfs_buf_hold()
863 bp->b_hold++; in xfs_buf_hold()
864 spin_unlock(&bp->b_lock); in xfs_buf_hold()
869 struct xfs_buf *bp) in xfs_buf_rele_uncached() argument
871 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele_uncached()
873 spin_lock(&bp->b_lock); in xfs_buf_rele_uncached()
874 if (--bp->b_hold) { in xfs_buf_rele_uncached()
875 spin_unlock(&bp->b_lock); in xfs_buf_rele_uncached()
878 spin_unlock(&bp->b_lock); in xfs_buf_rele_uncached()
879 xfs_buf_free(bp); in xfs_buf_rele_uncached()
884 struct xfs_buf *bp) in xfs_buf_rele_cached() argument
886 struct xfs_buftarg *btp = bp->b_target; in xfs_buf_rele_cached()
887 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele_cached()
891 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele_cached()
893 spin_lock(&bp->b_lock); in xfs_buf_rele_cached()
894 ASSERT(bp->b_hold >= 1); in xfs_buf_rele_cached()
895 if (bp->b_hold > 1) { in xfs_buf_rele_cached()
896 bp->b_hold--; in xfs_buf_rele_cached()
901 if (atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele_cached()
907 if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) in xfs_buf_rele_cached()
908 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele_cached()
910 bp->b_hold--; in xfs_buf_rele_cached()
912 bp->b_hold--; in xfs_buf_rele_cached()
919 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele_cached()
920 list_lru_del_obj(&btp->bt_lru, &bp->b_lru); in xfs_buf_rele_cached()
922 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele_cached()
925 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele_cached()
926 rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head, in xfs_buf_rele_cached()
934 spin_unlock(&bp->b_lock); in xfs_buf_rele_cached()
937 xfs_buf_free(bp); in xfs_buf_rele_cached()
945 struct xfs_buf *bp) in xfs_buf_rele() argument
947 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
948 if (xfs_buf_is_uncached(bp)) in xfs_buf_rele()
949 xfs_buf_rele_uncached(bp); in xfs_buf_rele()
951 xfs_buf_rele_cached(bp); in xfs_buf_rele()
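
The two release paths above differ in the endgame: an uncached buffer is freed as soon as its hold count drops to zero, while a cached buffer with remaining b_lru_ref credit parks on the LRU, which then owns one hold, so the shrinker can age it out later. A simplified model of the cached case (hypothetical struct; hash removal and locking elided):

    #include <stdbool.h>

    /* Hypothetical simplified buffer for the cached-release endgame. */
    struct buf {
        int  hold;      /* user references */
        int  lru_ref;   /* aging credit, b_lru_ref in the kernel */
        bool on_lru;
        bool freed;
    };

    /* Model of xfs_buf_rele_cached(): the last user reference either
     * parks the buffer on the LRU or unhashes and frees it. */
    static void buf_rele(struct buf *bp)
    {
        if (--bp->hold > 0)
            return;                 /* other references remain */
        if (bp->lru_ref > 0) {
            bp->hold = 1;           /* this hold now belongs to the LRU */
            bp->on_lru = true;
        } else {
            bp->freed = true;       /* rhashtable remove + xfs_buf_free() */
        }
    }
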
967 struct xfs_buf *bp) in xfs_buf_trylock() argument
971 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
973 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
975 trace_xfs_buf_trylock_fail(bp, _RET_IP_); in xfs_buf_trylock()
990 struct xfs_buf *bp) in xfs_buf_lock() argument
992 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
994 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
995 xfs_log_force(bp->b_mount, 0); in xfs_buf_lock()
996 down(&bp->b_sema); in xfs_buf_lock()
998 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
1003 struct xfs_buf *bp) in xfs_buf_unlock() argument
1005 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_unlock()
1007 up(&bp->b_sema); in xfs_buf_unlock()
1008 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
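
The lock hits above show why the buffer lock is a semaphore rather than a mutex: buffer ownership can migrate between contexts, e.g. I/O completion unlocks a buffer that a different context locked, which a mutex's strict-owner semantics would forbid. xfs_buf_lock() also forces the log first when the buffer is pinned and stale, so the later unpin does not stall on log I/O. A userspace analogue of the trylock (POSIX semaphore standing in for b_sema):

    #include <semaphore.h>
    #include <stdbool.h>

    /* sem_init(&sem, 0, 1) models an unlocked buffer; note that
     * sema_init(&bp->b_sema, 0) in xfs_buf_alloc() above creates the
     * buffer already locked, with no waiters. */
    static bool buf_trylock(sem_t *sem)
    {
        return sem_trywait(sem) == 0;   /* mirrors down_trylock() == 0 */
    }
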
1013 struct xfs_buf *bp) in xfs_buf_wait_unpin() argument
1017 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1020 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1023 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1027 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1033 struct xfs_buf *bp) in xfs_buf_ioerror_alert_ratelimited() argument
1038 if (bp->b_target != lasttarg || in xfs_buf_ioerror_alert_ratelimited()
1041 xfs_buf_ioerror_alert(bp, __this_address); in xfs_buf_ioerror_alert_ratelimited()
1043 lasttarg = bp->b_target; in xfs_buf_ioerror_alert_ratelimited()
1052 struct xfs_buf *bp, in xfs_buf_ioerror_permanent() argument
1055 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioerror_permanent()
1058 ++bp->b_retries > cfg->max_retries) in xfs_buf_ioerror_permanent()
1061 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) in xfs_buf_ioerror_permanent()
1073 * caller handle the error in bp->b_error appropriately.
1090 struct xfs_buf *bp) in xfs_buf_ioend_handle_error() argument
1092 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioend_handle_error()
1103 xfs_buf_ioerror_alert_ratelimited(bp); in xfs_buf_ioend_handle_error()
1109 if (bp->b_flags & _XBF_LOGRECOVERY) { in xfs_buf_ioend_handle_error()
1117 if (!(bp->b_flags & XBF_ASYNC)) in xfs_buf_ioend_handle_error()
1120 trace_xfs_buf_iodone_async(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
1122 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); in xfs_buf_ioend_handle_error()
1123 if (bp->b_last_error != bp->b_error || in xfs_buf_ioend_handle_error()
1124 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) { in xfs_buf_ioend_handle_error()
1125 bp->b_last_error = bp->b_error; in xfs_buf_ioend_handle_error()
1127 !bp->b_first_retry_time) in xfs_buf_ioend_handle_error()
1128 bp->b_first_retry_time = jiffies; in xfs_buf_ioend_handle_error()
1136 if (xfs_buf_ioerror_permanent(bp, cfg)) { in xfs_buf_ioend_handle_error()
1142 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { in xfs_buf_ioend_handle_error()
1147 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1148 xfs_buf_relse(bp); in xfs_buf_ioend_handle_error()
1152 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1153 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); in xfs_buf_ioend_handle_error()
1154 reinit_completion(&bp->b_iowait); in xfs_buf_ioend_handle_error()
1155 xfs_buf_submit(bp); in xfs_buf_ioend_handle_error()
1158 xfs_buf_stale(bp); in xfs_buf_ioend_handle_error()
1159 bp->b_flags |= XBF_DONE; in xfs_buf_ioend_handle_error()
1160 bp->b_flags &= ~XBF_WRITE; in xfs_buf_ioend_handle_error()
1161 trace_xfs_buf_error_relse(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
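
The error-handling hits implement a retry policy: the first failure records a timestamp, repeat failures are resubmitted (with XBF_WRITE_FAIL set) until either the configured retry count or the retry window is exhausted, and only then is the error treated as permanent and the buffer staled. The decision in isolation (userspace model; jiffies replaced by a plain counter, hypothetical simplified config where zero means "retry forever" instead of the kernel's XFS_ERR_RETRY_FOREVER sentinel):

    #include <stdbool.h>

    struct error_cfg {
        int           max_retries;
        unsigned long retry_timeout;
    };

    /* Model of xfs_buf_ioerror_permanent(): a repeated write error
     * becomes permanent once the retry count or the retry window from
     * the first failure is exhausted. */
    static bool error_is_permanent(const struct error_cfg *cfg,
                                   int retries,
                                   unsigned long first_fail,
                                   unsigned long now)
    {
        if (cfg->max_retries && retries > cfg->max_retries)
            return true;
        if (cfg->retry_timeout && now - first_fail > cfg->retry_timeout)
            return true;
        return false;
    }
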
1168 struct xfs_buf *bp) in __xfs_buf_ioend() argument
1170 trace_xfs_buf_iodone(bp, _RET_IP_); in __xfs_buf_ioend()
1172 if (bp->b_flags & XBF_READ) { in __xfs_buf_ioend()
1173 if (!bp->b_error && is_vmalloc_addr(bp->b_addr)) in __xfs_buf_ioend()
1174 invalidate_kernel_vmap_range(bp->b_addr, in __xfs_buf_ioend()
1175 roundup(BBTOB(bp->b_length), PAGE_SIZE)); in __xfs_buf_ioend()
1176 if (!bp->b_error && bp->b_ops) in __xfs_buf_ioend()
1177 bp->b_ops->verify_read(bp); in __xfs_buf_ioend()
1178 if (!bp->b_error) in __xfs_buf_ioend()
1179 bp->b_flags |= XBF_DONE; in __xfs_buf_ioend()
1180 if (bp->b_flags & XBF_READ_AHEAD) in __xfs_buf_ioend()
1181 percpu_counter_dec(&bp->b_target->bt_readahead_count); in __xfs_buf_ioend()
1183 if (!bp->b_error) { in __xfs_buf_ioend()
1184 bp->b_flags &= ~XBF_WRITE_FAIL; in __xfs_buf_ioend()
1185 bp->b_flags |= XBF_DONE; in __xfs_buf_ioend()
1188 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) in __xfs_buf_ioend()
1192 bp->b_last_error = 0; in __xfs_buf_ioend()
1193 bp->b_retries = 0; in __xfs_buf_ioend()
1194 bp->b_first_retry_time = 0; in __xfs_buf_ioend()
1201 if (bp->b_log_item) in __xfs_buf_ioend()
1202 xfs_buf_item_done(bp); in __xfs_buf_ioend()
1204 if (bp->b_iodone) in __xfs_buf_ioend()
1205 bp->b_iodone(bp); in __xfs_buf_ioend()
1208 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | in __xfs_buf_ioend()
1215 struct xfs_buf *bp) in xfs_buf_ioend() argument
1217 if (!__xfs_buf_ioend(bp)) in xfs_buf_ioend()
1219 if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1220 xfs_buf_relse(bp); in xfs_buf_ioend()
1222 complete(&bp->b_iowait); in xfs_buf_ioend()
1229 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1232 if (__xfs_buf_ioend(bp)) in xfs_buf_ioend_work()
1233 xfs_buf_relse(bp); in xfs_buf_ioend_work()
1238 struct xfs_buf *bp, in __xfs_buf_ioerror() argument
1243 bp->b_error = error; in __xfs_buf_ioerror()
1244 trace_xfs_buf_ioerror(bp, error, failaddr); in __xfs_buf_ioerror()
1249 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1252 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", in xfs_buf_ioerror_alert()
1254 func, (uint64_t)xfs_buf_daddr(bp), in xfs_buf_ioerror_alert()
1255 bp->b_length, -bp->b_error); in xfs_buf_ioerror_alert()
1266 struct xfs_buf *bp) in xfs_buf_ioend_fail() argument
1268 bp->b_flags &= ~XBF_DONE; in xfs_buf_ioend_fail()
1269 xfs_buf_stale(bp); in xfs_buf_ioend_fail()
1270 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioend_fail()
1271 xfs_buf_ioend(bp); in xfs_buf_ioend_fail()
1276 struct xfs_buf *bp) in xfs_bwrite() argument
1280 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1282 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1283 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1286 xfs_buf_submit(bp); in xfs_bwrite()
1287 error = xfs_buf_iowait(bp); in xfs_bwrite()
1289 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); in xfs_bwrite()
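
xfs_bwrite() above is the synchronous write: the caller must already hold the buffer lock, the submit/iowait pair runs the full verify-and-bio path, and any error shuts the filesystem down. A hedged caller sketch (kernel context assumed; note that xfs_bwrite() does not release the buffer):

    /* Hypothetical caller of xfs_bwrite(): the buffer is locked with
     * its new contents staged in bp->b_addr; on failure the filesystem
     * is already shut down, so only the error is propagated. */
    static int write_one_buffer(struct xfs_buf *bp)
    {
        int error;

        ASSERT(xfs_buf_islocked(bp));
        error = xfs_bwrite(bp);     /* submit and wait; no release */
        xfs_buf_relse(bp);          /* callers drop the buffer themselves */
        return error;
    }
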
1297 struct xfs_buf *bp = bio->bi_private; in xfs_buf_bio_end_io() local
1300 xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status)); in xfs_buf_bio_end_io()
1301 else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && in xfs_buf_bio_end_io()
1302 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) in xfs_buf_bio_end_io()
1303 xfs_buf_ioerror(bp, -EIO); in xfs_buf_bio_end_io()
1305 if (bp->b_flags & XBF_ASYNC) { in xfs_buf_bio_end_io()
1306 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_bio_end_io()
1307 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); in xfs_buf_bio_end_io()
1309 complete(&bp->b_iowait); in xfs_buf_bio_end_io()
1317 struct xfs_buf *bp) in xfs_buf_bio_op() argument
1321 if (bp->b_flags & XBF_WRITE) { in xfs_buf_bio_op()
1325 if (bp->b_flags & XBF_READ_AHEAD) in xfs_buf_bio_op()
1334 struct xfs_buf *bp) in xfs_buf_submit_bio() argument
1336 unsigned int len = BBTOB(bp->b_length); in xfs_buf_submit_bio()
1337 unsigned int nr_vecs = bio_add_max_vecs(bp->b_addr, len); in xfs_buf_submit_bio()
1342 bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp), in xfs_buf_submit_bio()
1344 if (is_vmalloc_addr(bp->b_addr)) in xfs_buf_submit_bio()
1345 bio_add_vmalloc(bio, bp->b_addr, len); in xfs_buf_submit_bio()
1347 bio_add_virt_nofail(bio, bp->b_addr, len); in xfs_buf_submit_bio()
1348 bio->bi_private = bp; in xfs_buf_submit_bio()
1357 for (map = 0; map < bp->b_map_count - 1; map++) { in xfs_buf_submit_bio()
1360 split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS, in xfs_buf_submit_bio()
1362 split->bi_iter.bi_sector = bp->b_maps[map].bm_bn; in xfs_buf_submit_bio()
1366 bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn; in xfs_buf_submit_bio()
1376 struct xfs_buf *bp) in xfs_buf_iowait() argument
1378 ASSERT(!(bp->b_flags & XBF_ASYNC)); in xfs_buf_iowait()
1381 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_iowait()
1382 wait_for_completion(&bp->b_iowait); in xfs_buf_iowait()
1383 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_iowait()
1384 } while (!__xfs_buf_ioend(bp)); in xfs_buf_iowait()
1386 return bp->b_error; in xfs_buf_iowait()
1395 struct xfs_buf *bp) in xfs_buf_verify_write() argument
1397 if (bp->b_ops) { in xfs_buf_verify_write()
1398 bp->b_ops->verify_write(bp); in xfs_buf_verify_write()
1399 if (bp->b_error) in xfs_buf_verify_write()
1401 } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { in xfs_buf_verify_write()
1406 if (xfs_has_crc(bp->b_mount)) { in xfs_buf_verify_write()
1407 xfs_warn(bp->b_mount, in xfs_buf_verify_write()
1409 __func__, xfs_buf_daddr(bp), in xfs_buf_verify_write()
1410 bp->b_length); in xfs_buf_verify_write()
1411 xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN); in xfs_buf_verify_write()
1427 struct xfs_buf *bp) in xfs_buf_submit() argument
1429 trace_xfs_buf_submit(bp, _RET_IP_); in xfs_buf_submit()
1431 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_submit()
1448 if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) { in xfs_buf_submit()
1449 xfs_buf_ioend_fail(bp); in xfs_buf_submit()
1453 if (bp->b_flags & XBF_WRITE) in xfs_buf_submit()
1454 xfs_buf_wait_unpin(bp); in xfs_buf_submit()
1460 bp->b_error = 0; in xfs_buf_submit()
1462 if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) { in xfs_buf_submit()
1463 xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE); in xfs_buf_submit()
1464 xfs_buf_ioend(bp); in xfs_buf_submit()
1469 if (xfs_buftarg_is_mem(bp->b_target)) { in xfs_buf_submit()
1470 xfs_buf_ioend(bp); in xfs_buf_submit()
1474 xfs_buf_submit_bio(bp); in xfs_buf_submit()
1490 struct xfs_buf *bp, in __xfs_buf_mark_corrupt() argument
1493 ASSERT(bp->b_flags & XBF_DONE); in __xfs_buf_mark_corrupt()
1495 xfs_buf_corruption_error(bp, fa); in __xfs_buf_mark_corrupt()
1496 xfs_buf_stale(bp); in __xfs_buf_mark_corrupt()
1515 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_drain_rele() local
1518 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_drain_rele()
1520 if (bp->b_hold > 1) { in xfs_buftarg_drain_rele()
1522 spin_unlock(&bp->b_lock); in xfs_buftarg_drain_rele()
1523 trace_xfs_buf_drain_buftarg(bp, _RET_IP_); in xfs_buftarg_drain_rele()
1531 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_drain_rele()
1532 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_drain_rele()
1534 spin_unlock(&bp->b_lock); in xfs_buftarg_drain_rele()
1577 struct xfs_buf *bp; in xfs_buftarg_drain() local
1578 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_drain()
1579 list_del_init(&bp->b_lru); in xfs_buftarg_drain()
1580 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_buftarg_drain()
1582 xfs_buf_alert_ratelimited(bp, in xfs_buftarg_drain()
1585 (long long)xfs_buf_daddr(bp)); in xfs_buftarg_drain()
1587 xfs_buf_rele(bp); in xfs_buftarg_drain()
1612 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1616 * we are inverting the lru lock/bp->b_lock here, so use a trylock. in xfs_buftarg_isolate()
1619 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1626 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1627 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1631 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1633 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1650 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1651 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1652 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1653 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
1836 struct xfs_buf *bp) in xfs_buf_list_del() argument
1838 list_del_init(&bp->b_list); in xfs_buf_list_del()
1839 wake_up_var(&bp->b_list); in xfs_buf_list_del()
1852 struct xfs_buf *bp; in xfs_buf_delwri_cancel() local
1855 bp = list_first_entry(list, struct xfs_buf, b_list); in xfs_buf_delwri_cancel()
1857 xfs_buf_lock(bp); in xfs_buf_delwri_cancel()
1858 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_cancel()
1859 xfs_buf_list_del(bp); in xfs_buf_delwri_cancel()
1860 xfs_buf_relse(bp); in xfs_buf_delwri_cancel()
1877 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
1880 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
1881 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
1888 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
1889 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
1893 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
1903 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
1904 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
1905 xfs_buf_hold(bp); in xfs_buf_delwri_queue()
1906 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
1920 struct xfs_buf *bp, in xfs_buf_delwri_queue_here() argument
1929 while (!list_empty(&bp->b_list)) { in xfs_buf_delwri_queue_here()
1930 xfs_buf_unlock(bp); in xfs_buf_delwri_queue_here()
1931 wait_var_event(&bp->b_list, list_empty(&bp->b_list)); in xfs_buf_delwri_queue_here()
1932 xfs_buf_lock(bp); in xfs_buf_delwri_queue_here()
1935 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_delwri_queue_here()
1937 xfs_buf_delwri_queue(bp, buffer_list); in xfs_buf_delwri_queue_here()
1952 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
1955 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
1965 struct xfs_buf *bp) in xfs_buf_delwri_submit_prep() argument
1973 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in xfs_buf_delwri_submit_prep()
1974 xfs_buf_list_del(bp); in xfs_buf_delwri_submit_prep()
1975 xfs_buf_relse(bp); in xfs_buf_delwri_submit_prep()
1979 trace_xfs_buf_delwri_split(bp, _RET_IP_); in xfs_buf_delwri_submit_prep()
1980 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_submit_prep()
1981 bp->b_flags |= XBF_WRITE; in xfs_buf_delwri_submit_prep()
2005 struct xfs_buf *bp, *n; in xfs_buf_delwri_submit_nowait() local
2012 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in xfs_buf_delwri_submit_nowait()
2013 if (!xfs_buf_trylock(bp)) in xfs_buf_delwri_submit_nowait()
2015 if (xfs_buf_ispinned(bp)) { in xfs_buf_delwri_submit_nowait()
2016 xfs_buf_unlock(bp); in xfs_buf_delwri_submit_nowait()
2020 if (!xfs_buf_delwri_submit_prep(bp)) in xfs_buf_delwri_submit_nowait()
2022 bp->b_flags |= XBF_ASYNC; in xfs_buf_delwri_submit_nowait()
2023 xfs_buf_list_del(bp); in xfs_buf_delwri_submit_nowait()
2024 xfs_buf_submit(bp); in xfs_buf_delwri_submit_nowait()
2045 struct xfs_buf *bp, *n; in xfs_buf_delwri_submit() local
2051 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in xfs_buf_delwri_submit()
2052 xfs_buf_lock(bp); in xfs_buf_delwri_submit()
2053 if (!xfs_buf_delwri_submit_prep(bp)) in xfs_buf_delwri_submit()
2055 bp->b_flags &= ~XBF_ASYNC; in xfs_buf_delwri_submit()
2056 list_move_tail(&bp->b_list, &wait_list); in xfs_buf_delwri_submit()
2057 xfs_buf_submit(bp); in xfs_buf_delwri_submit()
2063 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
2065 xfs_buf_list_del(bp); in xfs_buf_delwri_submit()
2071 error2 = xfs_buf_iowait(bp); in xfs_buf_delwri_submit()
2072 xfs_buf_relse(bp); in xfs_buf_delwri_submit()
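
The delwri functions above implement batched delayed writes: callers accumulate locked buffers on a private list with xfs_buf_delwri_queue(), which takes a hold and sets _XBF_DELWRI_Q, then xfs_buf_delwri_submit() sorts the list by disk address via xfs_buf_cmp(), writes everything, and waits for each buffer. A hedged usage sketch (kernel context assumed):

    /* Hypothetical delayed-write batch built on the functions above. */
    static int flush_buffers(struct xfs_buf **bufs, int nr)
    {
        LIST_HEAD(buffer_list);
        int i;

        for (i = 0; i < nr; i++) {
            xfs_buf_lock(bufs[i]);
            /* Takes a hold and sets _XBF_DELWRI_Q; a no-op if the
             * buffer is already queued. */
            xfs_buf_delwri_queue(bufs[i], &buffer_list);
            xfs_buf_unlock(bufs[i]);
        }
        /* Sorts by daddr, submits all writes, waits for each buffer,
         * and returns the first I/O error seen. */
        return xfs_buf_delwri_submit(&buffer_list);
    }
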
2080 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) in xfs_buf_set_ref() argument
2087 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) in xfs_buf_set_ref()
2090 atomic_set(&bp->b_lru_ref, lru_ref); in xfs_buf_set_ref()
2100 struct xfs_buf *bp, in xfs_verify_magic() argument
2103 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic()
2107 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) in xfs_verify_magic()
2109 return dmagic == bp->b_ops->magic[idx]; in xfs_verify_magic()
2118 struct xfs_buf *bp, in xfs_verify_magic16() argument
2121 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic16()
2125 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) in xfs_verify_magic16()
2127 return dmagic == bp->b_ops->magic16[idx]; in xfs_verify_magic16()
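
xfs_verify_magic() and xfs_verify_magic16() above let one verifier serve both on-disk format generations: index 0 holds the pre-v5 magic, index 1 the v5 (CRC-enabled) magic, selected by xfs_has_crc(). A compilable model of the 16-bit variant (hypothetical simplified types; the kernel compares disk-endian values without swapping):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical simplified ops table, as in struct xfs_buf_ops:
     * index 0 is the pre-v5 magic, index 1 the v5 (CRC) magic. */
    struct buf_ops { uint16_t magic16[2]; };

    /* Model of xfs_verify_magic16(): pick the magic by filesystem
     * generation and compare. */
    static bool verify_magic16(const struct buf_ops *ops, bool has_crc,
                               uint16_t dmagic)
    {
        int idx = has_crc ? 1 : 0;

        if (!ops->magic16[idx])
            return false;           /* WARN_ON case in the kernel */
        return dmagic == ops->magic16[idx];
    }
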