xref: /linux/fs/xfs/xfs_buf.c (revision 86f5536004a61a0c797c14a248fc976f03f55cd5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include <linux/backing-dev.h>
8 #include <linux/dax.h>
9 
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_trace.h"
16 #include "xfs_log.h"
17 #include "xfs_log_recover.h"
18 #include "xfs_log_priv.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_errortag.h"
22 #include "xfs_error.h"
23 #include "xfs_ag.h"
24 #include "xfs_buf_mem.h"
25 #include "xfs_notify_failure.h"
26 
27 struct kmem_cache *xfs_buf_cache;
28 
29 /*
30  * Locking orders
31  *
32  * xfs_buf_ioacct_inc:
33  * xfs_buf_ioacct_dec:
34  *	b_sema (caller holds)
35  *	  b_lock
36  *
37  * xfs_buf_stale:
38  *	b_sema (caller holds)
39  *	  b_lock
40  *	    lru_lock
41  *
42  * xfs_buf_rele:
43  *	b_lock
44  *	  pag_buf_lock
45  *	    lru_lock
46  *
47  * xfs_buftarg_drain_rele
48  *	lru_lock
49  *	  b_lock (trylock due to inversion)
50  *
51  * xfs_buftarg_isolate
52  *	lru_lock
53  *	  b_lock (trylock due to inversion)
54  */
55 
56 static void xfs_buf_submit(struct xfs_buf *bp);
57 static int xfs_buf_iowait(struct xfs_buf *bp);
58 
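/*
 * Uncached buffers are never inserted into the buffer cache; they are
 * identified by a lookup key of XFS_BUF_DADDR_NULL.
 */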
59 static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
60 {
61 	return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
62 }
63 
64 static inline int
65 xfs_buf_is_vmapped(
66 	struct xfs_buf	*bp)
67 {
68 	/*
69 	 * Return true if the buffer is vmapped.
70 	 *
71 	 * b_addr is null if the buffer is not mapped, but single-page buffers
72 	 * are mapped directly via page_address() rather than vmap, so the check
73 	 * has to test both b_addr and bp->b_page_count > 1.
74 	 */
75 	return bp->b_addr && bp->b_page_count > 1;
76 }
77 
78 static inline int
79 xfs_buf_vmap_len(
80 	struct xfs_buf	*bp)
81 {
82 	return (bp->b_page_count * PAGE_SIZE);
83 }
84 
85 /*
86  * Bump the I/O in flight count on the buftarg if we haven't yet done so for
87  * this buffer. The count is incremented once per buffer (per hold cycle)
88  * because the corresponding decrement is deferred to buffer release. Buffers
89  * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
90  * tracking adds unnecessary overhead. This is used for synchronization purposes
91  * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
92  * in-flight buffers.
93  *
94  * Buffers that are never released (e.g., superblock, iclog buffers) must set
95  * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
96  * never reaches zero and unmount hangs indefinitely.
97  */
98 static inline void
99 xfs_buf_ioacct_inc(
100 	struct xfs_buf	*bp)
101 {
102 	if (bp->b_flags & XBF_NO_IOACCT)
103 		return;
104 
105 	ASSERT(bp->b_flags & XBF_ASYNC);
106 	spin_lock(&bp->b_lock);
107 	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
108 		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
109 		percpu_counter_inc(&bp->b_target->bt_io_count);
110 	}
111 	spin_unlock(&bp->b_lock);
112 }
113 
114 /*
115  * Clear the in-flight state on a buffer about to be released to the LRU or
116  * freed and unaccount from the buftarg.
117  */
118 static inline void
119 __xfs_buf_ioacct_dec(
120 	struct xfs_buf	*bp)
121 {
122 	lockdep_assert_held(&bp->b_lock);
123 
124 	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
125 		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
126 		percpu_counter_dec(&bp->b_target->bt_io_count);
127 	}
128 }
129 
130 /*
131  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
132  * b_lru_ref count so that the buffer is freed immediately when the buffer
133  * reference count falls to zero. If the buffer is already on the LRU, we need
134  * to remove the reference that LRU holds on the buffer.
135  *
136  * This prevents build-up of stale buffers on the LRU.
137  */
138 void
139 xfs_buf_stale(
140 	struct xfs_buf	*bp)
141 {
142 	ASSERT(xfs_buf_islocked(bp));
143 
144 	bp->b_flags |= XBF_STALE;
145 
146 	/*
147 	 * Clear the delwri status so that a delwri queue walker will not
148 	 * flush this buffer to disk now that it is stale. The delwri queue has
149 	 * a reference to the buffer, so this is safe to do.
150 	 */
151 	bp->b_flags &= ~_XBF_DELWRI_Q;
152 
153 	/*
154 	 * Once the buffer is marked stale and unlocked, a subsequent lookup
155 	 * could reset b_flags. There is no guarantee that the buffer is
156 	 * unaccounted (released to LRU) before that occurs. Drop in-flight
157 	 * status now to preserve accounting consistency.
158 	 */
159 	spin_lock(&bp->b_lock);
160 	__xfs_buf_ioacct_dec(bp);
161 
162 	atomic_set(&bp->b_lru_ref, 0);
163 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
164 	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
165 		bp->b_hold--;
166 
167 	ASSERT(bp->b_hold >= 1);
168 	spin_unlock(&bp->b_lock);
169 }
170 
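/*
 * Set up the map array for a buffer. Single-segment buffers use the map
 * embedded in the xfs_buf; multi-segment buffers allocate a separate array.
 */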
171 static int
172 xfs_buf_get_maps(
173 	struct xfs_buf		*bp,
174 	int			map_count)
175 {
176 	ASSERT(bp->b_maps == NULL);
177 	bp->b_map_count = map_count;
178 
179 	if (map_count == 1) {
180 		bp->b_maps = &bp->__b_map;
181 		return 0;
182 	}
183 
184 	bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
185 			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
186 	if (!bp->b_maps)
187 		return -ENOMEM;
188 	return 0;
189 }
190 
191 static void
192 xfs_buf_free_maps(
193 	struct xfs_buf	*bp)
194 {
195 	if (bp->b_maps != &bp->__b_map) {
196 		kfree(bp->b_maps);
197 		bp->b_maps = NULL;
198 	}
199 }
200 
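/*
 * Allocate an xfs_buf structure and initialise its locks, lists, hold count
 * and block maps. The backing data memory is allocated separately by the
 * caller.
 */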
201 static int
202 _xfs_buf_alloc(
203 	struct xfs_buftarg	*target,
204 	struct xfs_buf_map	*map,
205 	int			nmaps,
206 	xfs_buf_flags_t		flags,
207 	struct xfs_buf		**bpp)
208 {
209 	struct xfs_buf		*bp;
210 	int			error;
211 	int			i;
212 
213 	*bpp = NULL;
214 	bp = kmem_cache_zalloc(xfs_buf_cache,
215 			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
216 
217 	/*
218 	 * We don't want certain flags to appear in b_flags unless they are
219 	 * specifically set by later operations on the buffer.
220 	 */
221 	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
222 
223 	spin_lock_init(&bp->b_lock);
224 	bp->b_hold = 1;
225 	atomic_set(&bp->b_lru_ref, 1);
226 	init_completion(&bp->b_iowait);
227 	INIT_LIST_HEAD(&bp->b_lru);
228 	INIT_LIST_HEAD(&bp->b_list);
229 	INIT_LIST_HEAD(&bp->b_li_list);
230 	sema_init(&bp->b_sema, 0); /* held, no waiters */
231 	bp->b_target = target;
232 	bp->b_mount = target->bt_mount;
233 	bp->b_flags = flags;
234 
235 	/*
236 	 * Set up the block maps for this buffer. b_length is computed below as
237 	 * the sum of the individual map segment lengths, and is what the I/O
238 	 * routines use for the I/O size.
239 	 */
240 	error = xfs_buf_get_maps(bp, nmaps);
241 	if (error)  {
242 		kmem_cache_free(xfs_buf_cache, bp);
243 		return error;
244 	}
245 
246 	bp->b_rhash_key = map[0].bm_bn;
247 	bp->b_length = 0;
248 	for (i = 0; i < nmaps; i++) {
249 		bp->b_maps[i].bm_bn = map[i].bm_bn;
250 		bp->b_maps[i].bm_len = map[i].bm_len;
251 		bp->b_length += map[i].bm_len;
252 	}
253 
254 	atomic_set(&bp->b_pin_count, 0);
255 	init_waitqueue_head(&bp->b_waiters);
256 
257 	XFS_STATS_INC(bp->b_mount, xb_create);
258 	trace_xfs_buf_init(bp, _RET_IP_);
259 
260 	*bpp = bp;
261 	return 0;
262 }
263 
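/*
 * Tear down the page-based backing store of a buffer: unmap any vmapped
 * region, free the pages and, if used, the externally allocated page array.
 */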
264 static void
265 xfs_buf_free_pages(
266 	struct xfs_buf	*bp)
267 {
268 	uint		i;
269 
270 	ASSERT(bp->b_flags & _XBF_PAGES);
271 
272 	if (xfs_buf_is_vmapped(bp))
273 		vm_unmap_ram(bp->b_addr, bp->b_page_count);
274 
275 	for (i = 0; i < bp->b_page_count; i++) {
276 		if (bp->b_pages[i])
277 			__free_page(bp->b_pages[i]);
278 	}
279 	mm_account_reclaimed_pages(bp->b_page_count);
280 
281 	if (bp->b_pages != bp->b_page_array)
282 		kfree(bp->b_pages);
283 	bp->b_pages = NULL;
284 	bp->b_flags &= ~_XBF_PAGES;
285 }
286 
287 static void
288 xfs_buf_free_callback(
289 	struct callback_head	*cb)
290 {
291 	struct xfs_buf		*bp = container_of(cb, struct xfs_buf, b_rcu);
292 
293 	xfs_buf_free_maps(bp);
294 	kmem_cache_free(xfs_buf_cache, bp);
295 }
296 
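/*
 * Free a buffer's backing memory immediately, but defer freeing of the
 * xfs_buf itself to an RCU callback so that lockless cache lookups still
 * in progress remain safe.
 */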
297 static void
298 xfs_buf_free(
299 	struct xfs_buf		*bp)
300 {
301 	trace_xfs_buf_free(bp, _RET_IP_);
302 
303 	ASSERT(list_empty(&bp->b_lru));
304 
305 	if (xfs_buftarg_is_mem(bp->b_target))
306 		xmbuf_unmap_page(bp);
307 	else if (bp->b_flags & _XBF_PAGES)
308 		xfs_buf_free_pages(bp);
309 	else if (bp->b_flags & _XBF_KMEM)
310 		kfree(bp->b_addr);
311 
312 	call_rcu(&bp->b_rcu, xfs_buf_free_callback);
313 }
314 
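/*
 * Try to back a sub-page sized buffer with a heap allocation. If the
 * allocation would straddle a page boundary, give up so that the caller
 * falls back to the page allocator.
 */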
315 static int
316 xfs_buf_alloc_kmem(
317 	struct xfs_buf	*bp,
318 	xfs_buf_flags_t	flags)
319 {
320 	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
321 	size_t		size = BBTOB(bp->b_length);
322 
323 	/* Assure zeroed buffer for non-read cases. */
324 	if (!(flags & XBF_READ))
325 		gfp_mask |= __GFP_ZERO;
326 
327 	bp->b_addr = kmalloc(size, gfp_mask);
328 	if (!bp->b_addr)
329 		return -ENOMEM;
330 
331 	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
332 	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
333 		/* b_addr spans two pages - use alloc_page instead */
334 		kfree(bp->b_addr);
335 		bp->b_addr = NULL;
336 		return -ENOMEM;
337 	}
338 	bp->b_offset = offset_in_page(bp->b_addr);
339 	bp->b_pages = bp->b_page_array;
340 	bp->b_pages[0] = kmem_to_page(bp->b_addr);
341 	bp->b_page_count = 1;
342 	bp->b_flags |= _XBF_KMEM;
343 	return 0;
344 }
345 
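/*
 * Back a buffer with individually allocated pages, retrying the bulk
 * allocation until the page array is fully populated. Readahead buffers
 * give up as soon as no progress is made instead of retrying.
 */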
346 static int
347 xfs_buf_alloc_pages(
348 	struct xfs_buf	*bp,
349 	xfs_buf_flags_t	flags)
350 {
351 	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
352 	long		filled = 0;
353 
354 	if (flags & XBF_READ_AHEAD)
355 		gfp_mask |= __GFP_NORETRY;
356 
357 	/* Make sure that we have a page list */
358 	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
359 	if (bp->b_page_count <= XB_PAGES) {
360 		bp->b_pages = bp->b_page_array;
361 	} else {
362 		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
363 					gfp_mask);
364 		if (!bp->b_pages)
365 			return -ENOMEM;
366 	}
367 	bp->b_flags |= _XBF_PAGES;
368 
369 	/* Assure zeroed buffer for non-read cases. */
370 	if (!(flags & XBF_READ))
371 		gfp_mask |= __GFP_ZERO;
372 
373 	/*
374 	 * Bulk filling of pages can take multiple calls. Not filling the entire
375 	 * array is not an allocation failure, so don't back off if we get at
376 	 * least one extra page.
377 	 */
378 	for (;;) {
379 		long	last = filled;
380 
381 		filled = alloc_pages_bulk(gfp_mask, bp->b_page_count,
382 					  bp->b_pages);
383 		if (filled == bp->b_page_count) {
384 			XFS_STATS_INC(bp->b_mount, xb_page_found);
385 			break;
386 		}
387 
388 		if (filled != last)
389 			continue;
390 
391 		if (flags & XBF_READ_AHEAD) {
392 			xfs_buf_free_pages(bp);
393 			return -ENOMEM;
394 		}
395 
396 		XFS_STATS_INC(bp->b_mount, xb_page_retries);
397 		memalloc_retry_wait(gfp_mask);
398 	}
399 	return 0;
400 }
401 
402 /*
403  *	Map buffer into kernel address-space if necessary.
404  */
405 STATIC int
406 _xfs_buf_map_pages(
407 	struct xfs_buf		*bp,
408 	xfs_buf_flags_t		flags)
409 {
410 	ASSERT(bp->b_flags & _XBF_PAGES);
411 	if (bp->b_page_count == 1) {
412 		/* A single page buffer is always mappable */
413 		bp->b_addr = page_address(bp->b_pages[0]);
414 	} else if (flags & XBF_UNMAPPED) {
415 		bp->b_addr = NULL;
416 	} else {
417 		int retried = 0;
418 		unsigned nofs_flag;
419 
420 		/*
421 		 * vm_map_ram() will allocate auxiliary structures (e.g.
422 		 * pagetables) with GFP_KERNEL, yet we are often under a scoped nofs
423 		 * context here. Mixing GFP_KERNEL with GFP_NOFS allocations
424 		 * from the same call site that can be run from both above and
425 		 * below memory reclaim causes lockdep false positives. Hence we
426 		 * always need to force this allocation to nofs context because
427 		 * we can't pass __GFP_NOLOCKDEP down to auxiliary structures to
428 		 * prevent false positive lockdep reports.
429 		 *
430 		 * XXX(dgc): I think dquot reclaim is the only place we can get
431 		 * to this function from memory reclaim context now. If we fix
432 		 * that like we've fixed inode reclaim to avoid writeback from
433 		 * reclaim, this nofs wrapping can go away.
434 		 */
435 		nofs_flag = memalloc_nofs_save();
436 		do {
437 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
438 						-1);
439 			if (bp->b_addr)
440 				break;
441 			vm_unmap_aliases();
442 		} while (retried++ <= 1);
443 		memalloc_nofs_restore(nofs_flag);
444 
445 		if (!bp->b_addr)
446 			return -ENOMEM;
447 	}
448 
449 	return 0;
450 }
451 
452 /*
453  *	Finding and Reading Buffers
454  */
455 static int
456 _xfs_buf_obj_cmp(
457 	struct rhashtable_compare_arg	*arg,
458 	const void			*obj)
459 {
460 	const struct xfs_buf_map	*map = arg->key;
461 	const struct xfs_buf		*bp = obj;
462 
463 	/*
464 	 * The key hashing in the lookup path depends on the key being the
465 	 * first element of the compare_arg, so make sure to assert this.
466 	 */
467 	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
468 
469 	if (bp->b_rhash_key != map->bm_bn)
470 		return 1;
471 
472 	if (unlikely(bp->b_length != map->bm_len)) {
473 		/*
474 		 * found a block number match. If the range doesn't
475 		 * match, the only way this is allowed is if the buffer
476 		 * in the cache is stale and the transaction that made
477 		 * it stale has not yet committed. i.e. we are
478 		 * reallocating a busy extent. Skip this buffer and
479 		 * continue searching for an exact match.
480 		 *
481 		 * Note: If we're scanning for incore buffers to stale, don't
482 		 * complain if we find non-stale buffers.
483 		 */
484 		if (!(map->bm_flags & XBM_LIVESCAN))
485 			ASSERT(bp->b_flags & XBF_STALE);
486 		return 1;
487 	}
488 	return 0;
489 }
490 
491 static const struct rhashtable_params xfs_buf_hash_params = {
492 	.min_size		= 32,	/* empty AGs have minimal footprint */
493 	.nelem_hint		= 16,
494 	.key_len		= sizeof(xfs_daddr_t),
495 	.key_offset		= offsetof(struct xfs_buf, b_rhash_key),
496 	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
497 	.automatic_shrinking	= true,
498 	.obj_cmpfn		= _xfs_buf_obj_cmp,
499 };
500 
501 int
502 xfs_buf_cache_init(
503 	struct xfs_buf_cache	*bch)
504 {
505 	spin_lock_init(&bch->bc_lock);
506 	return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
507 }
508 
509 void
510 xfs_buf_cache_destroy(
511 	struct xfs_buf_cache	*bch)
512 {
513 	rhashtable_destroy(&bch->bc_hash);
514 }
515 
516 static int
517 xfs_buf_map_verify(
518 	struct xfs_buftarg	*btp,
519 	struct xfs_buf_map	*map)
520 {
521 	xfs_daddr_t		eofs;
522 
523 	/* Check for IOs smaller than the sector size / not sector aligned */
524 	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
525 	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
526 
527 	/*
528 	 * Corrupted block numbers can get through to here, unfortunately, so we
529 	 * have to check that the buffer falls within the filesystem bounds.
530 	 */
531 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
532 	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
533 		xfs_alert(btp->bt_mount,
534 			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
535 			  __func__, map->bm_bn, eofs);
536 		WARN_ON(1);
537 		return -EFSCORRUPTED;
538 	}
539 	return 0;
540 }
541 
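/*
 * Lock a buffer returned by a cache lookup, honouring XBF_TRYLOCK, and
 * clear the stale state of a buffer that is about to be reused.
 */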
542 static int
543 xfs_buf_find_lock(
544 	struct xfs_buf          *bp,
545 	xfs_buf_flags_t		flags)
546 {
547 	if (flags & XBF_TRYLOCK) {
548 		if (!xfs_buf_trylock(bp)) {
549 			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
550 			return -EAGAIN;
551 		}
552 	} else {
553 		xfs_buf_lock(bp);
554 		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
555 	}
556 
557 	/*
558 	 * if the buffer is stale, clear all the external state associated with
559 	 * it. We need to keep flags such as how we allocated the buffer memory
560 	 * intact here.
561 	 */
562 	if (bp->b_flags & XBF_STALE) {
563 		if (flags & XBF_LIVESCAN) {
564 			xfs_buf_unlock(bp);
565 			return -ENOENT;
566 		}
567 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
568 		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
569 		bp->b_ops = NULL;
570 	}
571 	return 0;
572 }
573 
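/*
 * Take a new hold on a buffer unless the last reference has already been
 * dropped, in which case the buffer is about to be freed.
 */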
574 static bool
575 xfs_buf_try_hold(
576 	struct xfs_buf		*bp)
577 {
578 	spin_lock(&bp->b_lock);
579 	if (bp->b_hold == 0) {
580 		spin_unlock(&bp->b_lock);
581 		return false;
582 	}
583 	bp->b_hold++;
584 	spin_unlock(&bp->b_lock);
585 	return true;
586 }
587 
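/*
 * Look up a buffer in the cache under RCU protection, take a hold on it
 * and lock it for the caller.
 */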
588 static inline int
589 xfs_buf_lookup(
590 	struct xfs_buf_cache	*bch,
591 	struct xfs_buf_map	*map,
592 	xfs_buf_flags_t		flags,
593 	struct xfs_buf		**bpp)
594 {
595 	struct xfs_buf          *bp;
596 	int			error;
597 
598 	rcu_read_lock();
599 	bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
600 	if (!bp || !xfs_buf_try_hold(bp)) {
601 		rcu_read_unlock();
602 		return -ENOENT;
603 	}
604 	rcu_read_unlock();
605 
606 	error = xfs_buf_find_lock(bp, flags);
607 	if (error) {
608 		xfs_buf_rele(bp);
609 		return error;
610 	}
611 
612 	trace_xfs_buf_find(bp, flags, _RET_IP_);
613 	*bpp = bp;
614 	return 0;
615 }
616 
617 /*
618  * Insert the new_bp into the hash table. This consumes the perag reference
619  * taken for the lookup regardless of the result of the insert.
620  */
621 static int
622 xfs_buf_find_insert(
623 	struct xfs_buftarg	*btp,
624 	struct xfs_buf_cache	*bch,
625 	struct xfs_perag	*pag,
626 	struct xfs_buf_map	*cmap,
627 	struct xfs_buf_map	*map,
628 	int			nmaps,
629 	xfs_buf_flags_t		flags,
630 	struct xfs_buf		**bpp)
631 {
632 	struct xfs_buf		*new_bp;
633 	struct xfs_buf		*bp;
634 	int			error;
635 
636 	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
637 	if (error)
638 		goto out_drop_pag;
639 
640 	if (xfs_buftarg_is_mem(new_bp->b_target)) {
641 		error = xmbuf_map_page(new_bp);
642 	} else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
643 		   xfs_buf_alloc_kmem(new_bp, flags) < 0) {
644 		/*
645 		 * For buffers that fit entirely within a single page, first
646 		 * attempt to allocate the memory from the heap to minimise
647 		 * memory usage. If we can't get heap memory for these small
648 		 * buffers, we fall back to using the page allocator.
649 		 */
650 		error = xfs_buf_alloc_pages(new_bp, flags);
651 	}
652 	if (error)
653 		goto out_free_buf;
654 
655 	spin_lock(&bch->bc_lock);
656 	bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
657 			&new_bp->b_rhash_head, xfs_buf_hash_params);
658 	if (IS_ERR(bp)) {
659 		error = PTR_ERR(bp);
660 		spin_unlock(&bch->bc_lock);
661 		goto out_free_buf;
662 	}
663 	if (bp && xfs_buf_try_hold(bp)) {
664 		/* found an existing buffer */
665 		spin_unlock(&bch->bc_lock);
666 		error = xfs_buf_find_lock(bp, flags);
667 		if (error)
668 			xfs_buf_rele(bp);
669 		else
670 			*bpp = bp;
671 		goto out_free_buf;
672 	}
673 
674 	/* The new buffer keeps the perag reference until it is freed. */
675 	new_bp->b_pag = pag;
676 	spin_unlock(&bch->bc_lock);
677 	*bpp = new_bp;
678 	return 0;
679 
680 out_free_buf:
681 	xfs_buf_free(new_bp);
682 out_drop_pag:
683 	if (pag)
684 		xfs_perag_put(pag);
685 	return error;
686 }
687 
688 static inline struct xfs_perag *
689 xfs_buftarg_get_pag(
690 	struct xfs_buftarg		*btp,
691 	const struct xfs_buf_map	*map)
692 {
693 	struct xfs_mount		*mp = btp->bt_mount;
694 
695 	if (xfs_buftarg_is_mem(btp))
696 		return NULL;
697 	return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
698 }
699 
700 static inline struct xfs_buf_cache *
701 xfs_buftarg_buf_cache(
702 	struct xfs_buftarg		*btp,
703 	struct xfs_perag		*pag)
704 {
705 	if (pag)
706 		return &pag->pag_bcache;
707 	return btp->bt_cache;
708 }
709 
710 /*
711  * Assembles a buffer covering the specified range. The code is optimised for
712  * cache hits, as metadata intensive workloads will see 3 orders of magnitude
713  * more hits than misses.
714  */
715 int
716 xfs_buf_get_map(
717 	struct xfs_buftarg	*btp,
718 	struct xfs_buf_map	*map,
719 	int			nmaps,
720 	xfs_buf_flags_t		flags,
721 	struct xfs_buf		**bpp)
722 {
723 	struct xfs_buf_cache	*bch;
724 	struct xfs_perag	*pag;
725 	struct xfs_buf		*bp = NULL;
726 	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
727 	int			error;
728 	int			i;
729 
730 	if (flags & XBF_LIVESCAN)
731 		cmap.bm_flags |= XBM_LIVESCAN;
732 	for (i = 0; i < nmaps; i++)
733 		cmap.bm_len += map[i].bm_len;
734 
735 	error = xfs_buf_map_verify(btp, &cmap);
736 	if (error)
737 		return error;
738 
739 	pag = xfs_buftarg_get_pag(btp, &cmap);
740 	bch = xfs_buftarg_buf_cache(btp, pag);
741 
742 	error = xfs_buf_lookup(bch, &cmap, flags, &bp);
743 	if (error && error != -ENOENT)
744 		goto out_put_perag;
745 
746 	/* cache hits always outnumber misses by at least 10:1 */
747 	if (unlikely(!bp)) {
748 		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
749 
750 		if (flags & XBF_INCORE)
751 			goto out_put_perag;
752 
753 		/* xfs_buf_find_insert() consumes the perag reference. */
754 		error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
755 				flags, &bp);
756 		if (error)
757 			return error;
758 	} else {
759 		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
760 		if (pag)
761 			xfs_perag_put(pag);
762 	}
763 
764 	/* We do not hold a perag reference anymore. */
765 	if (!bp->b_addr) {
766 		error = _xfs_buf_map_pages(bp, flags);
767 		if (unlikely(error)) {
768 			xfs_warn_ratelimited(btp->bt_mount,
769 				"%s: failed to map %u pages", __func__,
770 				bp->b_page_count);
771 			xfs_buf_relse(bp);
772 			return error;
773 		}
774 	}
775 
776 	/*
777 	 * Clear b_error if this is a lookup from a caller that doesn't expect
778 	 * valid data to be found in the buffer.
779 	 */
780 	if (!(flags & XBF_READ))
781 		xfs_buf_ioerror(bp, 0);
782 
783 	XFS_STATS_INC(btp->bt_mount, xb_get);
784 	trace_xfs_buf_get(bp, flags, _RET_IP_);
785 	*bpp = bp;
786 	return 0;
787 
788 out_put_perag:
789 	if (pag)
790 		xfs_perag_put(pag);
791 	return error;
792 }
793 
794 int
795 _xfs_buf_read(
796 	struct xfs_buf		*bp,
797 	xfs_buf_flags_t		flags)
798 {
799 	ASSERT(!(flags & XBF_WRITE));
800 	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
801 
802 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
803 	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
804 
805 	xfs_buf_submit(bp);
806 	if (flags & XBF_ASYNC)
807 		return 0;
808 	return xfs_buf_iowait(bp);
809 }
810 
811 /*
812  * Reverify a buffer found in cache without an attached ->b_ops.
813  *
814  * If the caller passed an ops structure and the buffer doesn't have ops
815  * assigned, set the ops and use it to verify the contents. If verification
816  * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
817  * already in XBF_DONE state on entry.
818  *
819  * Under normal operations, every in-core buffer is verified on read I/O
820  * completion. There are two scenarios that can lead to in-core buffers without
821  * an assigned ->b_ops. The first is during log recovery of buffers on a V4
822  * filesystem, though these buffers are purged at the end of recovery. The
823  * other is online repair, which intentionally reads with a NULL buffer ops to
824  * run several verifiers across an in-core buffer in order to establish buffer
825  * type.  If repair can't establish that, the buffer will be left in memory
826  * with NULL buffer ops.
827  */
828 int
829 xfs_buf_reverify(
830 	struct xfs_buf		*bp,
831 	const struct xfs_buf_ops *ops)
832 {
833 	ASSERT(bp->b_flags & XBF_DONE);
834 	ASSERT(bp->b_error == 0);
835 
836 	if (!ops || bp->b_ops)
837 		return 0;
838 
839 	bp->b_ops = ops;
840 	bp->b_ops->verify_read(bp);
841 	if (bp->b_error)
842 		bp->b_flags &= ~XBF_DONE;
843 	return bp->b_error;
844 }
845 
846 int
847 xfs_buf_read_map(
848 	struct xfs_buftarg	*target,
849 	struct xfs_buf_map	*map,
850 	int			nmaps,
851 	xfs_buf_flags_t		flags,
852 	struct xfs_buf		**bpp,
853 	const struct xfs_buf_ops *ops,
854 	xfs_failaddr_t		fa)
855 {
856 	struct xfs_buf		*bp;
857 	int			error;
858 
859 	flags |= XBF_READ;
860 	*bpp = NULL;
861 
862 	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
863 	if (error)
864 		return error;
865 
866 	trace_xfs_buf_read(bp, flags, _RET_IP_);
867 
868 	if (!(bp->b_flags & XBF_DONE)) {
869 		/* Initiate the buffer read and wait. */
870 		XFS_STATS_INC(target->bt_mount, xb_get_read);
871 		bp->b_ops = ops;
872 		error = _xfs_buf_read(bp, flags);
873 
874 		/* Readahead iodone already dropped the buffer, so exit. */
875 		if (flags & XBF_ASYNC)
876 			return 0;
877 	} else {
878 		/* Buffer already read; all we need to do is check it. */
879 		error = xfs_buf_reverify(bp, ops);
880 
881 		/* Readahead already finished; drop the buffer and exit. */
882 		if (flags & XBF_ASYNC) {
883 			xfs_buf_relse(bp);
884 			return 0;
885 		}
886 
887 		/* We do not want read in the flags */
888 		bp->b_flags &= ~XBF_READ;
889 		ASSERT(bp->b_ops != NULL || ops == NULL);
890 	}
891 
892 	/*
893 	 * If we've had a read error, then the contents of the buffer are
894 	 * invalid and should not be used. To ensure that a followup read tries
895 	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
896 	 * mark the buffer stale. This ensures that anyone who has a current
897 	 * reference to the buffer will interpret its contents correctly and
898 	 * future cache lookups will also treat it as an empty, uninitialised
899 	 * buffer.
900 	 */
901 	if (error) {
902 		/*
903 		 * Check against log shutdown for error reporting because
904 		 * metadata writeback may require a read first and we need to
905 		 * report errors in metadata writeback until the log is shut
906 		 * down. High level transaction read functions already check
907 		 * against mount shutdown, anyway, so we only need to be
908 		 * concerned about low level IO interactions here.
909 		 */
910 		if (!xlog_is_shutdown(target->bt_mount->m_log))
911 			xfs_buf_ioerror_alert(bp, fa);
912 
913 		bp->b_flags &= ~XBF_DONE;
914 		xfs_buf_stale(bp);
915 		xfs_buf_relse(bp);
916 
917 		/* bad CRC means corrupted metadata */
918 		if (error == -EFSBADCRC)
919 			error = -EFSCORRUPTED;
920 		return error;
921 	}
922 
923 	*bpp = bp;
924 	return 0;
925 }
926 
927 /*
928  *	If we are not low on memory then do the readahead in a deadlock
929  *	safe manner.
930  */
931 void
932 xfs_buf_readahead_map(
933 	struct xfs_buftarg	*target,
934 	struct xfs_buf_map	*map,
935 	int			nmaps,
936 	const struct xfs_buf_ops *ops)
937 {
938 	struct xfs_buf		*bp;
939 
940 	/*
941 	 * Currently we don't have a good means or justification for performing
942 	 * xmbuf_map_page asynchronously, so we don't do readahead.
943 	 */
944 	if (xfs_buftarg_is_mem(target))
945 		return;
946 
947 	xfs_buf_read_map(target, map, nmaps,
948 		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
949 		     __this_address);
950 }
951 
952 /*
953  * Read an uncached buffer from disk. Allocates and returns a locked
954  * buffer containing the disk contents or nothing. Uncached buffers always have
955  * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
956  * is cached or uncached during fault diagnosis.
957  */
958 int
959 xfs_buf_read_uncached(
960 	struct xfs_buftarg	*target,
961 	xfs_daddr_t		daddr,
962 	size_t			numblks,
963 	xfs_buf_flags_t		flags,
964 	struct xfs_buf		**bpp,
965 	const struct xfs_buf_ops *ops)
966 {
967 	struct xfs_buf		*bp;
968 	int			error;
969 
970 	*bpp = NULL;
971 
972 	error = xfs_buf_get_uncached(target, numblks, flags, &bp);
973 	if (error)
974 		return error;
975 
976 	/* set up the buffer for a read IO */
977 	ASSERT(bp->b_map_count == 1);
978 	bp->b_rhash_key = XFS_BUF_DADDR_NULL;
979 	bp->b_maps[0].bm_bn = daddr;
980 	bp->b_flags |= XBF_READ;
981 	bp->b_ops = ops;
982 
983 	xfs_buf_submit(bp);
984 	error = xfs_buf_iowait(bp);
985 	if (error) {
986 		xfs_buf_relse(bp);
987 		return error;
988 	}
989 
990 	*bpp = bp;
991 	return 0;
992 }
993 
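/*
 * Allocate an uncached buffer that is not inserted into any cache, backed
 * by freshly allocated and mapped memory.
 */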
994 int
995 xfs_buf_get_uncached(
996 	struct xfs_buftarg	*target,
997 	size_t			numblks,
998 	xfs_buf_flags_t		flags,
999 	struct xfs_buf		**bpp)
1000 {
1001 	int			error;
1002 	struct xfs_buf		*bp;
1003 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
1004 
1005 	*bpp = NULL;
1006 
1007 	/* flags might contain irrelevant bits, pass only what we care about */
1008 	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
1009 	if (error)
1010 		return error;
1011 
1012 	if (xfs_buftarg_is_mem(bp->b_target))
1013 		error = xmbuf_map_page(bp);
1014 	else
1015 		error = xfs_buf_alloc_pages(bp, flags);
1016 	if (error)
1017 		goto fail_free_buf;
1018 
1019 	error = _xfs_buf_map_pages(bp, 0);
1020 	if (unlikely(error)) {
1021 		xfs_warn(target->bt_mount,
1022 			"%s: failed to map pages", __func__);
1023 		goto fail_free_buf;
1024 	}
1025 
1026 	trace_xfs_buf_get_uncached(bp, _RET_IP_);
1027 	*bpp = bp;
1028 	return 0;
1029 
1030 fail_free_buf:
1031 	xfs_buf_free(bp);
1032 	return error;
1033 }
1034 
1035 /*
1036  *	Increment reference count on buffer, to hold the buffer concurrently
1037  *	with another thread which may release (free) the buffer asynchronously.
1038  *	Must hold the buffer already to call this function.
1039  */
1040 void
1041 xfs_buf_hold(
1042 	struct xfs_buf		*bp)
1043 {
1044 	trace_xfs_buf_hold(bp, _RET_IP_);
1045 
1046 	spin_lock(&bp->b_lock);
1047 	bp->b_hold++;
1048 	spin_unlock(&bp->b_lock);
1049 }
1050 
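/*
 * Drop a reference on an uncached buffer. The final reference frees it
 * immediately as uncached buffers never sit on the LRU.
 */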
1051 static void
1052 xfs_buf_rele_uncached(
1053 	struct xfs_buf		*bp)
1054 {
1055 	ASSERT(list_empty(&bp->b_lru));
1056 
1057 	spin_lock(&bp->b_lock);
1058 	if (--bp->b_hold) {
1059 		spin_unlock(&bp->b_lock);
1060 		return;
1061 	}
1062 	__xfs_buf_ioacct_dec(bp);
1063 	spin_unlock(&bp->b_lock);
1064 	xfs_buf_free(bp);
1065 }
1066 
1067 static void
1068 xfs_buf_rele_cached(
1069 	struct xfs_buf		*bp)
1070 {
1071 	struct xfs_buftarg	*btp = bp->b_target;
1072 	struct xfs_perag	*pag = bp->b_pag;
1073 	struct xfs_buf_cache	*bch = xfs_buftarg_buf_cache(btp, pag);
1074 	bool			freebuf = false;
1075 
1076 	trace_xfs_buf_rele(bp, _RET_IP_);
1077 
1078 	spin_lock(&bp->b_lock);
1079 	ASSERT(bp->b_hold >= 1);
1080 	if (bp->b_hold > 1) {
1081 		/*
1082 		 * Drop the in-flight state if the buffer is already on the LRU
1083 		 * and it holds the only reference. This is racy because we
1084 		 * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
1085 		 * ensures the decrement occurs only once per buffer.
1086 		 */
1087 		if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
1088 			__xfs_buf_ioacct_dec(bp);
1089 		goto out_unlock;
1090 	}
1091 
1092 	/* we are asked to drop the last reference */
1093 	spin_lock(&bch->bc_lock);
1094 	__xfs_buf_ioacct_dec(bp);
1095 	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1096 		/*
1097 		 * If the buffer is added to the LRU, keep the reference to the
1098 		 * buffer for the LRU and clear the (now stale) dispose list
1099 		 * state flag, else drop the reference.
1100 		 */
1101 		if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
1102 			bp->b_state &= ~XFS_BSTATE_DISPOSE;
1103 		else
1104 			bp->b_hold--;
1105 		spin_unlock(&bch->bc_lock);
1106 	} else {
1107 		bp->b_hold--;
1108 		/*
1109 		 * Most of the time buffers will already have been removed from the
1110 		 * LRU, so optimise that case by checking for the XFS_BSTATE_DISPOSE
1111 		 * flag, which indicates that the buffer was last on the disposal
1112 		 * list.
1113 		 */
1114 		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1115 			list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
1116 		} else {
1117 			ASSERT(list_empty(&bp->b_lru));
1118 		}
1119 
1120 		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1121 		rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
1122 				xfs_buf_hash_params);
1123 		spin_unlock(&bch->bc_lock);
1124 		if (pag)
1125 			xfs_perag_put(pag);
1126 		freebuf = true;
1127 	}
1128 
1129 out_unlock:
1130 	spin_unlock(&bp->b_lock);
1131 
1132 	if (freebuf)
1133 		xfs_buf_free(bp);
1134 }
1135 
1136 /*
1137  * Release a hold on the specified buffer.
1138  */
1139 void
1140 xfs_buf_rele(
1141 	struct xfs_buf		*bp)
1142 {
1143 	trace_xfs_buf_rele(bp, _RET_IP_);
1144 	if (xfs_buf_is_uncached(bp))
1145 		xfs_buf_rele_uncached(bp);
1146 	else
1147 		xfs_buf_rele_cached(bp);
1148 }
1149 
1150 /*
1151  *	Lock a buffer object, if it is not already locked.
1152  *
1153  *	If we come across a stale, pinned, locked buffer, we know that we are
1154  *	being asked to lock a buffer that has been reallocated. Because it is
1155  *	pinned, we know that the log has not been pushed to disk and hence it
1156  *	will still be locked.  Rather than continuing to have trylock attempts
1157  *	fail until someone else pushes the log, push it ourselves before
1158  *	returning.  This means that the xfsaild will not get stuck trying
1159  *	to push on stale inode buffers.
1160  */
1161 int
1162 xfs_buf_trylock(
1163 	struct xfs_buf		*bp)
1164 {
1165 	int			locked;
1166 
1167 	locked = down_trylock(&bp->b_sema) == 0;
1168 	if (locked)
1169 		trace_xfs_buf_trylock(bp, _RET_IP_);
1170 	else
1171 		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1172 	return locked;
1173 }
1174 
1175 /*
1176  *	Lock a buffer object.
1177  *
1178  *	If we come across a stale, pinned, locked buffer, we know that we
1179  *	are being asked to lock a buffer that has been reallocated. Because
1180  *	it is pinned, we know that the log has not been pushed to disk and
1181  *	hence it will still be locked. Rather than sleeping until someone
1182  *	else pushes the log, push it ourselves before trying to get the lock.
1183  */
1184 void
1185 xfs_buf_lock(
1186 	struct xfs_buf		*bp)
1187 {
1188 	trace_xfs_buf_lock(bp, _RET_IP_);
1189 
1190 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1191 		xfs_log_force(bp->b_mount, 0);
1192 	down(&bp->b_sema);
1193 
1194 	trace_xfs_buf_lock_done(bp, _RET_IP_);
1195 }
1196 
1197 void
1198 xfs_buf_unlock(
1199 	struct xfs_buf		*bp)
1200 {
1201 	ASSERT(xfs_buf_islocked(bp));
1202 
1203 	up(&bp->b_sema);
1204 	trace_xfs_buf_unlock(bp, _RET_IP_);
1205 }
1206 
1207 STATIC void
1208 xfs_buf_wait_unpin(
1209 	struct xfs_buf		*bp)
1210 {
1211 	DECLARE_WAITQUEUE	(wait, current);
1212 
1213 	if (atomic_read(&bp->b_pin_count) == 0)
1214 		return;
1215 
1216 	add_wait_queue(&bp->b_waiters, &wait);
1217 	for (;;) {
1218 		set_current_state(TASK_UNINTERRUPTIBLE);
1219 		if (atomic_read(&bp->b_pin_count) == 0)
1220 			break;
1221 		io_schedule();
1222 	}
1223 	remove_wait_queue(&bp->b_waiters, &wait);
1224 	set_current_state(TASK_RUNNING);
1225 }
1226 
1227 static void
1228 xfs_buf_ioerror_alert_ratelimited(
1229 	struct xfs_buf		*bp)
1230 {
1231 	static unsigned long	lasttime;
1232 	static struct xfs_buftarg *lasttarg;
1233 
1234 	if (bp->b_target != lasttarg ||
1235 	    time_after(jiffies, (lasttime + 5*HZ))) {
1236 		lasttime = jiffies;
1237 		xfs_buf_ioerror_alert(bp, __this_address);
1238 	}
1239 	lasttarg = bp->b_target;
1240 }
1241 
1242 /*
1243  * Account for this latest trip around the retry handler, and decide if
1244  * we've failed enough times to constitute a permanent failure.
1245  */
1246 static bool
1247 xfs_buf_ioerror_permanent(
1248 	struct xfs_buf		*bp,
1249 	struct xfs_error_cfg	*cfg)
1250 {
1251 	struct xfs_mount	*mp = bp->b_mount;
1252 
1253 	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
1254 	    ++bp->b_retries > cfg->max_retries)
1255 		return true;
1256 	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
1257 	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
1258 		return true;
1259 
1260 	/* At unmount we may treat errors differently */
1261 	if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
1262 		return true;
1263 
1264 	return false;
1265 }
1266 
1267 /*
1268  * On a sync write or shutdown we just want to stale the buffer and let the
1269  * caller handle the error in bp->b_error appropriately.
1270  *
1271  * If the write was asynchronous then no one will be looking for the error.  If
1272  * this is the first failure of this type, clear the error state and write the
1273  * buffer out again. This means we always retry an async write failure at least
1274  * once, but we also need to set the buffer up to behave correctly now for
1275  * repeated failures.
1276  *
1277  * If we get repeated async write failures, then we take action according to the
1278  * error configuration we have been set up to use.
1279  *
1280  * Returns true if this function took care of error handling and the caller must
1281  * not touch the buffer again.  Return false if the caller should proceed with
1282  * normal I/O completion handling.
1283  */
1284 static bool
1285 xfs_buf_ioend_handle_error(
1286 	struct xfs_buf		*bp)
1287 {
1288 	struct xfs_mount	*mp = bp->b_mount;
1289 	struct xfs_error_cfg	*cfg;
1290 	struct xfs_log_item	*lip;
1291 
1292 	/*
1293 	 * If we've already shutdown the journal because of I/O errors, there's
1294 	 * no point in giving this a retry.
1295 	 */
1296 	if (xlog_is_shutdown(mp->m_log))
1297 		goto out_stale;
1298 
1299 	xfs_buf_ioerror_alert_ratelimited(bp);
1300 
1301 	/*
1302 	 * We're not going to bother about retrying this during recovery.
1303 	 * One strike!
1304 	 */
1305 	if (bp->b_flags & _XBF_LOGRECOVERY) {
1306 		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1307 		return false;
1308 	}
1309 
1310 	/*
1311 	 * Synchronous writes will have callers process the error.
1312 	 */
1313 	if (!(bp->b_flags & XBF_ASYNC))
1314 		goto out_stale;
1315 
1316 	trace_xfs_buf_iodone_async(bp, _RET_IP_);
1317 
1318 	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
1319 	if (bp->b_last_error != bp->b_error ||
1320 	    !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
1321 		bp->b_last_error = bp->b_error;
1322 		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
1323 		    !bp->b_first_retry_time)
1324 			bp->b_first_retry_time = jiffies;
1325 		goto resubmit;
1326 	}
1327 
1328 	/*
1329 	 * Permanent error - we need to trigger a shutdown if we haven't already
1330 	 * to indicate that inconsistency will result from this action.
1331 	 */
1332 	if (xfs_buf_ioerror_permanent(bp, cfg)) {
1333 		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1334 		goto out_stale;
1335 	}
1336 
1337 	/* Still considered a transient error. Caller will schedule retries. */
1338 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
1339 		set_bit(XFS_LI_FAILED, &lip->li_flags);
1340 		clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
1341 	}
1342 
1343 	xfs_buf_ioerror(bp, 0);
1344 	xfs_buf_relse(bp);
1345 	return true;
1346 
1347 resubmit:
1348 	xfs_buf_ioerror(bp, 0);
1349 	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
1350 	xfs_buf_submit(bp);
1351 	return true;
1352 out_stale:
1353 	xfs_buf_stale(bp);
1354 	bp->b_flags |= XBF_DONE;
1355 	bp->b_flags &= ~XBF_WRITE;
1356 	trace_xfs_buf_error_relse(bp, _RET_IP_);
1357 	return false;
1358 }
1359 
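/*
 * I/O completion handling: verify read data, handle write errors and
 * retries, then either release the buffer (async) or wake the waiter
 * (sync).
 */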
1360 static void
1361 xfs_buf_ioend(
1362 	struct xfs_buf	*bp)
1363 {
1364 	trace_xfs_buf_iodone(bp, _RET_IP_);
1365 
1366 	if (bp->b_flags & XBF_READ) {
1367 		if (!bp->b_error && xfs_buf_is_vmapped(bp))
1368 			invalidate_kernel_vmap_range(bp->b_addr,
1369 					xfs_buf_vmap_len(bp));
1370 		if (!bp->b_error && bp->b_ops)
1371 			bp->b_ops->verify_read(bp);
1372 		if (!bp->b_error)
1373 			bp->b_flags |= XBF_DONE;
1374 	} else {
1375 		if (!bp->b_error) {
1376 			bp->b_flags &= ~XBF_WRITE_FAIL;
1377 			bp->b_flags |= XBF_DONE;
1378 		}
1379 
1380 		if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
1381 			return;
1382 
1383 		/* clear the retry state */
1384 		bp->b_last_error = 0;
1385 		bp->b_retries = 0;
1386 		bp->b_first_retry_time = 0;
1387 
1388 		/*
1389 		 * Note that for things like remote attribute buffers, there may
1390 		 * not be a buffer log item here, so processing the buffer log
1391 		 * item must remain optional.
1392 		 */
1393 		if (bp->b_log_item)
1394 			xfs_buf_item_done(bp);
1395 
1396 		if (bp->b_iodone)
1397 			bp->b_iodone(bp);
1398 	}
1399 
1400 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
1401 			 _XBF_LOGRECOVERY);
1402 
1403 	if (bp->b_flags & XBF_ASYNC)
1404 		xfs_buf_relse(bp);
1405 	else
1406 		complete(&bp->b_iowait);
1407 }
1408 
1409 static void
1410 xfs_buf_ioend_work(
1411 	struct work_struct	*work)
1412 {
1413 	struct xfs_buf		*bp =
1414 		container_of(work, struct xfs_buf, b_ioend_work);
1415 
1416 	xfs_buf_ioend(bp);
1417 }
1418 
1419 static void
1420 xfs_buf_ioend_async(
1421 	struct xfs_buf	*bp)
1422 {
1423 	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1424 	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1425 }
1426 
1427 void
1428 __xfs_buf_ioerror(
1429 	struct xfs_buf		*bp,
1430 	int			error,
1431 	xfs_failaddr_t		failaddr)
1432 {
1433 	ASSERT(error <= 0 && error >= -1000);
1434 	bp->b_error = error;
1435 	trace_xfs_buf_ioerror(bp, error, failaddr);
1436 }
1437 
1438 void
1439 xfs_buf_ioerror_alert(
1440 	struct xfs_buf		*bp,
1441 	xfs_failaddr_t		func)
1442 {
1443 	xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1444 		"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
1445 				  func, (uint64_t)xfs_buf_daddr(bp),
1446 				  bp->b_length, -bp->b_error);
1447 }
1448 
1449 /*
1450  * To simulate an I/O failure, the buffer must be locked and held with at least
1451  * three references. The LRU reference is dropped by the stale call. The buf
1452  * item reference is dropped via ioend processing. The third reference is owned
1453  * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
1454  */
1455 void
1456 xfs_buf_ioend_fail(
1457 	struct xfs_buf	*bp)
1458 {
1459 	bp->b_flags &= ~XBF_DONE;
1460 	xfs_buf_stale(bp);
1461 	xfs_buf_ioerror(bp, -EIO);
1462 	xfs_buf_ioend(bp);
1463 }
1464 
1465 int
1466 xfs_bwrite(
1467 	struct xfs_buf		*bp)
1468 {
1469 	int			error;
1470 
1471 	ASSERT(xfs_buf_islocked(bp));
1472 
1473 	bp->b_flags |= XBF_WRITE;
1474 	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1475 			 XBF_DONE);
1476 
1477 	xfs_buf_submit(bp);
1478 	error = xfs_buf_iowait(bp);
1479 	if (error)
1480 		xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1481 	return error;
1482 }
1483 
1484 static void
1485 xfs_buf_bio_end_io(
1486 	struct bio		*bio)
1487 {
1488 	struct xfs_buf		*bp = bio->bi_private;
1489 
1490 	if (bio->bi_status)
1491 		xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status));
1492 	else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1493 		 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1494 		xfs_buf_ioerror(bp, -EIO);
1495 
1496 	xfs_buf_ioend_async(bp);
1497 	bio_put(bio);
1498 }
1499 
1500 static inline blk_opf_t
1501 xfs_buf_bio_op(
1502 	struct xfs_buf		*bp)
1503 {
1504 	blk_opf_t		op;
1505 
1506 	if (bp->b_flags & XBF_WRITE) {
1507 		op = REQ_OP_WRITE;
1508 	} else {
1509 		op = REQ_OP_READ;
1510 		if (bp->b_flags & XBF_READ_AHEAD)
1511 			op |= REQ_RAHEAD;
1512 	}
1513 
1514 	return op | REQ_META;
1515 }
1516 
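/*
 * Build and submit the bio(s) for a buffer. Discontiguous buffers are
 * split into one bio per map segment, chained to the parent bio.
 */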
1517 static void
1518 xfs_buf_submit_bio(
1519 	struct xfs_buf		*bp)
1520 {
1521 	unsigned int		size = BBTOB(bp->b_length);
1522 	unsigned int		map = 0, p;
1523 	struct blk_plug		plug;
1524 	struct bio		*bio;
1525 
1526 	bio = bio_alloc(bp->b_target->bt_bdev, bp->b_page_count,
1527 			xfs_buf_bio_op(bp), GFP_NOIO);
1528 	bio->bi_private = bp;
1529 	bio->bi_end_io = xfs_buf_bio_end_io;
1530 
1531 	if (bp->b_flags & _XBF_KMEM) {
1532 		__bio_add_page(bio, virt_to_page(bp->b_addr), size,
1533 				bp->b_offset);
1534 	} else {
1535 		for (p = 0; p < bp->b_page_count; p++)
1536 			__bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0);
1537 		bio->bi_iter.bi_size = size; /* limit to the actual size used */
1538 
1539 		if (xfs_buf_is_vmapped(bp))
1540 			flush_kernel_vmap_range(bp->b_addr,
1541 					xfs_buf_vmap_len(bp));
1542 	}
1543 
1544 	/*
1545 	 * If there is more than one map segment, split out a new bio for each
1546 	 * map except the last one.  The last map is handled by the
1547 	 * remainder of the original bio outside the loop.
1548 	 */
1549 	blk_start_plug(&plug);
1550 	for (map = 0; map < bp->b_map_count - 1; map++) {
1551 		struct bio	*split;
1552 
1553 		split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
1554 				&fs_bio_set);
1555 		split->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
1556 		bio_chain(split, bio);
1557 		submit_bio(split);
1558 	}
1559 	bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
1560 	submit_bio(bio);
1561 	blk_finish_plug(&plug);
1562 }
1563 
1564 /*
1565  * Wait for I/O completion of a sync buffer and return the I/O error code.
1566  */
1567 static int
1568 xfs_buf_iowait(
1569 	struct xfs_buf	*bp)
1570 {
1571 	ASSERT(!(bp->b_flags & XBF_ASYNC));
1572 
1573 	trace_xfs_buf_iowait(bp, _RET_IP_);
1574 	wait_for_completion(&bp->b_iowait);
1575 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1576 
1577 	return bp->b_error;
1578 }
1579 
1580 /*
1581  * Run the write verifier callback function if it exists. If this fails, mark
1582  * the buffer with an error and do not dispatch the I/O.
1583  */
1584 static bool
1585 xfs_buf_verify_write(
1586 	struct xfs_buf		*bp)
1587 {
1588 	if (bp->b_ops) {
1589 		bp->b_ops->verify_write(bp);
1590 		if (bp->b_error)
1591 			return false;
1592 	} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
1593 		/*
1594 		 * Non-crc filesystems don't attach verifiers during log
1595 		 * recovery, so don't warn for such filesystems.
1596 		 */
1597 		if (xfs_has_crc(bp->b_mount)) {
1598 			xfs_warn(bp->b_mount,
1599 				"%s: no buf ops on daddr 0x%llx len %d",
1600 				__func__, xfs_buf_daddr(bp),
1601 				bp->b_length);
1602 			xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
1603 			dump_stack();
1604 		}
1605 	}
1606 
1607 	return true;
1608 }
1609 
1610 /*
1611  * Buffer I/O submission path, read or write. Asynchronous submission transfers
1612  * the buffer lock ownership and the current reference to the IO. It is not
1613  * safe to reference the buffer after a call to this function unless the caller
1614  * holds an additional reference itself.
1615  */
1616 static void
1617 xfs_buf_submit(
1618 	struct xfs_buf	*bp)
1619 {
1620 	trace_xfs_buf_submit(bp, _RET_IP_);
1621 
1622 	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1623 
1624 	/*
1625 	 * On log shutdown we stale and complete the buffer immediately. We can
1626 	 * be called to read the superblock before the log has been set up, so
1627 	 * be careful checking the log state.
1628 	 *
1629 	 * Checking the mount shutdown state here can result in the log tail
1630 	 * moving inappropriately on disk as the log may not yet be shut down.
1631 	 * i.e. failing this buffer on mount shutdown can remove it from the AIL
1632 	 * and move the tail of the log forwards without having written this
1633 	 * buffer to disk. This corrupts the log tail state in memory, and
1634 	 * because the log may not be shut down yet, it can then be propagated
1635 	 * to disk before the log is shutdown. Hence we check log shutdown
1636 	 * state here rather than mount state to avoid corrupting the log tail
1637 	 * on shutdown.
1638 	 */
1639 	if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
1640 		xfs_buf_ioend_fail(bp);
1641 		return;
1642 	}
1643 
1644 	if (bp->b_flags & XBF_WRITE)
1645 		xfs_buf_wait_unpin(bp);
1646 
1647 	/*
1648 	 * Make sure we capture only current IO errors rather than stale errors
1649 	 * left over from previous use of the buffer (e.g. failed readahead).
1650 	 */
1651 	bp->b_error = 0;
1652 
1653 	if (bp->b_flags & XBF_ASYNC)
1654 		xfs_buf_ioacct_inc(bp);
1655 
1656 	if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
1657 		xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
1658 		xfs_buf_ioend(bp);
1659 		return;
1660 	}
1661 
1662 	/* In-memory targets are directly mapped, no I/O required. */
1663 	if (xfs_buftarg_is_mem(bp->b_target)) {
1664 		xfs_buf_ioend(bp);
1665 		return;
1666 	}
1667 
1668 	xfs_buf_submit_bio(bp);
1669 }
1670 
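/*
 * Return the kernel address of the given byte offset into the buffer,
 * whether the buffer is contiguously mapped or backed by a page array.
 */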
1671 void *
1672 xfs_buf_offset(
1673 	struct xfs_buf		*bp,
1674 	size_t			offset)
1675 {
1676 	struct page		*page;
1677 
1678 	if (bp->b_addr)
1679 		return bp->b_addr + offset;
1680 
1681 	page = bp->b_pages[offset >> PAGE_SHIFT];
1682 	return page_address(page) + (offset & (PAGE_SIZE-1));
1683 }
1684 
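/* Zero a byte range of a buffer, one backing page at a time. */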
1685 void
1686 xfs_buf_zero(
1687 	struct xfs_buf		*bp,
1688 	size_t			boff,
1689 	size_t			bsize)
1690 {
1691 	size_t			bend;
1692 
1693 	bend = boff + bsize;
1694 	while (boff < bend) {
1695 		struct page	*page;
1696 		int		page_index, page_offset, csize;
1697 
1698 		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1699 		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1700 		page = bp->b_pages[page_index];
1701 		csize = min_t(size_t, PAGE_SIZE - page_offset,
1702 				      BBTOB(bp->b_length) - boff);
1703 
1704 		ASSERT((csize + page_offset) <= PAGE_SIZE);
1705 
1706 		memset(page_address(page) + page_offset, 0, csize);
1707 
1708 		boff += csize;
1709 	}
1710 }
1711 
1712 /*
1713  * Log a message about and stale a buffer that a caller has decided is corrupt.
1714  *
1715  * This function should be called for the kinds of metadata corruption that
1716  * cannot be detected by a verifier, such as incorrect inter-block relationship
1717  * data.  Do /not/ call this function from a verifier function.
1718  *
1719  * The buffer must be XBF_DONE prior to the call.  Afterwards, the buffer will
1720  * be marked stale, but b_error will not be set.  The caller is responsible for
1721  * releasing the buffer or fixing it.
1722  */
1723 void
1724 __xfs_buf_mark_corrupt(
1725 	struct xfs_buf		*bp,
1726 	xfs_failaddr_t		fa)
1727 {
1728 	ASSERT(bp->b_flags & XBF_DONE);
1729 
1730 	xfs_buf_corruption_error(bp, fa);
1731 	xfs_buf_stale(bp);
1732 }
1733 
1734 /*
1735  *	Handling of buffer targets (buftargs).
1736  */
1737 
1738 /*
1739  * Wait for any bufs with callbacks that have been submitted but have not yet
1740  * returned. These buffers will have an elevated hold count, so wait on those
1741  * while freeing all the buffers only held by the LRU.
1742  */
1743 static enum lru_status
1744 xfs_buftarg_drain_rele(
1745 	struct list_head	*item,
1746 	struct list_lru_one	*lru,
1747 	void			*arg)
1748 
1749 {
1750 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1751 	struct list_head	*dispose = arg;
1752 
1753 	if (!spin_trylock(&bp->b_lock))
1754 		return LRU_SKIP;
1755 	if (bp->b_hold > 1) {
1756 		/* need to wait, so skip it this pass */
1757 		spin_unlock(&bp->b_lock);
1758 		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
1759 		return LRU_SKIP;
1760 	}
1761 
1762 	/*
1763 	 * clear the LRU reference count so the buffer doesn't get
1764 	 * ignored in xfs_buf_rele().
1765 	 */
1766 	atomic_set(&bp->b_lru_ref, 0);
1767 	bp->b_state |= XFS_BSTATE_DISPOSE;
1768 	list_lru_isolate_move(lru, item, dispose);
1769 	spin_unlock(&bp->b_lock);
1770 	return LRU_REMOVED;
1771 }
1772 
1773 /*
1774  * Wait for outstanding I/O on the buftarg to complete.
1775  */
1776 void
1777 xfs_buftarg_wait(
1778 	struct xfs_buftarg	*btp)
1779 {
1780 	/*
1781 	 * First wait on the buftarg I/O count for all in-flight buffers to be
1782 	 * released. This is critical as new buffers do not make the LRU until
1783 	 * they are released.
1784 	 *
1785 	 * Next, flush the buffer workqueue to ensure all completion processing
1786 	 * has finished. Just waiting on buffer locks is not sufficient for
1787 	 * async IO as the reference count held over IO is not released until
1788 	 * after the buffer lock is dropped. Hence we need to ensure here that
1789 	 * all reference counts have been dropped before we start walking the
1790 	 * LRU list.
1791 	 */
1792 	while (percpu_counter_sum(&btp->bt_io_count))
1793 		delay(100);
1794 	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1795 }
1796 
1797 void
1798 xfs_buftarg_drain(
1799 	struct xfs_buftarg	*btp)
1800 {
1801 	LIST_HEAD(dispose);
1802 	int			loop = 0;
1803 	bool			write_fail = false;
1804 
1805 	xfs_buftarg_wait(btp);
1806 
1807 	/* loop until there is nothing left on the lru list. */
1808 	while (list_lru_count(&btp->bt_lru)) {
1809 		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
1810 			      &dispose, LONG_MAX);
1811 
1812 		while (!list_empty(&dispose)) {
1813 			struct xfs_buf *bp;
1814 			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1815 			list_del_init(&bp->b_lru);
1816 			if (bp->b_flags & XBF_WRITE_FAIL) {
1817 				write_fail = true;
1818 				xfs_buf_alert_ratelimited(bp,
1819 					"XFS: Corruption Alert",
1820 "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1821 					(long long)xfs_buf_daddr(bp));
1822 			}
1823 			xfs_buf_rele(bp);
1824 		}
1825 		if (loop++ != 0)
1826 			delay(100);
1827 	}
1828 
1829 	/*
1830 	 * If one or more failed buffers were freed, that means dirty metadata
1831 	 * was thrown away. This should only ever happen after I/O completion
1832 	 * handling has elevated I/O error(s) to permanent failures and shuts
1833 	 * down the journal.
1834 	 */
1835 	if (write_fail) {
1836 		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
1837 		xfs_alert(btp->bt_mount,
1838 	      "Please run xfs_repair to determine the extent of the problem.");
1839 	}
1840 }
1841 
1842 static enum lru_status
1843 xfs_buftarg_isolate(
1844 	struct list_head	*item,
1845 	struct list_lru_one	*lru,
1846 	void			*arg)
1847 {
1848 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1849 	struct list_head	*dispose = arg;
1850 
1851 	/*
1852 	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1853 	 * If we fail to get the lock, just skip it.
1854 	 */
1855 	if (!spin_trylock(&bp->b_lock))
1856 		return LRU_SKIP;
1857 	/*
1858 	 * Decrement the b_lru_ref count unless the value is already
1859 	 * zero. If the value is already zero, we need to reclaim the
1860 	 * buffer, otherwise it gets another trip through the LRU.
1861 	 */
1862 	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1863 		spin_unlock(&bp->b_lock);
1864 		return LRU_ROTATE;
1865 	}
1866 
1867 	bp->b_state |= XFS_BSTATE_DISPOSE;
1868 	list_lru_isolate_move(lru, item, dispose);
1869 	spin_unlock(&bp->b_lock);
1870 	return LRU_REMOVED;
1871 }
1872 
1873 static unsigned long
1874 xfs_buftarg_shrink_scan(
1875 	struct shrinker		*shrink,
1876 	struct shrink_control	*sc)
1877 {
1878 	struct xfs_buftarg	*btp = shrink->private_data;
1879 	LIST_HEAD(dispose);
1880 	unsigned long		freed;
1881 
1882 	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1883 				     xfs_buftarg_isolate, &dispose);
1884 
1885 	while (!list_empty(&dispose)) {
1886 		struct xfs_buf *bp;
1887 		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1888 		list_del_init(&bp->b_lru);
1889 		xfs_buf_rele(bp);
1890 	}
1891 
1892 	return freed;
1893 }
1894 
1895 static unsigned long
1896 xfs_buftarg_shrink_count(
1897 	struct shrinker		*shrink,
1898 	struct shrink_control	*sc)
1899 {
1900 	struct xfs_buftarg	*btp = shrink->private_data;
1901 	return list_lru_shrink_count(&btp->bt_lru, sc);
1902 }
1903 
1904 void
1905 xfs_destroy_buftarg(
1906 	struct xfs_buftarg	*btp)
1907 {
1908 	shrinker_free(btp->bt_shrinker);
1909 	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1910 	percpu_counter_destroy(&btp->bt_io_count);
1911 	list_lru_destroy(&btp->bt_lru);
1912 }
1913 
1914 void
1915 xfs_free_buftarg(
1916 	struct xfs_buftarg	*btp)
1917 {
1918 	xfs_destroy_buftarg(btp);
1919 	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
1920 	/* the main block device is closed by kill_block_super */
1921 	if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
1922 		bdev_fput(btp->bt_bdev_file);
1923 	kfree(btp);
1924 }
1925 
1926 int
1927 xfs_setsize_buftarg(
1928 	struct xfs_buftarg	*btp,
1929 	unsigned int		sectorsize)
1930 {
1931 	/* Set up metadata sector size info */
1932 	btp->bt_meta_sectorsize = sectorsize;
1933 	btp->bt_meta_sectormask = sectorsize - 1;
1934 
1935 	if (set_blocksize(btp->bt_bdev_file, sectorsize)) {
1936 		xfs_warn(btp->bt_mount,
1937 			"Cannot set_blocksize to %u on device %pg",
1938 			sectorsize, btp->bt_bdev);
1939 		return -EINVAL;
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 int
1946 xfs_init_buftarg(
1947 	struct xfs_buftarg		*btp,
1948 	size_t				logical_sectorsize,
1949 	const char			*descr)
1950 {
1951 	/* Set up device logical sector size mask */
1952 	btp->bt_logical_sectorsize = logical_sectorsize;
1953 	btp->bt_logical_sectormask = logical_sectorsize - 1;
1954 
1955 	/*
1956 	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
1957 	 * per 30 seconds so as to not spam logs too much on repeated errors.
1958 	 */
1959 	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
1960 			     DEFAULT_RATELIMIT_BURST);
1961 
1962 	if (list_lru_init(&btp->bt_lru))
1963 		return -ENOMEM;
1964 	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1965 		goto out_destroy_lru;
1966 
1967 	btp->bt_shrinker =
1968 		shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
1969 	if (!btp->bt_shrinker)
1970 		goto out_destroy_io_count;
1971 	btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
1972 	btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
1973 	btp->bt_shrinker->private_data = btp;
1974 	shrinker_register(btp->bt_shrinker);
1975 	return 0;
1976 
1977 out_destroy_io_count:
1978 	percpu_counter_destroy(&btp->bt_io_count);
1979 out_destroy_lru:
1980 	list_lru_destroy(&btp->bt_lru);
1981 	return -ENOMEM;
1982 }
1983 
1984 struct xfs_buftarg *
1985 xfs_alloc_buftarg(
1986 	struct xfs_mount	*mp,
1987 	struct file		*bdev_file)
1988 {
1989 	struct xfs_buftarg	*btp;
1990 	const struct dax_holder_operations *ops = NULL;
1991 
1992 #if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
1993 	ops = &xfs_dax_holder_operations;
1994 #endif
1995 	btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
1996 
1997 	btp->bt_mount = mp;
1998 	btp->bt_bdev_file = bdev_file;
1999 	btp->bt_bdev = file_bdev(bdev_file);
2000 	btp->bt_dev = btp->bt_bdev->bd_dev;
2001 	btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
2002 					    mp, ops);
2003 
2004 	if (bdev_can_atomic_write(btp->bt_bdev)) {
2005 		btp->bt_bdev_awu_min = bdev_atomic_write_unit_min_bytes(
2006 						btp->bt_bdev);
2007 		btp->bt_bdev_awu_max = bdev_atomic_write_unit_max_bytes(
2008 						btp->bt_bdev);
2009 	}
2010 
2011 	/*
2012 	 * When allocating the buftarg we have not yet read the superblock and
2013 	 * thus do not know the filesystem sector size.
2014 	 */
2015 	if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
2016 		goto error_free;
2017 	if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
2018 			mp->m_super->s_id))
2019 		goto error_free;
2020 
2021 	return btp;
2022 
2023 error_free:
2024 	kfree(btp);
2025 	return NULL;
2026 }
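
/*
 * Illustrative sketch (an assumption, not code from this file): because the
 * superblock has not been read yet, the buftarg is initially sized to the
 * device logical block size above.  A later mount-time step would be expected
 * to call xfs_setsize_buftarg() again once the real filesystem sector size is
 * known, e.g.:
 *
 *	error = xfs_setsize_buftarg(mp->m_ddev_targp, sbp->sb_sectsize);
 *
 * The call site and the m_ddev_targp/sb_sectsize names are shown for
 * illustration only.
 */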
2027 
2028 static inline void
2029 xfs_buf_list_del(
2030 	struct xfs_buf		*bp)
2031 {
2032 	list_del_init(&bp->b_list);
2033 	wake_up_var(&bp->b_list);
2034 }
2035 
2036 /*
2037  * Cancel a delayed write list.
2038  *
2039  * Remove each buffer from the list, clear the delwri queue flag and drop the
2040  * associated buffer reference.
2041  */
2042 void
2043 xfs_buf_delwri_cancel(
2044 	struct list_head	*list)
2045 {
2046 	struct xfs_buf		*bp;
2047 
2048 	while (!list_empty(list)) {
2049 		bp = list_first_entry(list, struct xfs_buf, b_list);
2050 
2051 		xfs_buf_lock(bp);
2052 		bp->b_flags &= ~_XBF_DELWRI_Q;
2053 		xfs_buf_list_del(bp);
2054 		xfs_buf_relse(bp);
2055 	}
2056 }
2057 
2058 /*
2059  * Add a buffer to the delayed write list.
2060  *
2061  * This queues a buffer for writeout if it hasn't already been queued.  Note that
2062  * neither this routine nor the buffer list submission functions perform
2063  * any internal synchronization.  It is expected that the lists are thread-local
2064  * to the callers.
2065  *
2066  * Returns true if we queued up the buffer, or false if it was already on
2067  * the buffer list.  An illustrative usage sketch follows the function below.
2068  */
2069 bool
2070 xfs_buf_delwri_queue(
2071 	struct xfs_buf		*bp,
2072 	struct list_head	*list)
2073 {
2074 	ASSERT(xfs_buf_islocked(bp));
2075 	ASSERT(!(bp->b_flags & XBF_READ));
2076 
2077 	/*
2078 	 * If the buffer is already marked delwri it is already queued up
2079 	 * by someone else for immediate writeout.  Just ignore it in that
2080 	 * case.
2081 	 */
2082 	if (bp->b_flags & _XBF_DELWRI_Q) {
2083 		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2084 		return false;
2085 	}
2086 
2087 	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2088 
2089 	/*
2090 	 * If a buffer gets written out synchronously or marked stale while it
2091 	 * is on a delwri list we lazily remove it. To do this, the other party
2092 	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
2093 	 * It remains referenced and on the list.  In a rare corner case it
2094 	 * might get re-added to a delwri list after the synchronous writeout, in
2095 	 * which case we just need to re-add the flag here.
2096 	 */
2097 	bp->b_flags |= _XBF_DELWRI_Q;
2098 	if (list_empty(&bp->b_list)) {
2099 		xfs_buf_hold(bp);
2100 		list_add_tail(&bp->b_list, list);
2101 	}
2102 
2103 	return true;
2104 }
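
/*
 * Minimal usage sketch (illustrative, not code from this file): the delwri
 * list is a caller-local LIST_HEAD, each buffer is queued while locked, and
 * the whole list is later pushed with one of the submit functions below.
 * Error handling is omitted and "error" is just a local in this example.
 *
 *	LIST_HEAD(buffer_list);
 *	int error;
 *
 *	xfs_buf_lock(bp);
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_unlock(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * No locking is used around buffer_list itself; as the comment above
 * requires, the list stays thread-local to the caller.
 */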
2105 
2106 /*
2107  * Queue a buffer to this delwri list as part of a data integrity operation.
2108  * If the buffer is on any other delwri list, we'll wait for that to clear
2109  * so that the caller can submit the buffer for IO and wait for the result.
2110  * Callers must ensure the buffer is not already on this list.
2111  */
2112 void
2113 xfs_buf_delwri_queue_here(
2114 	struct xfs_buf		*bp,
2115 	struct list_head	*buffer_list)
2116 {
2117 	/*
2118 	 * We need this buffer to end up on the /caller's/ delwri list, not any
2119 	 * old list.  It can end up on another list if the buffer is marked stale
2120 	 * (which clears DELWRI_Q) after the AIL queues the buffer to its list but
2121 	 * before the AIL has a chance to submit that list.
2122 	 */
2123 	while (!list_empty(&bp->b_list)) {
2124 		xfs_buf_unlock(bp);
2125 		wait_var_event(&bp->b_list, list_empty(&bp->b_list));
2126 		xfs_buf_lock(bp);
2127 	}
2128 
2129 	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
2130 
2131 	xfs_buf_delwri_queue(bp, buffer_list);
2132 }
2133 
2134 /*
2135  * The compare function is more complex than it needs to be because
2136  * the return value is only 32 bits and we are doing comparisons
2137  * on 64-bit values; see the overflow note after the function below.
2138  */
2139 static int
2140 xfs_buf_cmp(
2141 	void			*priv,
2142 	const struct list_head	*a,
2143 	const struct list_head	*b)
2144 {
2145 	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
2146 	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
2147 	xfs_daddr_t		diff;
2148 
2149 	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2150 	if (diff < 0)
2151 		return -1;
2152 	if (diff > 0)
2153 		return 1;
2154 	return 0;
2155 }
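
/*
 * Overflow note for the comparison above (illustrative): simply truncating
 * the 64-bit difference to int would misorder blocks that are far apart.
 * For example, with bm_bn values 0x100000000 and 0:
 *
 *	(int)(0x100000000LL - 0) == 0	// would wrongly report "equal"
 *
 * which is why the result is reduced to -1/0/1 explicitly.
 */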
2156 
2157 static bool
2158 xfs_buf_delwri_submit_prep(
2159 	struct xfs_buf		*bp)
2160 {
2161 	/*
2162 	 * Someone else might have written the buffer synchronously or marked it
2163 	 * stale in the meantime.  In that case only the _XBF_DELWRI_Q flag got
2164 	 * cleared, and we have to drop the reference and remove it from the
2165 	 * list here.
2166 	 */
2167 	if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2168 		xfs_buf_list_del(bp);
2169 		xfs_buf_relse(bp);
2170 		return false;
2171 	}
2172 
2173 	trace_xfs_buf_delwri_split(bp, _RET_IP_);
2174 	bp->b_flags &= ~_XBF_DELWRI_Q;
2175 	bp->b_flags |= XBF_WRITE;
2176 	return true;
2177 }
2178 
2179 /*
2180  * Write out a buffer list asynchronously.
2181  *
2182  * This will take the @buffer_list, write all non-locked and non-pinned buffers
2183  * out and not wait for I/O completion on any of the buffers.  This interface
2184  * is only safely usable for callers that can track I/O completion by higher
2185  * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2186  * function.
2187  *
2188  * Note: this function will skip buffers it would block on, and in doing so
2189  * leaves them on @buffer_list so they can be retried on a later pass. As such,
2190  * it is up to the caller to ensure that the buffer list is fully submitted or
2191  * cancelled appropriately when they are finished with the list. Failure to
2192  * cancel or resubmit the list until it is empty will result in leaked buffers
2193  * at unmount time.
2194  */
2195 int
2196 xfs_buf_delwri_submit_nowait(
2197 	struct list_head	*buffer_list)
2198 {
2199 	struct xfs_buf		*bp, *n;
2200 	int			pinned = 0;
2201 	struct blk_plug		plug;
2202 
2203 	list_sort(NULL, buffer_list, xfs_buf_cmp);
2204 
2205 	blk_start_plug(&plug);
2206 	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2207 		if (!xfs_buf_trylock(bp))
2208 			continue;
2209 		if (xfs_buf_ispinned(bp)) {
2210 			xfs_buf_unlock(bp);
2211 			pinned++;
2212 			continue;
2213 		}
2214 		if (!xfs_buf_delwri_submit_prep(bp))
2215 			continue;
2216 		bp->b_flags |= XBF_ASYNC;
2217 		xfs_buf_list_del(bp);
2218 		xfs_buf_submit(bp);
2219 	}
2220 	blk_finish_plug(&plug);
2221 
2222 	return pinned;
2223 }
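
/*
 * Illustrative caller pattern (a sketch, not code from this file): because
 * pinned or locked buffers stay on @buffer_list, a pushing loop has to keep
 * resubmitting until the list drains, or cancel whatever is left before it
 * exits.  The "stopping" flag and the wait in the loop body are placeholders
 * for whatever mechanism the caller uses (AIL pushing is the in-tree example).
 *
 *	while (!list_empty(&buffer_list) && !stopping) {
 *		xfs_buf_delwri_submit_nowait(&buffer_list);
 *		// wait for log forces / unpinning, then try again
 *	}
 *	xfs_buf_delwri_cancel(&buffer_list);
 */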
2224 
2225 /*
2226  * Write out a buffer list synchronously.
2227  *
2228  * This will take the @buffer_list, write all buffers out and wait for I/O
2229  * completion on all of the buffers. @buffer_list is consumed by the function,
2230  * so callers must have some other way of tracking buffers if they require such
2231  * functionality.
2232  */
2233 int
2234 xfs_buf_delwri_submit(
2235 	struct list_head	*buffer_list)
2236 {
2237 	LIST_HEAD(wait_list);
2238 	int			error = 0, error2;
2239 	struct xfs_buf		*bp, *n;
2240 	struct blk_plug		plug;
2241 
2242 	list_sort(NULL, buffer_list, xfs_buf_cmp);
2243 
2244 	blk_start_plug(&plug);
2245 	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2246 		xfs_buf_lock(bp);
2247 		if (!xfs_buf_delwri_submit_prep(bp))
2248 			continue;
2249 		bp->b_flags &= ~XBF_ASYNC;
2250 		list_move_tail(&bp->b_list, &wait_list);
2251 		xfs_buf_submit(bp);
2252 	}
2253 	blk_finish_plug(&plug);
2254 
2255 	/* Wait for IO to complete. */
2256 	while (!list_empty(&wait_list)) {
2257 		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2258 
2259 		xfs_buf_list_del(bp);
2260 
2261 		/*
2262 		 * Wait on the locked buffer, check for errors and unlock and
2263 		 * release the delwri queue reference.
2264 		 */
2265 		error2 = xfs_buf_iowait(bp);
2266 		xfs_buf_relse(bp);
2267 		if (!error)
2268 			error = error2;
2269 	}
2270 
2271 	return error;
2272 }
2273 
2274 /*
2275  * Push a single buffer on a delwri queue.
2276  *
2277  * The purpose of this function is to submit a single buffer of a delwri queue
2278  * and return with the buffer still on the original queue.
2279  *
2280  * The buffer locking and queue management logic between _delwri_pushbuf() and
2281  * _delwri_queue() guarantees that the buffer cannot be queued to another list
2282  * before returning.
2283  */
2284 int
2285 xfs_buf_delwri_pushbuf(
2286 	struct xfs_buf		*bp,
2287 	struct list_head	*buffer_list)
2288 {
2289 	int			error;
2290 
2291 	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2292 
2293 	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2294 
2295 	xfs_buf_lock(bp);
2296 	bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
2297 	bp->b_flags |= XBF_WRITE;
2298 	xfs_buf_submit(bp);
2299 
2300 	/*
2301 	 * The buffer is now locked, under I/O but still on the original delwri
2302 	 * queue. Wait for I/O completion, restore the DELWRI_Q flag and
2303 	 * return with the buffer unlocked and still on the original queue.
2304 	 */
2305 	error = xfs_buf_iowait(bp);
2306 	bp->b_flags |= _XBF_DELWRI_Q;
2307 	xfs_buf_unlock(bp);
2308 
2309 	return error;
2310 }
2311 
2312 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2313 {
2314 	/*
2315 	 * Set the lru reference count to 0 based on the error injection tag.
2316 	 * This allows userspace to disrupt buffer caching for debug/testing
2317 	 * purposes.
2318 	 */
2319 	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2320 		lru_ref = 0;
2321 
2322 	atomic_set(&bp->b_lru_ref, lru_ref);
2323 }
2324 
2325 /*
2326  * Verify an on-disk magic value against the magic value specified in the
2327  * verifier structure. The verifier magic is in disk byte order so the caller is
2328  * expected to pass the value directly from disk.
2329  */
2330 bool
2331 xfs_verify_magic(
2332 	struct xfs_buf		*bp,
2333 	__be32			dmagic)
2334 {
2335 	struct xfs_mount	*mp = bp->b_mount;
2336 	int			idx;
2337 
2338 	idx = xfs_has_crc(mp);
2339 	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2340 		return false;
2341 	return dmagic == bp->b_ops->magic[idx];
2342 }
2343 /*
2344  * Verify an on-disk magic value against the magic value specified in the
2345  * verifier structure. The verifier magic is in disk byte order so the caller is
2346  * expected to pass the value directly from disk.
2347  */
2348 bool
2349 xfs_verify_magic16(
2350 	struct xfs_buf		*bp,
2351 	__be16			dmagic)
2352 {
2353 	struct xfs_mount	*mp = bp->b_mount;
2354 	int			idx;
2355 
2356 	idx = xfs_has_crc(mp);
2357 	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2358 		return false;
2359 	return dmagic == bp->b_ops->magic16[idx];
2360 }
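
/*
 * Illustrative sketch of how a verifier might wire up the magic table used by
 * xfs_verify_magic() above (assumption-level example; XFS_FOO_MAGIC,
 * XFS_FOO_CRC_MAGIC and the xfs_foo_* names are made up):
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 *
 * A read verifier would then pass the on-disk magic straight from the buffer,
 * e.g. xfs_verify_magic(bp, hdr->magicnum), and fail the buffer if it returns
 * false.  Index 0 holds the pre-CRC (V4) magic and index 1 the CRC-enabled
 * (V5) magic, matching the xfs_has_crc() index used above.
 */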
2361