xref: /linux/fs/xfs/libxfs/xfs_refcount.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2016 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_defer.h"
14 #include "xfs_btree.h"
15 #include "xfs_bmap.h"
16 #include "xfs_refcount_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_errortag.h"
19 #include "xfs_error.h"
20 #include "xfs_trace.h"
21 #include "xfs_trans.h"
22 #include "xfs_bit.h"
23 #include "xfs_refcount.h"
24 #include "xfs_rmap.h"
25 #include "xfs_ag.h"
26 #include "xfs_health.h"
27 #include "xfs_refcount_item.h"
28 
/* Allocation cache for refcount intents (set up by module init elsewhere). */
struct kmem_cache	*xfs_refcount_intent_cache;
30 
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
	XFS_REFCOUNT_ADJUST_INCREASE	= 1,	/* add one reference */
	XFS_REFCOUNT_ADJUST_DECREASE	= -1,	/* remove one reference */
	XFS_REFCOUNT_ADJUST_COW_ALLOC	= 0,	/* stage a CoW extent, count unchanged */
	XFS_REFCOUNT_ADJUST_COW_FREE	= -1,	/* release a CoW staging extent */
};
38 
39 STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
40 		xfs_agblock_t agbno, xfs_extlen_t aglen);
41 STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
42 		xfs_agblock_t agbno, xfs_extlen_t aglen);
43 
44 /*
45  * Look up the first record less than or equal to [bno, len] in the btree
46  * given by cur.
47  */
48 int
49 xfs_refcount_lookup_le(
50 	struct xfs_btree_cur	*cur,
51 	enum xfs_refc_domain	domain,
52 	xfs_agblock_t		bno,
53 	int			*stat)
54 {
55 	trace_xfs_refcount_lookup(cur,
56 			xfs_refcount_encode_startblock(bno, domain),
57 			XFS_LOOKUP_LE);
58 	cur->bc_rec.rc.rc_startblock = bno;
59 	cur->bc_rec.rc.rc_blockcount = 0;
60 	cur->bc_rec.rc.rc_domain = domain;
61 	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
62 }
63 
64 /*
65  * Look up the first record greater than or equal to [bno, len] in the btree
66  * given by cur.
67  */
68 int
69 xfs_refcount_lookup_ge(
70 	struct xfs_btree_cur	*cur,
71 	enum xfs_refc_domain	domain,
72 	xfs_agblock_t		bno,
73 	int			*stat)
74 {
75 	trace_xfs_refcount_lookup(cur,
76 			xfs_refcount_encode_startblock(bno, domain),
77 			XFS_LOOKUP_GE);
78 	cur->bc_rec.rc.rc_startblock = bno;
79 	cur->bc_rec.rc.rc_blockcount = 0;
80 	cur->bc_rec.rc.rc_domain = domain;
81 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
82 }
83 
84 /*
85  * Look up the first record equal to [bno, len] in the btree
86  * given by cur.
87  */
88 int
89 xfs_refcount_lookup_eq(
90 	struct xfs_btree_cur	*cur,
91 	enum xfs_refc_domain	domain,
92 	xfs_agblock_t		bno,
93 	int			*stat)
94 {
95 	trace_xfs_refcount_lookup(cur,
96 			xfs_refcount_encode_startblock(bno, domain),
97 			XFS_LOOKUP_LE);
98 	cur->bc_rec.rc.rc_startblock = bno;
99 	cur->bc_rec.rc.rc_blockcount = 0;
100 	cur->bc_rec.rc.rc_domain = domain;
101 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
102 }
103 
104 /* Convert on-disk record to in-core format. */
105 void
106 xfs_refcount_btrec_to_irec(
107 	const union xfs_btree_rec	*rec,
108 	struct xfs_refcount_irec	*irec)
109 {
110 	uint32_t			start;
111 
112 	start = be32_to_cpu(rec->refc.rc_startblock);
113 	if (start & XFS_REFC_COWFLAG) {
114 		start &= ~XFS_REFC_COWFLAG;
115 		irec->rc_domain = XFS_REFC_DOMAIN_COW;
116 	} else {
117 		irec->rc_domain = XFS_REFC_DOMAIN_SHARED;
118 	}
119 
120 	irec->rc_startblock = start;
121 	irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
122 	irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
123 }
124 
/*
 * Simple checks for refcount records.
 *
 * Returns NULL if the record looks plausible, or the address of the
 * failing check (for corruption reporting) otherwise.  The check order
 * is significant only in that the returned failaddr identifies which
 * test tripped first.
 */
xfs_failaddr_t
xfs_refcount_check_irec(
	struct xfs_perag		*pag,
	const struct xfs_refcount_irec	*irec)
{
	/* A record must describe at least one block, capped at the maximum. */
	if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
		return __this_address;

	if (!xfs_refcount_check_domain(irec))
		return __this_address;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount))
		return __this_address;

	/* Refcounts of 0 are never written; 1 is implied by absence. */
	if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
		return __this_address;

	return NULL;
}
146 
147 static inline int
148 xfs_refcount_complain_bad_rec(
149 	struct xfs_btree_cur		*cur,
150 	xfs_failaddr_t			fa,
151 	const struct xfs_refcount_irec	*irec)
152 {
153 	struct xfs_mount		*mp = cur->bc_mp;
154 
155 	xfs_warn(mp,
156  "Refcount BTree record corruption in AG %d detected at %pS!",
157 				cur->bc_ag.pag->pag_agno, fa);
158 	xfs_warn(mp,
159 		"Start block 0x%x, block count 0x%x, references 0x%x",
160 		irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
161 	xfs_btree_mark_sick(cur);
162 	return -EFSCORRUPTED;
163 }
164 
165 /*
166  * Get the data from the pointed-to record.
167  */
168 int
169 xfs_refcount_get_rec(
170 	struct xfs_btree_cur		*cur,
171 	struct xfs_refcount_irec	*irec,
172 	int				*stat)
173 {
174 	union xfs_btree_rec		*rec;
175 	xfs_failaddr_t			fa;
176 	int				error;
177 
178 	error = xfs_btree_get_rec(cur, &rec, stat);
179 	if (error || !*stat)
180 		return error;
181 
182 	xfs_refcount_btrec_to_irec(rec, irec);
183 	fa = xfs_refcount_check_irec(cur->bc_ag.pag, irec);
184 	if (fa)
185 		return xfs_refcount_complain_bad_rec(cur, fa, irec);
186 
187 	trace_xfs_refcount_get(cur, irec);
188 	return 0;
189 }
190 
191 /*
192  * Update the record referred to by cur to the value given
193  * by [bno, len, refcount].
194  * This either works (return 0) or gets an EFSCORRUPTED error.
195  */
196 STATIC int
197 xfs_refcount_update(
198 	struct xfs_btree_cur		*cur,
199 	struct xfs_refcount_irec	*irec)
200 {
201 	union xfs_btree_rec	rec;
202 	uint32_t		start;
203 	int			error;
204 
205 	trace_xfs_refcount_update(cur, irec);
206 
207 	start = xfs_refcount_encode_startblock(irec->rc_startblock,
208 			irec->rc_domain);
209 	rec.refc.rc_startblock = cpu_to_be32(start);
210 	rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
211 	rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
212 
213 	error = xfs_btree_update(cur, &rec);
214 	if (error)
215 		trace_xfs_refcount_update_error(cur, error, _RET_IP_);
216 	return error;
217 }
218 
219 /*
220  * Insert the record referred to by cur to the value given
221  * by [bno, len, refcount].
222  * This either works (return 0) or gets an EFSCORRUPTED error.
223  */
224 int
225 xfs_refcount_insert(
226 	struct xfs_btree_cur		*cur,
227 	struct xfs_refcount_irec	*irec,
228 	int				*i)
229 {
230 	int				error;
231 
232 	trace_xfs_refcount_insert(cur, irec);
233 
234 	cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
235 	cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
236 	cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
237 	cur->bc_rec.rc.rc_domain = irec->rc_domain;
238 
239 	error = xfs_btree_insert(cur, i);
240 	if (error)
241 		goto out_error;
242 	if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
243 		xfs_btree_mark_sick(cur);
244 		error = -EFSCORRUPTED;
245 		goto out_error;
246 	}
247 
248 out_error:
249 	if (error)
250 		trace_xfs_refcount_insert_error(cur, error, _RET_IP_);
251 	return error;
252 }
253 
254 /*
255  * Remove the record referred to by cur, then set the pointer to the spot
256  * where the record could be re-inserted, in case we want to increment or
257  * decrement the cursor.
258  * This either works (return 0) or gets an EFSCORRUPTED error.
259  */
260 STATIC int
261 xfs_refcount_delete(
262 	struct xfs_btree_cur	*cur,
263 	int			*i)
264 {
265 	struct xfs_refcount_irec	irec;
266 	int			found_rec;
267 	int			error;
268 
269 	error = xfs_refcount_get_rec(cur, &irec, &found_rec);
270 	if (error)
271 		goto out_error;
272 	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
273 		xfs_btree_mark_sick(cur);
274 		error = -EFSCORRUPTED;
275 		goto out_error;
276 	}
277 	trace_xfs_refcount_delete(cur, &irec);
278 	error = xfs_btree_delete(cur, i);
279 	if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
280 		xfs_btree_mark_sick(cur);
281 		error = -EFSCORRUPTED;
282 		goto out_error;
283 	}
284 	if (error)
285 		goto out_error;
286 	error = xfs_refcount_lookup_ge(cur, irec.rc_domain, irec.rc_startblock,
287 			&found_rec);
288 out_error:
289 	if (error)
290 		trace_xfs_refcount_delete_error(cur, error, _RET_IP_);
291 	return error;
292 }
293 
294 /*
295  * Adjusting the Reference Count
296  *
297  * As stated elsewhere, the reference count btree (refcbt) stores
298  * >1 reference counts for extents of physical blocks.  In this
299  * operation, we're either raising or lowering the reference count of
300  * some subrange stored in the tree:
301  *
302  *      <------ adjustment range ------>
303  * ----+   +---+-----+ +--+--------+---------
304  *  2  |   | 3 |  4  | |17|   55   |   10
305  * ----+   +---+-----+ +--+--------+---------
306  * X axis is physical blocks number;
307  * reference counts are the numbers inside the rectangles
308  *
309  * The first thing we need to do is to ensure that there are no
310  * refcount extents crossing either boundary of the range to be
311  * adjusted.  For any extent that does cross a boundary, split it into
312  * two extents so that we can increment the refcount of one of the
313  * pieces later:
314  *
315  *      <------ adjustment range ------>
316  * ----+   +---+-----+ +--+--------+----+----
317  *  2  |   | 3 |  2  | |17|   55   | 10 | 10
318  * ----+   +---+-----+ +--+--------+----+----
319  *
320  * For this next step, let's assume that all the physical blocks in
321  * the adjustment range are mapped to a file and are therefore in use
322  * at least once.  Therefore, we can infer that any gap in the
323  * refcount tree within the adjustment range represents a physical
324  * extent with refcount == 1:
325  *
326  *      <------ adjustment range ------>
327  * ----+---+---+-----+-+--+--------+----+----
328  *  2  |"1"| 3 |  2  |1|17|   55   | 10 | 10
329  * ----+---+---+-----+-+--+--------+----+----
330  *      ^
331  *
332  * For each extent that falls within the interval range, figure out
333  * which extent is to the left or the right of that extent.  Now we
334  * have a left, current, and right extent.  If the new reference count
335  * of the center extent enables us to merge left, center, and right
336  * into one record covering all three, do so.  If the center extent is
337  * at the left end of the range, abuts the left extent, and its new
338  * reference count matches the left extent's record, then merge them.
339  * If the center extent is at the right end of the range, abuts the
340  * right extent, and the reference counts match, merge those.  In the
341  * example, we can left merge (assuming an increment operation):
342  *
343  *      <------ adjustment range ------>
344  * --------+---+-----+-+--+--------+----+----
345  *    2    | 3 |  2  |1|17|   55   | 10 | 10
346  * --------+---+-----+-+--+--------+----+----
347  *          ^
348  *
349  * For all other extents within the range, adjust the reference count
350  * or delete it if the refcount falls below 2.  If we were
351  * incrementing, the end result looks like this:
352  *
353  *      <------ adjustment range ------>
354  * --------+---+-----+-+--+--------+----+----
355  *    2    | 4 |  3  |2|18|   56   | 11 | 10
356  * --------+---+-----+-+--+--------+----+----
357  *
358  * The result of a decrement operation looks as such:
359  *
360  *      <------ adjustment range ------>
361  * ----+   +---+       +--+--------+----+----
362  *  2  |   | 2 |       |16|   54   |  9 | 10
363  * ----+   +---+       +--+--------+----+----
364  *      DDDD    111111DD
365  *
366  * The blocks marked "D" are freed; the blocks marked "1" are only
367  * referenced once and therefore the record is removed from the
368  * refcount btree.
369  */
370 
371 /* Next block after this extent. */
372 static inline xfs_agblock_t
373 xfs_refc_next(
374 	struct xfs_refcount_irec	*rc)
375 {
376 	return rc->rc_startblock + rc->rc_blockcount;
377 }
378 
/*
 * Split a refcount extent that crosses agbno.
 *
 * If the record containing agbno actually starts before it, rewrite
 * that record as the right-hand piece (starting at agbno) and insert a
 * fresh record for the left-hand piece.  *shape_changed reports whether
 * the tree was modified.  Records wholly before/after agbno, or in a
 * different domain, are left alone.
 */
STATIC int
xfs_refcount_split_extent(
	struct xfs_btree_cur		*cur,
	enum xfs_refc_domain		domain,
	xfs_agblock_t			agbno,
	bool				*shape_changed)
{
	struct xfs_refcount_irec	rcext, tmp;
	int				found_rec;
	int				error;

	*shape_changed = false;
	error = xfs_refcount_lookup_le(cur, domain, agbno, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}
	/* Not our domain, or the record does not straddle agbno: no split. */
	if (rcext.rc_domain != domain)
		return 0;
	if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
		return 0;

	*shape_changed = true;
	trace_xfs_refcount_split_extent(cur, &rcext, agbno);

	/* Establish the right extent. */
	tmp = rcext;
	tmp.rc_startblock = agbno;
	tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
	error = xfs_refcount_update(cur, &tmp);
	if (error)
		goto out_error;

	/* Insert the left extent. */
	tmp = rcext;
	tmp.rc_blockcount = agbno - rcext.rc_startblock;
	error = xfs_refcount_insert(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}
	return error;

out_error:
	trace_xfs_refcount_split_extent_error(cur, error, _RET_IP_);
	return error;
}
441 
/*
 * Merge the left, center, and right extents.
 *
 * Deletes the on-disk center (if it was a real record) and right
 * records, then grows the left record to cover all three.  extlen is
 * the precomputed combined length; *aglen is zeroed on success because
 * the entire adjustment range has been handled by the merge.
 */
STATIC int
xfs_refcount_merge_center_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*center,
	struct xfs_refcount_irec	*right,
	unsigned long long		extlen,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_center_extents(cur, left, center, right);

	ASSERT(left->rc_domain == center->rc_domain);
	ASSERT(right->rc_domain == center->rc_domain);

	/*
	 * Make sure the center and right extents are not in the btree.
	 * If the center extent was synthesized, the first delete call
	 * removes the right extent and we skip the second deletion.
	 * If center and right were in the btree, then the first delete
	 * call removes the center and the second one removes the right
	 * extent.
	 */
	error = xfs_refcount_lookup_ge(cur, center->rc_domain,
			center->rc_startblock, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	error = xfs_refcount_delete(cur, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* refcount > 1 means the center really existed on disk. */
	if (center->rc_refcount > 1) {
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_domain,
			left->rc_startblock, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	left->rc_blockcount = extlen;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	*aglen = 0;
	return error;

out_error:
	trace_xfs_refcount_merge_center_extents_error(cur, error, _RET_IP_);
	return error;
}
523 
/*
 * Merge with the left extent.
 *
 * Removes the on-disk cleft record (if it was real, i.e. refcount > 1)
 * and grows the left record over it.  On success, *agbno/*aglen are
 * advanced past the merged piece so the caller adjusts only what is
 * left of the range.
 */
STATIC int
xfs_refcount_merge_left_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_left_extent(cur, left, cleft);

	ASSERT(left->rc_domain == cleft->rc_domain);

	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
	if (cleft->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cleft->rc_domain,
				cleft->rc_startblock, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_domain,
			left->rc_startblock, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	left->rc_blockcount += cleft->rc_blockcount;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	/* Shrink the adjustment range to exclude the merged blocks. */
	*agbno += cleft->rc_blockcount;
	*aglen -= cleft->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_left_extent_error(cur, error, _RET_IP_);
	return error;
}
588 
/*
 * Merge with the right extent.
 *
 * Removes the on-disk cright record (if it was real, i.e. refcount > 1)
 * and extends the right record backwards over it.  On success, *aglen
 * shrinks by the merged length; *agbno is untouched since the merge is
 * at the far end of the range.
 */
STATIC int
xfs_refcount_merge_right_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_right_extent(cur, cright, right);

	ASSERT(right->rc_domain == cright->rc_domain);

	/*
	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
	 * remove it.
	 */
	if (cright->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cright->rc_domain,
				cright->rc_startblock, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the right extent. */
	error = xfs_refcount_lookup_le(cur, right->rc_domain,
			right->rc_startblock, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* Grow the right record backwards to swallow cright. */
	right->rc_startblock -= cright->rc_blockcount;
	right->rc_blockcount += cright->rc_blockcount;
	error = xfs_refcount_update(cur, right);
	if (error)
		goto out_error;

	*aglen -= cright->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_right_extent_error(cur, error, _RET_IP_);
	return error;
}
655 
/*
 * Find the left extent and the one after it (cleft).  This function assumes
 * that we've already split any extent crossing agbno.
 *
 * On return, left/cleft either hold real records, a synthesized
 * refcount==1 record for a gap (cleft only), or NULLAGBLOCK start
 * blocks meaning "no such extent" (tested via xfs_refc_valid).
 */
STATIC int
xfs_refcount_find_left_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	enum xfs_refc_domain		domain,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	/* Mark both results invalid until proven otherwise. */
	left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_le(cur, domain, agbno - 1, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* Only a record abutting agbno in our domain counts as "left". */
	if (tmp.rc_domain != domain)
		return 0;
	if (xfs_refc_next(&tmp) != agbno)
		return 0;
	/* We have a left extent; retrieve (or invent) the next right one */
	*left = tmp;

	error = xfs_btree_increment(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		if (tmp.rc_domain != domain)
			goto not_found;

		/* if tmp starts at the end of our range, just use that */
		if (tmp.rc_startblock == agbno)
			*cleft = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the start of the
			 * range we're interested in (refcount == 1) so
			 * synthesize the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cleft->rc_startblock = agbno;
			cleft->rc_blockcount = min(aglen,
					tmp.rc_startblock - agbno);
			cleft->rc_refcount = 1;
			cleft->rc_domain = domain;
		}
	} else {
not_found:
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cleft->rc_startblock = agbno;
		cleft->rc_blockcount = aglen;
		cleft->rc_refcount = 1;
		cleft->rc_domain = domain;
	}
	trace_xfs_refcount_find_left_extent(cur, left, cleft, agbno);
	return error;

out_error:
	trace_xfs_refcount_find_left_extent_error(cur, error, _RET_IP_);
	return error;
}
748 
/*
 * Find the right extent and the one before it (cright).  This function
 * assumes that we've already split any extents crossing agbno + aglen.
 *
 * Mirror image of xfs_refcount_find_left_extents: right/cright either
 * hold real records, a synthesized refcount==1 record for a gap
 * (cright only), or NULLAGBLOCK start blocks meaning "no such extent".
 */
STATIC int
xfs_refcount_find_right_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	enum xfs_refc_domain		domain,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	/* Mark both results invalid until proven otherwise. */
	right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_ge(cur, domain, agbno + aglen, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* Only a record starting exactly at the range end is "right". */
	if (tmp.rc_domain != domain)
		return 0;
	if (tmp.rc_startblock != agbno + aglen)
		return 0;
	/* We have a right extent; retrieve (or invent) the next left one */
	*right = tmp;

	error = xfs_btree_decrement(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		if (tmp.rc_domain != domain)
			goto not_found;

		/* if tmp ends at the end of our range, just use that */
		if (xfs_refc_next(&tmp) == agbno + aglen)
			*cright = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the end of the
			 * range we're interested in (refcount == 1) so
			 * create the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
			cright->rc_blockcount = right->rc_startblock -
					cright->rc_startblock;
			cright->rc_refcount = 1;
			cright->rc_domain = domain;
		}
	} else {
not_found:
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cright->rc_startblock = agbno;
		cright->rc_blockcount = aglen;
		cright->rc_refcount = 1;
		cright->rc_domain = domain;
	}
	trace_xfs_refcount_find_right_extent(cur, cright, right,
			agbno + aglen);
	return error;

out_error:
	trace_xfs_refcount_find_right_extent_error(cur, error, _RET_IP_);
	return error;
}
842 
843 /* Is this extent valid? */
844 static inline bool
845 xfs_refc_valid(
846 	const struct xfs_refcount_irec	*rc)
847 {
848 	return rc->rc_startblock != NULLAGBLOCK;
849 }
850 
851 static inline xfs_nlink_t
852 xfs_refc_merge_refcount(
853 	const struct xfs_refcount_irec	*irec,
854 	enum xfs_refc_adjust_op		adjust)
855 {
856 	/* Once a record hits MAXREFCOUNT, it is pinned there forever */
857 	if (irec->rc_refcount == MAXREFCOUNT)
858 		return MAXREFCOUNT;
859 	return irec->rc_refcount + adjust;
860 }
861 
862 static inline bool
863 xfs_refc_want_merge_center(
864 	const struct xfs_refcount_irec	*left,
865 	const struct xfs_refcount_irec	*cleft,
866 	const struct xfs_refcount_irec	*cright,
867 	const struct xfs_refcount_irec	*right,
868 	bool				cleft_is_cright,
869 	enum xfs_refc_adjust_op		adjust,
870 	unsigned long long		*ulenp)
871 {
872 	unsigned long long		ulen = left->rc_blockcount;
873 	xfs_nlink_t			new_refcount;
874 
875 	/*
876 	 * To merge with a center record, both shoulder records must be
877 	 * adjacent to the record we want to adjust.  This is only true if
878 	 * find_left and find_right made all four records valid.
879 	 */
880 	if (!xfs_refc_valid(left)  || !xfs_refc_valid(right) ||
881 	    !xfs_refc_valid(cleft) || !xfs_refc_valid(cright))
882 		return false;
883 
884 	/* There must only be one record for the entire range. */
885 	if (!cleft_is_cright)
886 		return false;
887 
888 	/* The shoulder record refcounts must match the new refcount. */
889 	new_refcount = xfs_refc_merge_refcount(cleft, adjust);
890 	if (left->rc_refcount != new_refcount)
891 		return false;
892 	if (right->rc_refcount != new_refcount)
893 		return false;
894 
895 	/*
896 	 * The new record cannot exceed the max length.  ulen is a ULL as the
897 	 * individual record block counts can be up to (u32 - 1) in length
898 	 * hence we need to catch u32 addition overflows here.
899 	 */
900 	ulen += cleft->rc_blockcount + right->rc_blockcount;
901 	if (ulen >= MAXREFCEXTLEN)
902 		return false;
903 
904 	*ulenp = ulen;
905 	return true;
906 }
907 
908 static inline bool
909 xfs_refc_want_merge_left(
910 	const struct xfs_refcount_irec	*left,
911 	const struct xfs_refcount_irec	*cleft,
912 	enum xfs_refc_adjust_op		adjust)
913 {
914 	unsigned long long		ulen = left->rc_blockcount;
915 	xfs_nlink_t			new_refcount;
916 
917 	/*
918 	 * For a left merge, the left shoulder record must be adjacent to the
919 	 * start of the range.  If this is true, find_left made left and cleft
920 	 * contain valid contents.
921 	 */
922 	if (!xfs_refc_valid(left) || !xfs_refc_valid(cleft))
923 		return false;
924 
925 	/* Left shoulder record refcount must match the new refcount. */
926 	new_refcount = xfs_refc_merge_refcount(cleft, adjust);
927 	if (left->rc_refcount != new_refcount)
928 		return false;
929 
930 	/*
931 	 * The new record cannot exceed the max length.  ulen is a ULL as the
932 	 * individual record block counts can be up to (u32 - 1) in length
933 	 * hence we need to catch u32 addition overflows here.
934 	 */
935 	ulen += cleft->rc_blockcount;
936 	if (ulen >= MAXREFCEXTLEN)
937 		return false;
938 
939 	return true;
940 }
941 
942 static inline bool
943 xfs_refc_want_merge_right(
944 	const struct xfs_refcount_irec	*cright,
945 	const struct xfs_refcount_irec	*right,
946 	enum xfs_refc_adjust_op		adjust)
947 {
948 	unsigned long long		ulen = right->rc_blockcount;
949 	xfs_nlink_t			new_refcount;
950 
951 	/*
952 	 * For a right merge, the right shoulder record must be adjacent to the
953 	 * end of the range.  If this is true, find_right made cright and right
954 	 * contain valid contents.
955 	 */
956 	if (!xfs_refc_valid(right) || !xfs_refc_valid(cright))
957 		return false;
958 
959 	/* Right shoulder record refcount must match the new refcount. */
960 	new_refcount = xfs_refc_merge_refcount(cright, adjust);
961 	if (right->rc_refcount != new_refcount)
962 		return false;
963 
964 	/*
965 	 * The new record cannot exceed the max length.  ulen is a ULL as the
966 	 * individual record block counts can be up to (u32 - 1) in length
967 	 * hence we need to catch u32 addition overflows here.
968 	 */
969 	ulen += cright->rc_blockcount;
970 	if (ulen >= MAXREFCEXTLEN)
971 		return false;
972 
973 	return true;
974 }
975 
/*
 * Try to merge with any extents on the boundaries of the adjustment range.
 *
 * Attempts, in order: a three-way center merge (which consumes the whole
 * range), a left merge, then a right merge.  *agbno/*aglen are updated
 * by the merge helpers to reflect whatever part of the range remains;
 * *shape_changed reports whether the btree was modified.
 */
STATIC int
xfs_refcount_merge_extents(
	struct xfs_btree_cur	*cur,
	enum xfs_refc_domain	domain,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op adjust,
	bool			*shape_changed)
{
	struct xfs_refcount_irec	left = {0}, cleft = {0};
	struct xfs_refcount_irec	cright = {0}, right = {0};
	int				error;
	unsigned long long		ulen;
	bool				cequal;

	*shape_changed = false;
	/*
	 * Find the extent just below agbno [left], just above agbno [cleft],
	 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
	 * [right].
	 */
	error = xfs_refcount_find_left_extents(cur, &left, &cleft, domain,
			*agbno, *aglen);
	if (error)
		return error;
	error = xfs_refcount_find_right_extents(cur, &right, &cright, domain,
			*agbno, *aglen);
	if (error)
		return error;

	/* No left or right extent to merge; exit. */
	if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
		return 0;

	/* cleft == cright means one record spans the whole range. */
	cequal = (cleft.rc_startblock == cright.rc_startblock) &&
		 (cleft.rc_blockcount == cright.rc_blockcount);

	/* Try to merge left, cleft, and right.  cleft must == cright. */
	if (xfs_refc_want_merge_center(&left, &cleft, &cright, &right, cequal,
				adjust, &ulen)) {
		*shape_changed = true;
		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
				&right, ulen, aglen);
	}

	/* Try to merge left and cleft. */
	if (xfs_refc_want_merge_left(&left, &cleft, adjust)) {
		*shape_changed = true;
		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
				agbno, aglen);
		if (error)
			return error;

		/*
		 * If we just merged left + cleft and cleft == cright,
		 * we no longer have a cright to merge with right.  We're done.
		 */
		if (cequal)
			return 0;
	}

	/* Try to merge cright and right. */
	if (xfs_refc_want_merge_right(&cright, &right, adjust)) {
		*shape_changed = true;
		return xfs_refcount_merge_right_extent(cur, &right, &cright,
				aglen);
	}

	return 0;
}
1049 
/*
 * Decide whether the current transaction still has enough log reservation
 * to keep making refcount updates, or whether the caller should roll.
 *
 * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
 * true incorrectly is a shutdown FS; the penalty for guessing false
 * incorrectly is more transaction rolls than might be necessary.
 * Be conservative here.
 */
static bool
xfs_refcount_still_have_space(
	struct xfs_btree_cur		*cur)
{
	unsigned long			overhead;

	/*
	 * Worst case estimate: full splits of the free space and rmap btrees
	 * to handle each of the shape changes to the refcount btree.
	 */
	overhead = xfs_allocfree_block_count(cur->bc_mp,
				cur->bc_refc.shape_changes);
	overhead += cur->bc_mp->m_refc_maxlevels;
	overhead *= cur->bc_mp->m_sb.sb_blocksize;

	/*
	 * Only allow 2 refcount extent updates per transaction if the
	 * refcount continue update "error" has been injected.
	 */
	if (cur->bc_refc.nr_ops > 2 &&
	    XFS_TEST_ERROR(false, cur->bc_mp,
			XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
		return false;

	/* First update always fits; otherwise compare against log space. */
	if (cur->bc_refc.nr_ops == 0)
		return true;
	else if (overhead > cur->bc_tp->t_log_res)
		return false;
	return cur->bc_tp->t_log_res - overhead >
		cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
1087 
1088 /*
1089  * Adjust the refcounts of middle extents.  At this point we should have
1090  * split extents that crossed the adjustment range; merged with adjacent
1091  * extents; and updated agbno/aglen to reflect the merges.  Therefore,
1092  * all we have to do is update the extents inside [agbno, agbno + aglen].
1093  */
STATIC int
xfs_refcount_adjust_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;
	xfs_fsblock_t			fsbno;

	/* Merging did all the work already. */
	if (*aglen == 0)
		return 0;

	/* Position the cursor at the first shared record at or after agbno. */
	error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_SHARED, *agbno,
			&found_rec);
	if (error)
		goto out_error;

	while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
		error = xfs_refcount_get_rec(cur, &ext, &found_rec);
		if (error)
			goto out_error;
		if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) {
			/*
			 * Ran out of shared-domain records; synthesize one
			 * starting at the end of the AG so that the
			 * hole-filling logic below covers the remainder of
			 * the adjustment range.
			 */
			ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
			ext.rc_blockcount = 0;
			ext.rc_refcount = 0;
			ext.rc_domain = XFS_REFC_DOMAIN_SHARED;
		}

		/*
		 * Deal with a hole in the refcount tree; if a file maps to
		 * these blocks and there's no refcountbt record, pretend that
		 * there is one with refcount == 1.
		 */
		if (ext.rc_startblock != *agbno) {
			tmp.rc_startblock = *agbno;
			tmp.rc_blockcount = min(*aglen,
					ext.rc_startblock - *agbno);
			/* adj is +1 or -1, so this is 2 (insert) or 0 (free) */
			tmp.rc_refcount = 1 + adj;
			tmp.rc_domain = XFS_REFC_DOMAIN_SHARED;

			trace_xfs_refcount_modify_extent(cur, &tmp);

			/*
			 * Either cover the hole (increment) or
			 * delete the range (decrement).
			 */
			cur->bc_refc.nr_ops++;
			if (tmp.rc_refcount) {
				error = xfs_refcount_insert(cur, &tmp,
						&found_tmp);
				if (error)
					goto out_error;
				if (XFS_IS_CORRUPT(cur->bc_mp,
						   found_tmp != 1)) {
					xfs_btree_mark_sick(cur);
					error = -EFSCORRUPTED;
					goto out_error;
				}
			} else {
				/* Refcount fell to zero; free the blocks. */
				fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
						cur->bc_ag.pag->pag_agno,
						tmp.rc_startblock);
				error = xfs_free_extent_later(cur->bc_tp, fsbno,
						  tmp.rc_blockcount, NULL,
						  XFS_AG_RESV_NONE, 0);
				if (error)
					goto out_error;
			}

			(*agbno) += tmp.rc_blockcount;
			(*aglen) -= tmp.rc_blockcount;

			/* Stop if there's nothing left to modify */
			if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
				break;

			/* Move the cursor to the start of ext. */
			error = xfs_refcount_lookup_ge(cur,
					XFS_REFC_DOMAIN_SHARED, *agbno,
					&found_rec);
			if (error)
				goto out_error;
		}

		/*
		 * A previous step trimmed agbno/aglen such that the end of the
		 * range would not be in the middle of the record.  If this is
		 * no longer the case, something is seriously wrong with the
		 * btree.  Make sure we never feed the synthesized record into
		 * the processing loop below.
		 */
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) ||
		    XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/*
		 * Adjust the reference count and either update the tree
		 * (incr) or free the blocks (decr).
		 */
		if (ext.rc_refcount == MAXREFCOUNT)
			goto skip;	/* saturated; never adjusted further */
		ext.rc_refcount += adj;
		trace_xfs_refcount_modify_extent(cur, &ext);
		/* Count this update against the transaction reservation. */
		cur->bc_refc.nr_ops++;
		if (ext.rc_refcount > 1) {
			error = xfs_refcount_update(cur, &ext);
			if (error)
				goto out_error;
		} else if (ext.rc_refcount == 1) {
			/*
			 * Extents with refcount == 1 are never stored in the
			 * tree, so delete the record instead of updating it.
			 */
			error = xfs_refcount_delete(cur, &found_rec);
			if (error)
				goto out_error;
			if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
				xfs_btree_mark_sick(cur);
				error = -EFSCORRUPTED;
				goto out_error;
			}
			goto advloop;
		} else {
			/* Refcount fell to zero; free the blocks. */
			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
					cur->bc_ag.pag->pag_agno,
					ext.rc_startblock);
			error = xfs_free_extent_later(cur->bc_tp, fsbno,
					ext.rc_blockcount, NULL,
					XFS_AG_RESV_NONE, 0);
			if (error)
				goto out_error;
		}

skip:
		/* Advance the cursor past the record we just processed. */
		error = xfs_btree_increment(cur, 0, &found_rec);
		if (error)
			goto out_error;

advloop:
		(*agbno) += ext.rc_blockcount;
		(*aglen) -= ext.rc_blockcount;
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
	return error;
}
1245 
1246 /* Adjust the reference count of a range of AG blocks. */
1247 STATIC int
1248 xfs_refcount_adjust(
1249 	struct xfs_btree_cur	*cur,
1250 	xfs_agblock_t		*agbno,
1251 	xfs_extlen_t		*aglen,
1252 	enum xfs_refc_adjust_op	adj)
1253 {
1254 	bool			shape_changed;
1255 	int			shape_changes = 0;
1256 	int			error;
1257 
1258 	if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
1259 		trace_xfs_refcount_increase(cur, *agbno, *aglen);
1260 	else
1261 		trace_xfs_refcount_decrease(cur, *agbno, *aglen);
1262 
1263 	/*
1264 	 * Ensure that no rcextents cross the boundary of the adjustment range.
1265 	 */
1266 	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
1267 			*agbno, &shape_changed);
1268 	if (error)
1269 		goto out_error;
1270 	if (shape_changed)
1271 		shape_changes++;
1272 
1273 	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
1274 			*agbno + *aglen, &shape_changed);
1275 	if (error)
1276 		goto out_error;
1277 	if (shape_changed)
1278 		shape_changes++;
1279 
1280 	/*
1281 	 * Try to merge with the left or right extents of the range.
1282 	 */
1283 	error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED,
1284 			agbno, aglen, adj, &shape_changed);
1285 	if (error)
1286 		goto out_error;
1287 	if (shape_changed)
1288 		shape_changes++;
1289 	if (shape_changes)
1290 		cur->bc_refc.shape_changes++;
1291 
1292 	/* Now that we've taken care of the ends, adjust the middle extents */
1293 	error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj);
1294 	if (error)
1295 		goto out_error;
1296 
1297 	return 0;
1298 
1299 out_error:
1300 	trace_xfs_refcount_adjust_error(cur, error, _RET_IP_);
1301 	return error;
1302 }
1303 
1304 /*
 * Set up a continuation of a deferred refcount operation by updating the
 * intent.
1306  * Checks to make sure we're not going to run off the end of the AG.
1307  */
1308 static inline int
1309 xfs_refcount_continue_op(
1310 	struct xfs_btree_cur		*cur,
1311 	struct xfs_refcount_intent	*ri,
1312 	xfs_agblock_t			new_agbno)
1313 {
1314 	struct xfs_mount		*mp = cur->bc_mp;
1315 	struct xfs_perag		*pag = cur->bc_ag.pag;
1316 
1317 	if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno,
1318 					ri->ri_blockcount))) {
1319 		xfs_btree_mark_sick(cur);
1320 		return -EFSCORRUPTED;
1321 	}
1322 
1323 	ri->ri_startblock = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
1324 
1325 	ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount));
1326 	ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, ri->ri_startblock));
1327 
1328 	return 0;
1329 }
1330 
1331 /*
1332  * Process one of the deferred refcount operations.  We pass back the
1333  * btree cursor to maintain our lock on the btree between calls.
1334  * This saves time and eliminates a buffer deadlock between the
1335  * superblock and the AGF because we'll always grab them in the same
1336  * order.
1337  */
int
xfs_refcount_finish_one(
	struct xfs_trans		*tp,
	struct xfs_refcount_intent	*ri,
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur = *pcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agblock_t			bno;
	unsigned long			nr_ops = 0;
	int				shape_changes = 0;

	bno = XFS_FSB_TO_AGBNO(mp, ri->ri_startblock);

	trace_xfs_refcount_deferred(mp, ri);

	/* Error injection point for exercising intent recovery. */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
		return -EIO;

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
		/*
		 * Save the transaction-space accounting from the stale
		 * cursor so that the replacement cursor keeps counting
		 * against the same log reservation.
		 */
		nr_ops = rcur->bc_refc.nr_ops;
		shape_changes = rcur->bc_refc.shape_changes;
		xfs_btree_del_cursor(rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		error = xfs_alloc_read_agf(ri->ri_pag, tp,
				XFS_ALLOC_FLAG_FREEING, &agbp);
		if (error)
			return error;

		*pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp,
							  ri->ri_pag);
		/* Carry over the accounting saved from any prior cursor. */
		rcur->bc_refc.nr_ops = nr_ops;
		rcur->bc_refc.shape_changes = shape_changes;
	}

	switch (ri->ri_type) {
	case XFS_REFCOUNT_INCREASE:
		error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
				XFS_REFCOUNT_ADJUST_INCREASE);
		if (error)
			return error;
		/* Adjust stopped early; update the intent to resume at bno. */
		if (ri->ri_blockcount > 0)
			error = xfs_refcount_continue_op(rcur, ri, bno);
		break;
	case XFS_REFCOUNT_DECREASE:
		error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
				XFS_REFCOUNT_ADJUST_DECREASE);
		if (error)
			return error;
		/* Adjust stopped early; update the intent to resume at bno. */
		if (ri->ri_blockcount > 0)
			error = xfs_refcount_continue_op(rcur, ri, bno);
		break;
	case XFS_REFCOUNT_ALLOC_COW:
		error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
		if (error)
			return error;
		/* CoW updates are done in one shot; mark the intent done. */
		ri->ri_blockcount = 0;
		break;
	case XFS_REFCOUNT_FREE_COW:
		error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
		if (error)
			return error;
		/* CoW updates are done in one shot; mark the intent done. */
		ri->ri_blockcount = 0;
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}
	/* A nonzero blockcount means the caller must requeue this intent. */
	if (!error && ri->ri_blockcount > 0)
		trace_xfs_refcount_finish_one_leftover(mp, ri);
	return error;
}
1419 
1420 /*
1421  * Record a refcount intent for later processing.
1422  */
1423 static void
1424 __xfs_refcount_add(
1425 	struct xfs_trans		*tp,
1426 	enum xfs_refcount_intent_type	type,
1427 	xfs_fsblock_t			startblock,
1428 	xfs_extlen_t			blockcount)
1429 {
1430 	struct xfs_refcount_intent	*ri;
1431 
1432 	ri = kmem_cache_alloc(xfs_refcount_intent_cache,
1433 			GFP_KERNEL | __GFP_NOFAIL);
1434 	INIT_LIST_HEAD(&ri->ri_list);
1435 	ri->ri_type = type;
1436 	ri->ri_startblock = startblock;
1437 	ri->ri_blockcount = blockcount;
1438 
1439 	xfs_refcount_defer_add(tp, ri);
1440 }
1441 
1442 /*
1443  * Increase the reference count of the blocks backing a file's extent.
1444  */
1445 void
1446 xfs_refcount_increase_extent(
1447 	struct xfs_trans		*tp,
1448 	struct xfs_bmbt_irec		*PREV)
1449 {
1450 	if (!xfs_has_reflink(tp->t_mountp))
1451 		return;
1452 
1453 	__xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock,
1454 			PREV->br_blockcount);
1455 }
1456 
1457 /*
1458  * Decrease the reference count of the blocks backing a file's extent.
1459  */
1460 void
1461 xfs_refcount_decrease_extent(
1462 	struct xfs_trans		*tp,
1463 	struct xfs_bmbt_irec		*PREV)
1464 {
1465 	if (!xfs_has_reflink(tp->t_mountp))
1466 		return;
1467 
1468 	__xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock,
1469 			PREV->br_blockcount);
1470 }
1471 
1472 /*
1473  * Given an AG extent, find the lowest-numbered run of shared blocks
1474  * within that range and return the range in fbno/flen.  If
1475  * find_end_of_shared is set, return the longest contiguous extent of
1476  * shared blocks; if not, just return the first extent we find.  If no
1477  * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
1478  * and 0, respectively.
1479  */
int
xfs_refcount_find_shared(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	xfs_agblock_t			*fbno,
	xfs_extlen_t			*flen,
	bool				find_end_of_shared)
{
	struct xfs_refcount_irec	tmp;
	int				i;
	int				have;
	int				error;

	trace_xfs_refcount_find_shared(cur, agbno, aglen);

	/* By default, skip the whole range */
	*fbno = NULLAGBLOCK;
	*flen = 0;

	/* Try to find a refcount extent that crosses the start */
	error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, agbno,
			&have);
	if (error)
		goto out_error;
	if (!have) {
		/* No left extent, look at the next one */
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
	}
	error = xfs_refcount_get_rec(cur, &tmp, &i);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}
	/* Records outside the shared domain don't count as shared. */
	if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
		goto done;

	/* If the extent ends before the start, look at the next one */
	if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
			goto done;
	}

	/* If the extent starts after the range we want, bail out */
	if (tmp.rc_startblock >= agbno + aglen)
		goto done;

	/* We found the start of a shared extent! */
	if (tmp.rc_startblock < agbno) {
		/* Trim the record so it starts at the query range. */
		tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
		tmp.rc_startblock = agbno;
	}

	/* Clamp the result length to the end of the query range. */
	*fbno = tmp.rc_startblock;
	*flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
	if (!find_end_of_shared)
		goto done;

	/* Otherwise, find the end of this shared extent */
	while (*fbno + *flen < agbno + aglen) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			break;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		/* Stop at the first non-contiguous or out-of-range record. */
		if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED ||
		    tmp.rc_startblock >= agbno + aglen ||
		    tmp.rc_startblock != *fbno + *flen)
			break;
		*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
	}

done:
	trace_xfs_refcount_find_shared_result(cur, *fbno, *flen);

out_error:
	if (error)
		trace_xfs_refcount_find_shared_error(cur, error, _RET_IP_);
	return error;
}
1588 
1589 /*
1590  * Recovering CoW Blocks After a Crash
1591  *
1592  * Due to the way that the copy on write mechanism works, there's a window of
1593  * opportunity in which we can lose track of allocated blocks during a crash.
1594  * Because CoW uses delayed allocation in the in-core CoW fork, writeback
1595  * causes blocks to be allocated and stored in the CoW fork.  The blocks are
1596  * no longer in the free space btree but are not otherwise recorded anywhere
1597  * until the write completes and the blocks are mapped into the file.  A crash
1598  * in between allocation and remapping results in the replacement blocks being
1599  * lost.  This situation is exacerbated by the CoW extent size hint because
 * allocations can hang around for a long time.
1601  *
1602  * However, there is a place where we can record these allocations before they
1603  * become mappings -- the reference count btree.  The btree does not record
1604  * extents with refcount == 1, so we can record allocations with a refcount of
1605  * 1.  Blocks being used for CoW writeout cannot be shared, so there should be
1606  * no conflict with shared block records.  These mappings should be created
1607  * when we allocate blocks to the CoW fork and deleted when they're removed
1608  * from the CoW fork.
1609  *
1610  * Minor nit: records for in-progress CoW allocations and records for shared
1611  * extents must never be merged, to preserve the property that (except for CoW
1612  * allocations) there are no refcount btree entries with refcount == 1.  The
1613  * only time this could potentially happen is when unsharing a block that's
1614  * adjacent to CoW allocations, so we must be careful to avoid this.
1615  *
1616  * At mount time we recover lost CoW allocations by searching the refcount
1617  * btree for these refcount == 1 mappings.  These represent CoW allocations
1618  * that were in progress at the time the filesystem went down, so we can free
1619  * them to get the space back.
1620  *
1621  * This mechanism is superior to creating EFIs for unmapped CoW extents for
1622  * several reasons -- first, EFIs pin the tail of the log and would have to be
1623  * periodically relogged to avoid filling up the log.  Second, CoW completions
1624  * will have to file an EFD and create new EFIs for whatever remains in the
1625  * CoW fork; this partially takes care of (1) but extent-size reservations
1626  * will have to periodically relog even if there's no writeout in progress.
1627  * This can happen if the CoW extent size hint is set, which you really want.
1628  * Third, EFIs cannot currently be automatically relogged into newer
1629  * transactions to advance the log tail.  Fourth, stuffing the log full of
1630  * EFIs places an upper bound on the number of CoW allocations that can be
1631  * held filesystem-wide at any given time.  Recording them in the refcount
1632  * btree doesn't require us to maintain any state in memory and doesn't pin
1633  * the log.
1634  */
1635 /*
1636  * Adjust the refcounts of CoW allocations.  These allocations are "magic"
1637  * in that they're not referenced anywhere else in the filesystem, so we
1638  * stash them in the refcount btree with a refcount of 1 until either file
1639  * remapping (or CoW cancellation) happens.
1640  */
STATIC int
xfs_refcount_adjust_cow_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;

	if (aglen == 0)
		return 0;

	/* Find any overlapping refcount records */
	error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_COW, agbno,
			&found_rec);
	if (error)
		goto out_error;
	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
	if (error)
		goto out_error;
	/* Any record we find past agbno must belong to the CoW domain. */
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec &&
				ext.rc_domain != XFS_REFC_DOMAIN_COW)) {
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
		goto out_error;
	}
	if (!found_rec) {
		/* Synthesize an end-of-AG record so the checks below work. */
		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
		ext.rc_blockcount = 0;
		ext.rc_refcount = 0;
		ext.rc_domain = XFS_REFC_DOMAIN_COW;
	}

	switch (adj) {
	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
		/* Adding a CoW reservation, there should be nothing here. */
		if (XFS_IS_CORRUPT(cur->bc_mp,
				   agbno + aglen > ext.rc_startblock)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/* Stage the reservation as a record with refcount == 1. */
		tmp.rc_startblock = agbno;
		tmp.rc_blockcount = aglen;
		tmp.rc_refcount = 1;
		tmp.rc_domain = XFS_REFC_DOMAIN_COW;

		trace_xfs_refcount_modify_extent(cur, &tmp);

		error = xfs_refcount_insert(cur, &tmp,
				&found_tmp);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	case XFS_REFCOUNT_ADJUST_COW_FREE:
		/* Removing a CoW reservation, there should be one extent. */
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/* Delete the staging record outright. */
		ext.rc_refcount = 0;
		trace_xfs_refcount_modify_extent(cur, &ext);
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	default:
		/*
		 * NOTE(review): an unexpected @adj only trips this assert and
		 * then returns the current value of @error (0 at this point);
		 * confirm callers can only pass COW_ALLOC or COW_FREE.
		 */
		ASSERT(0);
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
	return error;
}
1741 
1742 /*
1743  * Add or remove refcount btree entries for CoW reservations.
1744  */
1745 STATIC int
1746 xfs_refcount_adjust_cow(
1747 	struct xfs_btree_cur	*cur,
1748 	xfs_agblock_t		agbno,
1749 	xfs_extlen_t		aglen,
1750 	enum xfs_refc_adjust_op	adj)
1751 {
1752 	bool			shape_changed;
1753 	int			error;
1754 
1755 	/*
1756 	 * Ensure that no rcextents cross the boundary of the adjustment range.
1757 	 */
1758 	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
1759 			agbno, &shape_changed);
1760 	if (error)
1761 		goto out_error;
1762 
1763 	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
1764 			agbno + aglen, &shape_changed);
1765 	if (error)
1766 		goto out_error;
1767 
1768 	/*
1769 	 * Try to merge with the left or right extents of the range.
1770 	 */
1771 	error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_COW, &agbno,
1772 			&aglen, adj, &shape_changed);
1773 	if (error)
1774 		goto out_error;
1775 
1776 	/* Now that we've taken care of the ends, adjust the middle extents */
1777 	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
1778 	if (error)
1779 		goto out_error;
1780 
1781 	return 0;
1782 
1783 out_error:
1784 	trace_xfs_refcount_adjust_cow_error(cur, error, _RET_IP_);
1785 	return error;
1786 }
1787 
1788 /*
1789  * Record a CoW allocation in the refcount btree.
1790  */
1791 STATIC int
1792 __xfs_refcount_cow_alloc(
1793 	struct xfs_btree_cur	*rcur,
1794 	xfs_agblock_t		agbno,
1795 	xfs_extlen_t		aglen)
1796 {
1797 	trace_xfs_refcount_cow_increase(rcur, agbno, aglen);
1798 
1799 	/* Add refcount btree reservation */
1800 	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1801 			XFS_REFCOUNT_ADJUST_COW_ALLOC);
1802 }
1803 
1804 /*
1805  * Remove a CoW allocation from the refcount btree.
1806  */
1807 STATIC int
1808 __xfs_refcount_cow_free(
1809 	struct xfs_btree_cur	*rcur,
1810 	xfs_agblock_t		agbno,
1811 	xfs_extlen_t		aglen)
1812 {
1813 	trace_xfs_refcount_cow_decrease(rcur, agbno, aglen);
1814 
1815 	/* Remove refcount btree reservation */
1816 	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1817 			XFS_REFCOUNT_ADJUST_COW_FREE);
1818 }
1819 
1820 /* Record a CoW staging extent in the refcount btree. */
1821 void
1822 xfs_refcount_alloc_cow_extent(
1823 	struct xfs_trans		*tp,
1824 	xfs_fsblock_t			fsb,
1825 	xfs_extlen_t			len)
1826 {
1827 	struct xfs_mount		*mp = tp->t_mountp;
1828 
1829 	if (!xfs_has_reflink(mp))
1830 		return;
1831 
1832 	__xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
1833 
1834 	/* Add rmap entry */
1835 	xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
1836 			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1837 }
1838 
/* Forget a CoW staging extent in the refcount btree. */
1840 void
1841 xfs_refcount_free_cow_extent(
1842 	struct xfs_trans		*tp,
1843 	xfs_fsblock_t			fsb,
1844 	xfs_extlen_t			len)
1845 {
1846 	struct xfs_mount		*mp = tp->t_mountp;
1847 
1848 	if (!xfs_has_reflink(mp))
1849 		return;
1850 
1851 	/* Remove rmap entry */
1852 	xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
1853 			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1854 	__xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
1855 }
1856 
/* In-memory note of one leftover CoW staging extent found during recovery. */
struct xfs_refcount_recovery {
	struct list_head		rr_list;	/* entry in the debris list */
	struct xfs_refcount_irec	rr_rrec;	/* the leftover record itself */
};
1861 
1862 /* Stuff an extent on the recovery list. */
1863 STATIC int
1864 xfs_refcount_recover_extent(
1865 	struct xfs_btree_cur		*cur,
1866 	const union xfs_btree_rec	*rec,
1867 	void				*priv)
1868 {
1869 	struct list_head		*debris = priv;
1870 	struct xfs_refcount_recovery	*rr;
1871 
1872 	if (XFS_IS_CORRUPT(cur->bc_mp,
1873 			   be32_to_cpu(rec->refc.rc_refcount) != 1)) {
1874 		xfs_btree_mark_sick(cur);
1875 		return -EFSCORRUPTED;
1876 	}
1877 
1878 	rr = kmalloc(sizeof(struct xfs_refcount_recovery),
1879 			GFP_KERNEL | __GFP_NOFAIL);
1880 	INIT_LIST_HEAD(&rr->rr_list);
1881 	xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
1882 
1883 	if (xfs_refcount_check_irec(cur->bc_ag.pag, &rr->rr_rrec) != NULL ||
1884 	    XFS_IS_CORRUPT(cur->bc_mp,
1885 			   rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) {
1886 		xfs_btree_mark_sick(cur);
1887 		kfree(rr);
1888 		return -EFSCORRUPTED;
1889 	}
1890 
1891 	list_add_tail(&rr->rr_list, debris);
1892 	return 0;
1893 }
1894 
1895 /* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
	struct xfs_mount		*mp,
	struct xfs_perag		*pag)
{
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agbp;
	struct xfs_refcount_recovery	*rr, *n;
	struct list_head		debris;
	union xfs_btree_irec		low = {
		.rc.rc_domain		= XFS_REFC_DOMAIN_COW,
	};
	union xfs_btree_irec		high = {
		.rc.rc_domain		= XFS_REFC_DOMAIN_COW,
		.rc.rc_startblock	= -1U,	/* to the end of the keyspace */
	};
	xfs_fsblock_t			fsb;
	int				error;

	/* reflink filesystems mustn't have AGs larger than 2^31-1 blocks */
	BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG);
	if (mp->m_sb.sb_agblocks > XFS_MAX_CRC_AG_BLOCKS)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&debris);

	/*
	 * In this first part, we use an empty transaction to gather up
	 * all the leftover CoW extents so that we can subsequently
	 * delete them.  The empty transaction is used to avoid
	 * a buffer lock deadlock if there happens to be a loop in the
	 * refcountbt because we're allowed to re-grab a buffer that is
	 * already attached to our transaction.  When we're done
	 * recording the CoW debris we cancel the (empty) transaction
	 * and everything goes away cleanly.
	 */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans;
	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);

	/* Find all the leftover CoW staging extents. */
	error = xfs_btree_query_range(cur, &low, &high,
			xfs_refcount_recover_extent, &debris);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);
	xfs_trans_cancel(tp);
	if (error)
		goto out_free;

	/*
	 * Now iterate the list to free the leftovers.  Each record gets its
	 * own transaction so that every free commits independently.
	 */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		/* Set up transaction. */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
		if (error)
			goto out_free;

		/* Free the orphan record */
		fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno,
				rr->rr_rrec.rc_startblock);
		xfs_refcount_free_cow_extent(tp, fsb,
				rr->rr_rrec.rc_blockcount);

		/* Free the block. */
		error = xfs_free_extent_later(tp, fsb,
				rr->rr_rrec.rc_blockcount, NULL,
				XFS_AG_RESV_NONE, 0);
		if (error)
			goto out_trans;

		error = xfs_trans_commit(tp);
		if (error)
			goto out_free;

		list_del(&rr->rr_list);
		kfree(rr);
	}

	return error;
out_trans:
	xfs_trans_cancel(tp);
out_free:
	/* Free the leftover list */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		list_del(&rr->rr_list);
		kfree(rr);
	}
	return error;
}
1990 
1991 /*
1992  * Scan part of the keyspace of the refcount records and tell us if the area
1993  * has no records, is fully mapped by records, or is partially filled.
1994  */
1995 int
1996 xfs_refcount_has_records(
1997 	struct xfs_btree_cur	*cur,
1998 	enum xfs_refc_domain	domain,
1999 	xfs_agblock_t		bno,
2000 	xfs_extlen_t		len,
2001 	enum xbtree_recpacking	*outcome)
2002 {
2003 	union xfs_btree_irec	low;
2004 	union xfs_btree_irec	high;
2005 
2006 	memset(&low, 0, sizeof(low));
2007 	low.rc.rc_startblock = bno;
2008 	memset(&high, 0xFF, sizeof(high));
2009 	high.rc.rc_startblock = bno + len - 1;
2010 	low.rc.rc_domain = high.rc.rc_domain = domain;
2011 
2012 	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
2013 }
2014 
/* Context carried through the btree walk for xfs_refcount_query_range. */
struct xfs_refcount_query_range_info {
	xfs_refcount_query_range_fn	fn;	/* caller's per-record callback */
	void				*priv;	/* opaque data passed to @fn */
};
2019 
2020 /* Format btree record and pass to our callback. */
2021 STATIC int
2022 xfs_refcount_query_range_helper(
2023 	struct xfs_btree_cur		*cur,
2024 	const union xfs_btree_rec	*rec,
2025 	void				*priv)
2026 {
2027 	struct xfs_refcount_query_range_info	*query = priv;
2028 	struct xfs_refcount_irec	irec;
2029 	xfs_failaddr_t			fa;
2030 
2031 	xfs_refcount_btrec_to_irec(rec, &irec);
2032 	fa = xfs_refcount_check_irec(cur->bc_ag.pag, &irec);
2033 	if (fa)
2034 		return xfs_refcount_complain_bad_rec(cur, fa, &irec);
2035 
2036 	return query->fn(cur, &irec, query->priv);
2037 }
2038 
2039 /* Find all refcount records between two keys. */
2040 int
2041 xfs_refcount_query_range(
2042 	struct xfs_btree_cur		*cur,
2043 	const struct xfs_refcount_irec	*low_rec,
2044 	const struct xfs_refcount_irec	*high_rec,
2045 	xfs_refcount_query_range_fn	fn,
2046 	void				*priv)
2047 {
2048 	union xfs_btree_irec		low_brec = { .rc = *low_rec };
2049 	union xfs_btree_irec		high_brec = { .rc = *high_rec };
2050 	struct xfs_refcount_query_range_info query = { .priv = priv, .fn = fn };
2051 
2052 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
2053 			xfs_refcount_query_range_helper, &query);
2054 }
2055 
2056 int __init
2057 xfs_refcount_intent_init_cache(void)
2058 {
2059 	xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
2060 			sizeof(struct xfs_refcount_intent),
2061 			0, 0, NULL);
2062 
2063 	return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
2064 }
2065 
2066 void
2067 xfs_refcount_intent_destroy_cache(void)
2068 {
2069 	kmem_cache_destroy(xfs_refcount_intent_cache);
2070 	xfs_refcount_intent_cache = NULL;
2071 }
2072