// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

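/*
 * Free a CUI and the shadow log vector buffer attached to it.  Oversized
 * CUIs were allocated from the heap; everything else came from the
 * xfs_cui_cache slab.
 */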
STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kvfree(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kfree(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}


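/*
 * This returns the number of iovecs and the number of bytes needed to
 * log the given CUI item.  We use only one iovec, which covers the
 * cui_log_format structure including its array of physical extents.
 */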
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

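/*
 * Compute the log space needed for a CUI with the given number of extents,
 * including the overhead of the log iovec header.
 */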
unsigned int xfs_cui_log_space(unsigned int nr)
{
	return xlog_item_space(1, xfs_cui_log_format_sizeof(nr));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	cuip->cui_format.cui_type = lip->li_type;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	unsigned short			item_type,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT);

	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

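/*
 * This returns the number of iovecs and the number of bytes needed to
 * log the given CUD item.  The CUD log format has a fixed size, so a
 * single iovec is always enough.
 */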
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

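/*
 * Compute the log space needed for a CUD, including the overhead of the
 * log iovec header.
 */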
unsigned int xfs_cud_log_space(void)
{
	return xlog_item_space(1, sizeof(struct xfs_cud_log_format));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 * The CUD log format has a fixed size, so there are no extent
 * slots to fill in.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT);

	cudp->cud_format.cud_type = lip->li_type;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kvfree(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

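/* Return the CUI that this CUD will mark as finished. */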
static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_refcount_intent, ri_list);
}

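/* Does this log item describe an update to the realtime refcount btree? */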
static inline bool
xfs_cui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	return lip->li_type == XFS_LI_CUI_RT;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_refcount_intent	*ra = ci_entry(a);
	struct xfs_refcount_intent	*rb = ci_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;

	pmap->pe_flags = 0;
	switch (ri->ri_type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= ri->ri_type;
		break;
	default:
		ASSERT(0);
	}
}

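/*
 * Create a CUI of the given type, optionally sort the intent list by group,
 * and log each refcount update in the intent item's extent array.
 */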
static struct xfs_log_item *
__xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort,
	unsigned short			item_type)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	cuip = xfs_cui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

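/* Create a CUI for refcount updates to the data device. */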
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI);
}

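/* Pick the CUD log item type that matches the CUI being finished. */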
static inline unsigned short
xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip)
{
	return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(intent);
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item,
			xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	return &cudp->cud_item;
}

/* Add this deferred CUI to the transaction. */
void
xfs_refcount_defer_add(
	struct xfs_trans		*tp,
	struct xfs_refcount_intent	*ri)
{
	struct xfs_mount		*mp = tp->t_mountp;

	/*
	 * Deferred refcount updates for the realtime and data sections must
	 * use separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_refcount_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrefcount_update_defer_type :
			&xfs_refcount_update_defer_type);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	error = xfs_refcount_finish_one(tp, ri, state);
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_refcount_finish_one. */
STATIC void
xfs_refcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	if (isrt)
		return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

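/*
 * Reconstruct an incore refcount intent from a physical extent recovered
 * from the log and queue it as deferred work.
 */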
static inline void
xfs_cui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	struct xfs_refcount_intent	*ri;

	ri = kmem_cache_alloc(xfs_refcount_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
	ri->ri_startblock = pmap->pe_startblock;
	ri->ri_blockcount = pmap->pe_len;
	ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_refcount_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_cui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp, isrt,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}

		xfs_cui_recover_work(mp, dfp, isrt,
				&cuip->cui_format.cui_extents[i]);
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed in defer_ops to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&cuip->cui_format,
				sizeof(cuip->cui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_refcount_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	ASSERT(intent->li_type == XFS_LI_CUI ||
	       intent->li_type == XFS_LI_CUI_RT);

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	atomic_set(&cuip->cui_next_extent, count);

	return &cuip->cui_item;
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.name		= "refcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};

#ifdef CONFIG_XFS_RT
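/* Create a CUI for refcount updates to the realtime device. */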
static struct xfs_log_item *
xfs_rtrefcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI_RT);
}

/* Process a deferred realtime refcount update. */
STATIC int
xfs_rtrefcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	error = xfs_rtrefcount_finish_one(tp, ri, state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rtrefcount_finish_one. */
STATIC void
xfs_rtrefcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrefcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_rtrefcount_update_finish_item,
	.finish_cleanup = xfs_rtrefcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
};
#endif /* CONFIG_XFS_RT */

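/* Match CUI log items in the AIL by intent id. */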
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_match	= xfs_cui_item_match,
};

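/*
 * Copy a CUI log format structure: first the fixed-size header, then each
 * of the physical extents logged in the variable-length array.
 */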
static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].iov_base;

	if (item->ri_buf[0].iov_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].iov_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_refcount_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
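/*
 * Recover a realtime refcount update intent from the log and queue it for
 * processing with the realtime refcount defer type.
 */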
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].iov_base;

	if (item->ri_buf[0].iov_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].iov_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_rtrefcount_update_defer_type);
	return 0;
}
#else
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
	return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtcui_item_ops = {
	.item_type		= XFS_LI_CUI_RT,
	.commit_pass2		= xlog_recover_rtcui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].iov_base;
	if (item->ri_buf[0].iov_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
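/* Cancel the recovered realtime CUI referenced by this realtime CUD. */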
STATIC int
xlog_recover_rtcud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].iov_base;
	if (item->ri_buf[0].iov_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI_RT,
			cud_formatp->cud_cui_id);
	return 0;
}
#else
# define xlog_recover_rtcud_commit_pass2	xlog_recover_rtcui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtcud_item_ops = {
	.item_type		= XFS_LI_CUD_RT,
	.commit_pass2		= xlog_recover_rtcud_commit_pass2,
};
863