xref: /linux/fs/xfs/xfs_refcount_item.c (revision 3c4fc7bf4c9e66fe71abcbf93f62f4ddb89b7f15)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2016 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_bit.h"
12 #include "xfs_shared.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_refcount_item.h"
18 #include "xfs_log.h"
19 #include "xfs_refcount.h"
20 #include "xfs_error.h"
21 #include "xfs_log_priv.h"
22 #include "xfs_log_recover.h"
23 
24 struct kmem_cache	*xfs_cui_cache;
25 struct kmem_cache	*xfs_cud_cache;
26 
27 static const struct xfs_item_ops xfs_cui_item_ops;
28 
29 static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
30 {
31 	return container_of(lip, struct xfs_cui_log_item, cui_item);
32 }
33 
34 STATIC void
35 xfs_cui_item_free(
36 	struct xfs_cui_log_item	*cuip)
37 {
38 	kmem_free(cuip->cui_item.li_lv_shadow);
39 	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
40 		kmem_free(cuip);
41 	else
42 		kmem_cache_free(xfs_cui_cache, cuip);
43 }
44 
45 /*
46  * Freeing the CUI requires that we remove it from the AIL if it has already
47  * been placed there. However, the CUI may not yet have been placed in the AIL
48  * when called by xfs_cui_release() from CUD processing due to the ordering of
49  * committed vs unpin operations in bulk insert operations. Hence the reference
50  * count to ensure only the last caller frees the CUI.
51  */
52 STATIC void
53 xfs_cui_release(
54 	struct xfs_cui_log_item	*cuip)
55 {
56 	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
57 	if (!atomic_dec_and_test(&cuip->cui_refcount))
58 		return;
59 
60 	xfs_trans_ail_delete(&cuip->cui_item, 0);
61 	xfs_cui_item_free(cuip);
62 }
63 
64 
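/*
 * Report the space needed to log this CUI: a single region covering the
 * format structure and its inline extent array.
 */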
65 STATIC void
66 xfs_cui_item_size(
67 	struct xfs_log_item	*lip,
68 	int			*nvecs,
69 	int			*nbytes)
70 {
71 	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
72 
73 	*nvecs += 1;
74 	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
75 }
76 
77 /*
78  * This is called to fill in the vector of log iovecs for the
79  * given cui log item. We use only 1 iovec, and we point that
80  * at the cui_log_format structure embedded in the cui item.
81  * It is at this point that we assert that all of the extent
82  * slots in the cui item have been filled.
83  */
84 STATIC void
85 xfs_cui_item_format(
86 	struct xfs_log_item	*lip,
87 	struct xfs_log_vec	*lv)
88 {
89 	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
90 	struct xfs_log_iovec	*vecp = NULL;
91 
92 	ASSERT(atomic_read(&cuip->cui_next_extent) ==
93 			cuip->cui_format.cui_nextents);
94 
95 	cuip->cui_format.cui_type = XFS_LI_CUI;
96 	cuip->cui_format.cui_size = 1;
97 
98 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
99 			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
100 }
101 
102 /*
103  * The unpin operation is the last place a CUI is manipulated in the log. It is
104  * either inserted in the AIL or aborted in the event of a log I/O error. In
105  * either case, the CUI transaction has been successfully committed to make it
106  * this far. Therefore, we expect whoever committed the CUI to either construct
107  * and commit the CUD or drop the CUD's reference in the event of error. Simply
108  * drop the log's CUI reference now that the log is done with it.
109  */
110 STATIC void
111 xfs_cui_item_unpin(
112 	struct xfs_log_item	*lip,
113 	int			remove)
114 {
115 	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
116 
117 	xfs_cui_release(cuip);
118 }
119 
120 /*
121  * By the time this is called, the CUI has either been committed or the
122  * transaction has been cancelled. If it was cancelled, a CUD isn't going to
123  * be constructed and thus we free the CUI here directly.
124  */
125 STATIC void
126 xfs_cui_item_release(
127 	struct xfs_log_item	*lip)
128 {
129 	xfs_cui_release(CUI_ITEM(lip));
130 }
131 
132 /*
133  * Allocate and initialize a CUI item with the given number of extents.
134  */
135 STATIC struct xfs_cui_log_item *
136 xfs_cui_init(
137 	struct xfs_mount		*mp,
138 	uint				nextents)
140 {
141 	struct xfs_cui_log_item		*cuip;
142 
143 	ASSERT(nextents > 0);
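	/*
	 * A CUI with more extents than fit in the cache-backed structure
	 * falls back to a plain heap allocation; xfs_cui_item_free() makes
	 * the same size check when freeing it.
	 */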
144 	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
145 		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
146 				0);
147 	else
148 		cuip = kmem_cache_zalloc(xfs_cui_cache,
149 					 GFP_KERNEL | __GFP_NOFAIL);
150 
151 	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
152 	cuip->cui_format.cui_nextents = nextents;
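	/*
	 * The kernel address of the CUI serves as the intent id; the matching
	 * CUD records it in cud_cui_id so the two items can be paired up.
	 */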
153 	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
154 	atomic_set(&cuip->cui_next_extent, 0);
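	/*
	 * Two references: one is dropped when the log is done with the item
	 * (unpin) and the other when the work is finished or cancelled.
	 */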
155 	atomic_set(&cuip->cui_refcount, 2);
156 
157 	return cuip;
158 }
159 
160 static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
161 {
162 	return container_of(lip, struct xfs_cud_log_item, cud_item);
163 }
164 
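/*
 * Report the space needed to log this CUD: a single region covering the
 * fixed-size CUD format structure.
 */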
165 STATIC void
166 xfs_cud_item_size(
167 	struct xfs_log_item	*lip,
168 	int			*nvecs,
169 	int			*nbytes)
170 {
171 	*nvecs += 1;
172 	*nbytes += sizeof(struct xfs_cud_log_format);
173 }
174 
175 /*
176  * This is called to fill in the vector of log iovecs for the
177  * given cud log item. We use only 1 iovec, and we point that
178  * at the cud_log_format structure embedded in the cud item.
179  * Unlike the CUI, the CUD format carries no extent array, so
180  * there are no extent slots to check here.
181  */
182 STATIC void
183 xfs_cud_item_format(
184 	struct xfs_log_item	*lip,
185 	struct xfs_log_vec	*lv)
186 {
187 	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
188 	struct xfs_log_iovec	*vecp = NULL;
189 
190 	cudp->cud_format.cud_type = XFS_LI_CUD;
191 	cudp->cud_format.cud_size = 1;
192 
193 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
194 			sizeof(struct xfs_cud_log_format));
195 }
196 
197 /*
198  * By the time this is called, the CUD has either been committed or the
199  * transaction has been cancelled. Either way, drop our reference to the CUI
200  * and free the CUD.
201  */
202 STATIC void
203 xfs_cud_item_release(
204 	struct xfs_log_item	*lip)
205 {
206 	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
207 
208 	xfs_cui_release(cudp->cud_cuip);
209 	kmem_free(cudp->cud_item.li_lv_shadow);
210 	kmem_cache_free(xfs_cud_cache, cudp);
211 }
212 
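/* Return the CUI being completed by this CUD. */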
213 static struct xfs_log_item *
214 xfs_cud_item_intent(
215 	struct xfs_log_item	*lip)
216 {
217 	return &CUD_ITEM(lip)->cud_cuip->cui_item;
218 }
219 
220 static const struct xfs_item_ops xfs_cud_item_ops = {
221 	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
222 			  XFS_ITEM_INTENT_DONE,
223 	.iop_size	= xfs_cud_item_size,
224 	.iop_format	= xfs_cud_item_format,
225 	.iop_release	= xfs_cud_item_release,
226 	.iop_intent	= xfs_cud_item_intent,
227 };
228 
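/* Allocate a CUD, point it at the given CUI, and join it to the transaction. */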
229 static struct xfs_cud_log_item *
230 xfs_trans_get_cud(
231 	struct xfs_trans		*tp,
232 	struct xfs_cui_log_item		*cuip)
233 {
234 	struct xfs_cud_log_item		*cudp;
235 
236 	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
237 	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
238 			  &xfs_cud_item_ops);
239 	cudp->cud_cuip = cuip;
240 	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
241 
242 	xfs_trans_add_item(tp, &cudp->cud_item);
243 	return cudp;
244 }
245 
246 /*
247  * Finish a refcount update and log it to the CUD. Note that the
248  * transaction is marked dirty regardless of whether the refcount
249  * update succeeds or fails to support the CUI/CUD lifecycle rules.
250  */
251 static int
252 xfs_trans_log_finish_refcount_update(
253 	struct xfs_trans		*tp,
254 	struct xfs_cud_log_item		*cudp,
255 	enum xfs_refcount_intent_type	type,
256 	xfs_fsblock_t			startblock,
257 	xfs_extlen_t			blockcount,
258 	xfs_fsblock_t			*new_fsb,
259 	xfs_extlen_t			*new_len,
260 	struct xfs_btree_cur		**pcur)
261 {
262 	int				error;
263 
264 	error = xfs_refcount_finish_one(tp, type, startblock,
265 			blockcount, new_fsb, new_len, pcur);
266 
267 	/*
268 	 * Mark the transaction dirty, even on error. This ensures the
269 	 * transaction is aborted, which:
270 	 *
271 	 * 1.) releases the CUI and frees the CUD
272 	 * 2.) shuts down the filesystem
273 	 */
274 	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
275 	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
276 
277 	return error;
278 }
279 
280 /* Sort refcount intents by AG so that we process the AGs in order. */
281 static int
282 xfs_refcount_update_diff_items(
283 	void				*priv,
284 	const struct list_head		*a,
285 	const struct list_head		*b)
286 {
287 	struct xfs_mount		*mp = priv;
288 	struct xfs_refcount_intent	*ra;
289 	struct xfs_refcount_intent	*rb;
290 
291 	ra = container_of(a, struct xfs_refcount_intent, ri_list);
292 	rb = container_of(b, struct xfs_refcount_intent, ri_list);
293 	return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
294 		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
295 }
296 
297 /* Set the phys extent flags for this refcount update. */
298 static void
299 xfs_trans_set_refcount_flags(
300 	struct xfs_phys_extent		*refc,
301 	enum xfs_refcount_intent_type	type)
302 {
303 	refc->pe_flags = 0;
304 	switch (type) {
305 	case XFS_REFCOUNT_INCREASE:
306 	case XFS_REFCOUNT_DECREASE:
307 	case XFS_REFCOUNT_ALLOC_COW:
308 	case XFS_REFCOUNT_FREE_COW:
309 		refc->pe_flags |= type;
310 		break;
311 	default:
312 		ASSERT(0);
313 	}
314 }
315 
316 /* Log refcount updates in the intent item. */
317 STATIC void
318 xfs_refcount_update_log_item(
319 	struct xfs_trans		*tp,
320 	struct xfs_cui_log_item		*cuip,
321 	struct xfs_refcount_intent	*refc)
322 {
323 	uint				next_extent;
324 	struct xfs_phys_extent		*ext;
325 
326 	tp->t_flags |= XFS_TRANS_DIRTY;
327 	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
328 
329 	/*
330 	 * atomic_inc_return gives us the value after the increment;
331 	 * we want to use it as an array index so we need to subtract 1 from
332 	 * it.
333 	 */
334 	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
335 	ASSERT(next_extent < cuip->cui_format.cui_nextents);
336 	ext = &cuip->cui_format.cui_extents[next_extent];
337 	ext->pe_startblock = refc->ri_startblock;
338 	ext->pe_len = refc->ri_blockcount;
339 	xfs_trans_set_refcount_flags(ext, refc->ri_type);
340 }
341 
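/* Create a CUI covering all the refcount updates in the @items list. */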
342 static struct xfs_log_item *
343 xfs_refcount_update_create_intent(
344 	struct xfs_trans		*tp,
345 	struct list_head		*items,
346 	unsigned int			count,
347 	bool				sort)
348 {
349 	struct xfs_mount		*mp = tp->t_mountp;
350 	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
351 	struct xfs_refcount_intent	*refc;
352 
353 	ASSERT(count > 0);
354 
355 	xfs_trans_add_item(tp, &cuip->cui_item);
356 	if (sort)
357 		list_sort(mp, items, xfs_refcount_update_diff_items);
358 	list_for_each_entry(refc, items, ri_list)
359 		xfs_refcount_update_log_item(tp, cuip, refc);
360 	return &cuip->cui_item;
361 }
362 
363 /* Get a CUD so we can process all the deferred refcount updates. */
364 static struct xfs_log_item *
365 xfs_refcount_update_create_done(
366 	struct xfs_trans		*tp,
367 	struct xfs_log_item		*intent,
368 	unsigned int			count)
369 {
370 	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
371 }
372 
373 /* Process a deferred refcount update. */
374 STATIC int
375 xfs_refcount_update_finish_item(
376 	struct xfs_trans		*tp,
377 	struct xfs_log_item		*done,
378 	struct list_head		*item,
379 	struct xfs_btree_cur		**state)
380 {
381 	struct xfs_refcount_intent	*refc;
382 	xfs_fsblock_t			new_fsb;
383 	xfs_extlen_t			new_aglen;
384 	int				error;
385 
386 	refc = container_of(item, struct xfs_refcount_intent, ri_list);
387 	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
388 			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
389 			&new_fsb, &new_aglen, state);
390 
391 	/* Did we run out of reservation?  Requeue what we didn't finish. */
392 	if (!error && new_aglen > 0) {
393 		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
394 		       refc->ri_type == XFS_REFCOUNT_DECREASE);
395 		refc->ri_startblock = new_fsb;
396 		refc->ri_blockcount = new_aglen;
397 		return -EAGAIN;
398 	}
399 	kmem_cache_free(xfs_refcount_intent_cache, refc);
400 	return error;
401 }
402 
403 /* Abort a pending CUI. */
404 STATIC void
405 xfs_refcount_update_abort_intent(
406 	struct xfs_log_item		*intent)
407 {
408 	xfs_cui_release(CUI_ITEM(intent));
409 }
410 
411 /* Cancel a deferred refcount update. */
412 STATIC void
413 xfs_refcount_update_cancel_item(
414 	struct list_head		*item)
415 {
416 	struct xfs_refcount_intent	*refc;
417 
418 	refc = container_of(item, struct xfs_refcount_intent, ri_list);
419 	kmem_cache_free(xfs_refcount_intent_cache, refc);
420 }
421 
422 const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
423 	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
424 	.create_intent	= xfs_refcount_update_create_intent,
425 	.abort_intent	= xfs_refcount_update_abort_intent,
426 	.create_done	= xfs_refcount_update_create_done,
427 	.finish_item	= xfs_refcount_update_finish_item,
428 	.finish_cleanup = xfs_refcount_finish_one_cleanup,
429 	.cancel_item	= xfs_refcount_update_cancel_item,
430 };
431 
432 /* Is this recovered CUI ok? */
433 static inline bool
434 xfs_cui_validate_phys(
435 	struct xfs_mount		*mp,
436 	struct xfs_phys_extent		*refc)
437 {
438 	if (!xfs_has_reflink(mp))
439 		return false;
440 
441 	if (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
442 		return false;
443 
444 	switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
445 	case XFS_REFCOUNT_INCREASE:
446 	case XFS_REFCOUNT_DECREASE:
447 	case XFS_REFCOUNT_ALLOC_COW:
448 	case XFS_REFCOUNT_FREE_COW:
449 		break;
450 	default:
451 		return false;
452 	}
453 
454 	return xfs_verify_fsbext(mp, refc->pe_startblock, refc->pe_len);
455 }
456 
457 /*
458  * Process a refcount update intent item that was recovered from the log.
459  * We need to update the refcountbt.
460  */
461 STATIC int
462 xfs_cui_item_recover(
463 	struct xfs_log_item		*lip,
464 	struct list_head		*capture_list)
465 {
466 	struct xfs_bmbt_irec		irec;
467 	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
468 	struct xfs_phys_extent		*refc;
469 	struct xfs_cud_log_item		*cudp;
470 	struct xfs_trans		*tp;
471 	struct xfs_btree_cur		*rcur = NULL;
472 	struct xfs_mount		*mp = lip->li_log->l_mp;
473 	xfs_fsblock_t			new_fsb;
474 	xfs_extlen_t			new_len;
475 	unsigned int			refc_type;
476 	bool				requeue_only = false;
477 	enum xfs_refcount_intent_type	type;
478 	int				i;
479 	int				error = 0;
480 
481 	/*
482 	 * First check the validity of the extents described by the
483 	 * CUI.  If any are bad, then assume that all are bad and
484 	 * just toss the CUI.
485 	 */
486 	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
487 		if (!xfs_cui_validate_phys(mp,
488 					&cuip->cui_format.cui_extents[i])) {
489 			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
490 					&cuip->cui_format,
491 					sizeof(cuip->cui_format));
492 			return -EFSCORRUPTED;
493 		}
494 	}
495 
496 	/*
497 	 * Under normal operation, refcount updates are deferred, so we
498 	 * wouldn't be adding them directly to a transaction.  All
499 	 * refcount updates manage reservation usage internally and
500 	 * dynamically by deferring work that won't fit in the
501 	 * transaction.  Normally, any work that needs to be deferred
502 	 * gets attached to the same defer_ops that scheduled the
503 	 * refcount update.  However, we're in log recovery here, so we
504  * use the passed in defer_ops to finish up any work that
505 	 * doesn't fit.  We need to reserve enough blocks to handle a
506 	 * full btree split on either end of the refcount range.
507 	 */
508 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
509 			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
510 	if (error)
511 		return error;
512 
513 	cudp = xfs_trans_get_cud(tp, cuip);
514 
515 	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
516 		refc = &cuip->cui_format.cui_extents[i];
517 		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
518 		switch (refc_type) {
519 		case XFS_REFCOUNT_INCREASE:
520 		case XFS_REFCOUNT_DECREASE:
521 		case XFS_REFCOUNT_ALLOC_COW:
522 		case XFS_REFCOUNT_FREE_COW:
523 			type = refc_type;
524 			break;
525 		default:
526 			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
527 			error = -EFSCORRUPTED;
528 			goto abort_error;
529 		}
530 		if (requeue_only) {
531 			new_fsb = refc->pe_startblock;
532 			new_len = refc->pe_len;
533 		} else
534 			error = xfs_trans_log_finish_refcount_update(tp, cudp,
535 				type, refc->pe_startblock, refc->pe_len,
536 				&new_fsb, &new_len, &rcur);
537 		if (error == -EFSCORRUPTED)
538 			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
539 					refc, sizeof(*refc));
540 		if (error)
541 			goto abort_error;
542 
543 		/* Requeue what we didn't finish. */
544 		if (new_len > 0) {
545 			irec.br_startblock = new_fsb;
546 			irec.br_blockcount = new_len;
547 			switch (type) {
548 			case XFS_REFCOUNT_INCREASE:
549 				xfs_refcount_increase_extent(tp, &irec);
550 				break;
551 			case XFS_REFCOUNT_DECREASE:
552 				xfs_refcount_decrease_extent(tp, &irec);
553 				break;
554 			case XFS_REFCOUNT_ALLOC_COW:
555 				xfs_refcount_alloc_cow_extent(tp,
556 						irec.br_startblock,
557 						irec.br_blockcount);
558 				break;
559 			case XFS_REFCOUNT_FREE_COW:
560 				xfs_refcount_free_cow_extent(tp,
561 						irec.br_startblock,
562 						irec.br_blockcount);
563 				break;
564 			default:
565 				ASSERT(0);
566 			}
567 			requeue_only = true;
568 		}
569 	}
570 
571 	xfs_refcount_finish_one_cleanup(tp, rcur, error);
572 	return xfs_defer_ops_capture_and_commit(tp, capture_list);
573 
574 abort_error:
575 	xfs_refcount_finish_one_cleanup(tp, rcur, error);
576 	xfs_trans_cancel(tp);
577 	return error;
578 }
579 
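/* Does this CUI carry the given intent id? */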
580 STATIC bool
581 xfs_cui_item_match(
582 	struct xfs_log_item	*lip,
583 	uint64_t		intent_id)
584 {
585 	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
586 }
587 
588 /* Relog an intent item to push the log tail forward. */
589 static struct xfs_log_item *
590 xfs_cui_item_relog(
591 	struct xfs_log_item		*intent,
592 	struct xfs_trans		*tp)
593 {
594 	struct xfs_cud_log_item		*cudp;
595 	struct xfs_cui_log_item		*cuip;
596 	struct xfs_phys_extent		*extp;
597 	unsigned int			count;
598 
599 	count = CUI_ITEM(intent)->cui_format.cui_nextents;
600 	extp = CUI_ITEM(intent)->cui_format.cui_extents;
601 
602 	tp->t_flags |= XFS_TRANS_DIRTY;
603 	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
604 	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
605 
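	/*
	 * Copy the pending extents into a fresh CUI so that the original
	 * intent can be retired and the log tail can move past it.
	 */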
606 	cuip = xfs_cui_init(tp->t_mountp, count);
607 	memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
608 	atomic_set(&cuip->cui_next_extent, count);
609 	xfs_trans_add_item(tp, &cuip->cui_item);
610 	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
611 	return &cuip->cui_item;
612 }
613 
614 static const struct xfs_item_ops xfs_cui_item_ops = {
615 	.flags		= XFS_ITEM_INTENT,
616 	.iop_size	= xfs_cui_item_size,
617 	.iop_format	= xfs_cui_item_format,
618 	.iop_unpin	= xfs_cui_item_unpin,
619 	.iop_release	= xfs_cui_item_release,
620 	.iop_recover	= xfs_cui_item_recover,
621 	.iop_match	= xfs_cui_item_match,
622 	.iop_relog	= xfs_cui_item_relog,
623 };
624 
625 /*
626  * Copy a CUI format buffer from the given buf into the destination
627  * CUI format structure.  The CUI/CUD items were designed not to need any
628  * special alignment handling.
629  */
630 static int
631 xfs_cui_copy_format(
632 	struct xfs_log_iovec		*buf,
633 	struct xfs_cui_log_format	*dst_cui_fmt)
634 {
635 	struct xfs_cui_log_format	*src_cui_fmt;
636 	uint				len;
637 
638 	src_cui_fmt = buf->i_addr;
639 	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
640 
641 	if (buf->i_len == len) {
642 		memcpy(dst_cui_fmt, src_cui_fmt, len);
643 		return 0;
644 	}
645 	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
646 	return -EFSCORRUPTED;
647 }
648 
649 /*
650  * This routine is called to create an in-core extent refcount update
651  * item from the cui format structure which was logged on disk.
652  * It allocates an in-core cui, copies the extents from the format
653  * structure into it, and adds the cui to the AIL with the given
654  * LSN.
655  */
656 STATIC int
657 xlog_recover_cui_commit_pass2(
658 	struct xlog			*log,
659 	struct list_head		*buffer_list,
660 	struct xlog_recover_item	*item,
661 	xfs_lsn_t			lsn)
662 {
663 	int				error;
664 	struct xfs_mount		*mp = log->l_mp;
665 	struct xfs_cui_log_item		*cuip;
666 	struct xfs_cui_log_format	*cui_formatp;
667 
668 	cui_formatp = item->ri_buf[0].i_addr;
669 
670 	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
671 	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
672 	if (error) {
673 		xfs_cui_item_free(cuip);
674 		return error;
675 	}
676 	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
677 	/*
678 	 * Insert the intent into the AIL directly and drop one reference so
679 	 * that finishing or canceling the work will drop the other.
680 	 */
681 	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
682 	xfs_cui_release(cuip);
683 	return 0;
684 }
685 
686 const struct xlog_recover_item_ops xlog_cui_item_ops = {
687 	.item_type		= XFS_LI_CUI,
688 	.commit_pass2		= xlog_recover_cui_commit_pass2,
689 };
690 
691 /*
692  * This routine is called when a CUD format structure is found in a committed
693  * transaction in the log. Its purpose is to cancel the corresponding CUI if it
694  * was still in the log. To do this it searches the AIL for the CUI with an id
695  * equal to that in the CUD format structure. If we find it, we drop the
696  * CUI's reference, which removes it from the AIL and frees it.
697  */
698 STATIC int
699 xlog_recover_cud_commit_pass2(
700 	struct xlog			*log,
701 	struct list_head		*buffer_list,
702 	struct xlog_recover_item	*item,
703 	xfs_lsn_t			lsn)
704 {
705 	struct xfs_cud_log_format	*cud_formatp;
706 
707 	cud_formatp = item->ri_buf[0].i_addr;
708 	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
709 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
710 		return -EFSCORRUPTED;
711 	}
712 
713 	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
714 	return 0;
715 }
716 
717 const struct xlog_recover_item_ops xlog_cud_item_ops = {
718 	.item_type		= XFS_LI_CUD,
719 	.commit_pass2		= xlog_recover_cud_commit_pass2,
720 };
721