// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

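/* Convert a generic log item back to the RUI that embeds it. */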
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

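/*
 * Free an RUI item, including the shadow log vector buffer.  Oversized items
 * were allocated from the heap and are returned there; everything else goes
 * back to the RUI cache.
 */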
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

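/*
 * Report the number of log iovecs and the log space needed to describe all
 * the rmap extents in this RUI.
 */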
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
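	/*
	 * Two references: one for the log side, dropped when the RUI is
	 * unpinned or released; one for the deferred work, dropped when the
	 * matching RUD is released or the intent is aborted.
	 */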
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

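/* Convert a generic log item back to the RUD that embeds it. */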
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

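/*
 * An RUD is logged as a single iovec containing only the fixed-size
 * rud_log_format structure.
 */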
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD carries no extent records of its own;
 * it only names the RUI that it completes.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

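/* Return the RUI that this RUD completes so the log can pair them up. */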
static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_rmap_intent, ri_list);
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_rmap_intent		*ra = ri_entry(a);
	struct xfs_rmap_intent		*rb = ri_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*ri)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;

	map->me_flags = 0;
	if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (ri->ri_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (ri->ri_type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

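/*
 * Create an RUI large enough to hold all of the queued rmap updates,
 * optionally sort them by AG, and log each update into the intent item.
 */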
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*ri;

	ASSERT(count > 0);

	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}

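/*
 * Roughly, a deferred rmap update flows through the hooks in this file as
 * follows (a simplified sketch; the defer machinery in xfs_defer.c drives
 * the actual call sequence):
 *
 *	xfs_rmap_defer_add()			queue the intent work
 *	xfs_rmap_update_create_intent()		log the RUI
 *	xfs_rmap_update_create_done()		log the RUD
 *	xfs_rmap_update_finish_item()		apply the rmapbt update
 *	xfs_rud_item_release()			drop the RUD and its RUI ref
 */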
/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_rmap_intent	*ri)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_rmap_defer(mp, ri);

	ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
			XG_TYPE_AG);
	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);
	int				error;

	error = xfs_rmap_finish_one(tp, ri, state);

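	/* Free the work item whether or not the rmapbt update succeeded. */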
	xfs_rmap_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp = NULL;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	struct xfs_map_extent		*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

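/*
 * Reconstruct an in-core rmap intent from a map_extent record that was
 * recovered from the log and queue it as deferred work.
 */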
static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, XG_TYPE_AG);

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}

		xfs_rui_recover_work(mp, dfp, &ruip->rui_format.rui_extents[i]);
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};

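/* Match an RUI in the AIL by its intent id so recovery can find it. */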
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};

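/*
 * Copy the fixed part of an on-disk RUI log format into the in-core item,
 * then copy each map_extent record individually.
 */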
static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};