// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

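/* Convert a log item pointer back to its containing RUI. */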
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

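/* Free the RUI, including any shadow log vector buffer attached to it. */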
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

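/*
 * Report the number of iovecs and the amount of log space needed to log the
 * given RUI item; a single iovec covers the whole rui_log_format structure.
 */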
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	ruip->rui_format.rui_type = lip->li_type;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount	*mp,
	unsigned short		item_type,
	uint			nextents)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT);

	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

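/* Convert a log item pointer back to its containing RUD. */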
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

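/*
 * Report the number of iovecs and the amount of log space needed to log the
 * given RUD item; the RUD is a fixed-size done marker, so one iovec suffices.
 */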
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD carries no extent array, so the format
 * structure is always a fixed size.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT);

	rudp->rud_format.rud_type = lip->li_type;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

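/* Return the RUI intent item that this RUD will mark as completed. */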
static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

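/* Convert a deferred-work list entry back to its rmap update intent. */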
static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_rmap_intent, ri_list);
}

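/* Does this intent item describe an update to the realtime device? */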
static inline bool
xfs_rui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	return lip->li_type == XFS_LI_RUI_RT;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_rmap_intent	*ra = ri_entry(a);
	struct xfs_rmap_intent	*rb = ri_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip,
	struct xfs_rmap_intent	*ri)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;

	map->me_flags = 0;
	if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (ri->ri_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (ri->ri_type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

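/*
 * Create an intent log item of the given type and log every queued rmap
 * update into it, optionally sorting the updates by group first.
 */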
static struct xfs_log_item *
__xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort,
	unsigned short		item_type)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rui_log_item	*ruip;
	struct xfs_rmap_intent	*ri;

	ASSERT(count > 0);

	ruip = xfs_rui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

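/* Create an RUI covering deferred rmap updates for the data device. */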
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI);
}

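/* Pick the RUD log item type that pairs with this RUI. */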
static inline unsigned short
xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip)
{
	return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	unsigned int		count)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item	*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item,
			xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}

/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_rmap_intent	*ri)
{
	struct xfs_mount	*mp = tp->t_mountp;

	/*
	 * Deferred rmap updates for the realtime and data sections must use
	 * separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_rmap_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrmap_update_defer_type :
			&xfs_rmap_update_defer_type);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_rmap_intent	*ri = ri_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_rmap_intent	*ri = ri_entry(item);
	int			error;

	error = xfs_rmap_finish_one(tp, ri, state);

	xfs_rmap_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp = NULL;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount	*mp,
	bool			isrt,
	struct xfs_map_extent	*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	if (isrt)
		return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

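/*
 * Reconstruct an incore rmap update intent from a map extent that was
 * recovered from the log and queue it as deferred work.
 */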
static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	ri->ri_group = xfs_group_intent_get(mp, map->me_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_rui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp, isrt,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}

		xfs_rui_recover_work(mp, dfp, isrt,
				&ruip->rui_format.rui_extents[i]);
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	ASSERT(intent->li_type == XFS_LI_RUI ||
	       intent->li_type == XFS_LI_RUI_RT);

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};

#ifdef CONFIG_XFS_RT
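/* Create an RUI covering deferred rmap updates for the realtime device. */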
static struct xfs_log_item *
xfs_rtrmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI_RT);
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rtrmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rtrmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
};
#endif

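/* Match log intent items by the intent id stashed in the log format. */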
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};

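/*
 * Copy a recovered RUI format structure, header and extent array included,
 * into the incore RUI that will be added to the AIL.
 */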
static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
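/*
 * Recreate an incore realtime rmap update intent item from the rui format
 * structure found in the log and add it to the AIL with the given LSN.
 */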
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rtrmap_update_defer_type);
	return 0;
}
#else
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
	return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtrui_item_ops = {
	.item_type		= XFS_LI_RUI_RT,
	.commit_pass2		= xlog_recover_rtrui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
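/* Cancel the corresponding realtime RUI when its RUD is found in the log. */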
STATIC int
xlog_recover_rtrud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI_RT,
			rud_formatp->rud_rui_id);
	return 0;
}
#else
# define xlog_recover_rtrud_commit_pass2	xlog_recover_rtrui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtrud_item_ops = {
	.item_type		= XFS_LI_RUD_RT,
	.commit_pass2		= xlog_recover_rtrud_commit_pass2,
};