// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_item.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_trace.h"

struct kmem_cache	*xfs_bui_cache;
struct kmem_cache	*xfs_bud_cache;

static const struct xfs_item_ops xfs_bui_item_ops;

static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}

STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kvfree(buip->bui_item.li_lv_shadow);
	kmem_cache_free(xfs_bui_cache, buip);
}

/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when called by xfs_bui_release() from BUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the BUI.
 */
STATIC void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (!atomic_dec_and_test(&buip->bui_refcount))
		return;

	xfs_trans_ail_delete(&buip->bui_item, 0);
	xfs_bui_item_free(buip);
}

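/*
 * This returns the number of iovecs and bytes needed to log the given bui
 * item.  We only need 1 iovec for a bui item; it just logs the
 * bui_log_format structure.
 */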
STATIC void
xfs_bui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
}

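/* Calculate the amount of log space a BUI with nr extents consumes. */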
unsigned int xfs_bui_log_space(unsigned int nr)
{
	return xlog_item_space(1, xfs_bui_log_format_sizeof(nr));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}

/*
 * The unpin operation is the last place a BUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the BUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the BUI to either construct
 * and commit the BUD or drop the BUD's reference in the event of error. Simply
 * drop the log's BUI reference now that the log is done with it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}

/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}

/*
 * Allocate and initialize a bui item.  The number of extents is fixed at
 * XFS_BUI_MAX_FAST_EXTENTS.
 */
STATIC struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount	*mp)
{
	struct xfs_bui_log_item	*buip;

	buip = kmem_cache_zalloc(xfs_bui_cache, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}

static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}

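/*
 * This returns the number of iovecs and bytes needed to log the given bud
 * item.  We only need 1 iovec for a bud item; it just logs the
 * bud_log_format structure.
 */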
STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}

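/* Calculate the amount of log space a BUD consumes. */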
unsigned int xfs_bud_log_space(void)
{
	return xlog_item_space(1, sizeof(struct xfs_bud_log_format));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * Unlike the bui case, a bud has no extent slots, so there is
 * nothing to assert here.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}

/*
 * The BUD is released when the transaction is either committed or cancelled.
 * In either case, drop our reference to the BUI and free the BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	xfs_bui_release(budp->bud_buip);
	kvfree(budp->bud_item.li_lv_shadow);
	kmem_cache_free(xfs_bud_cache, budp);
}

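/* Return the BUI intent item that this BUD done item points back to. */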
static struct xfs_log_item *
xfs_bud_item_intent(
	struct xfs_log_item	*lip)
{
	return &BUD_ITEM(lip)->bud_buip->bui_item;
}

static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
	.iop_intent	= xfs_bud_item_intent,
};

static inline struct xfs_bmap_intent *bi_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_bmap_intent, bi_list);
}

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_bmap_intent	*ba = bi_entry(a);
	struct xfs_bmap_intent	*bb = bi_entry(b);

	return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_bui_log_item	*buip,
	struct xfs_bmap_intent	*bi)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bi->bi_owner->i_ino;
	map->me_startblock = bi->bi_bmap.br_startblock;
	map->me_startoff = bi->bi_bmap.br_startoff;
	map->me_len = bi->bi_bmap.br_blockcount;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		map->me_flags = bi->bi_type;
		break;
	default:
		ASSERT(0);
	}
	if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
	if (bi->bi_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
	if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
		map->me_flags |= XFS_BMAP_EXTENT_REALTIME;
}

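/*
 * Create a BUI intent item and log all of the deferred bmap updates on
 * the list into it, sorting the items by inode number first if requested.
 */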
static struct xfs_log_item *
xfs_bmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_bui_log_item	*buip = xfs_bui_init(mp);
	struct xfs_bmap_intent	*bi;

	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);

	if (sort)
		list_sort(mp, items, xfs_bmap_update_diff_items);
	list_for_each_entry(bi, items, bi_list)
		xfs_bmap_update_log_item(tp, buip, bi);
	return &buip->bui_item;
}

/* Get a BUD so we can process all the deferred bmap updates. */
static struct xfs_log_item *
xfs_bmap_update_create_done(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	unsigned int		count)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(intent);
	struct xfs_bud_log_item	*budp;

	budp = kmem_cache_zalloc(xfs_bud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	return &budp->bud_item;
}

/* Take a passive ref to the group containing the space we're mapping. */
static inline void
xfs_bmap_update_get_group(
	struct xfs_mount	*mp,
	struct xfs_bmap_intent	*bi)
{
	enum xfs_group_type	type = XG_TYPE_AG;

	if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
		type = XG_TYPE_RTG;

	/*
	 * Bump the intent count on behalf of the deferred rmap and refcount
	 * intent items that we can queue when we finish this bmap work.
	 * This new intent item will bump the intent count before the bmap
	 * intent drops the intent count, ensuring that the intent count
	 * remains nonzero across the transaction roll.
	 */
	bi->bi_group = xfs_group_intent_get(mp, bi->bi_bmap.br_startblock,
			type);
}

/* Add this deferred BUI to the transaction. */
void
xfs_bmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	xfs_bmap_update_get_group(tp->t_mountp, bi);

	/*
	 * Ensure the deferred mapping is pre-recorded in i_delayed_blks.
	 *
	 * Otherwise stat can report zero blocks for an inode that actually has
	 * data when the entire mapping is in the process of being overwritten
	 * using the out-of-place write path. This is undone in xfs_bmapi_remap
	 * after it has incremented di_nblocks for a successful operation.
	 */
	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;

	trace_xfs_bmap_defer(bi);
	xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_bmap_intent	*bi = bi_entry(item);

	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks -= bi->bi_bmap.br_blockcount;

	xfs_group_intent_put(bi->bi_group);
	kmem_cache_free(xfs_bmap_intent_cache, bi);
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_bmap_intent	*bi = bi_entry(item);
	int			error;

	error = xfs_bmap_finish_one(tp, bi);
	if (!error && bi->bi_bmap.br_blockcount > 0) {
		ASSERT(bi->bi_type == XFS_BMAP_UNMAP);
		return -EAGAIN;
	}

	xfs_bmap_update_cancel_item(item);
	return error;
}

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_bui_release(BUI_ITEM(intent));
}

/* Is this recovered BUI ok? */
static inline bool
xfs_bui_validate(
	struct xfs_mount	*mp,
	struct xfs_bui_log_item	*buip)
{
	struct xfs_map_extent	*map;

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
		return false;

	map = &buip->bui_format.bui_extents[0];

	if (map->me_flags & ~XFS_BMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		break;
	default:
		return false;
	}

	if (!xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	if (map->me_flags & XFS_BMAP_EXTENT_REALTIME)
		return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

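/*
 * Grab the inode named in a recovered mapping, allocate an incore bmap
 * intent to match, and queue it to the list of recovered work items.
 */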
static inline struct xfs_bmap_intent *
xfs_bui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	struct xfs_inode		**ipp,
	struct xfs_map_extent		*map)
{
	struct xfs_bmap_intent		*bi;
	int				error;

	error = xlog_recover_iget(mp, map->me_owner, ipp);
	if (error)
		return ERR_PTR(error);

	bi = kmem_cache_zalloc(xfs_bmap_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	bi->bi_whichfork = (map->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bi->bi_type = map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	bi->bi_bmap.br_startblock = map->me_startblock;
	bi->bi_bmap.br_startoff = map->me_startoff;
	bi->bi_bmap.br_blockcount = map->me_len;
	bi->bi_bmap.br_state = (map->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	bi->bi_owner = *ipp;
	xfs_bmap_update_get_group(mp, bi);

	/* see xfs_bmap_defer_add for details */
	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
	xfs_defer_add_item(dfp, &bi->bi_list);
	return bi;
}

/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
STATIC int
xfs_bmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_bui_log_item		*buip = BUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_map_extent		*map;
	struct xfs_bmap_intent		*work;
	int				iext_delta;
	int				error = 0;

	if (!xfs_bui_validate(mp, buip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
		return -EFSCORRUPTED;
	}

	map = &buip->bui_format.bui_extents[0];
	work = xfs_bui_recover_work(mp, dfp, &ip, map);
	if (IS_ERR(work))
		return PTR_ERR(work);

	/* Allocate transaction and do the work. */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		goto err_rele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	if (!!(map->me_flags & XFS_BMAP_EXTENT_REALTIME) !=
	    xfs_ifork_is_realtime(ip, work->bi_whichfork)) {
		error = -EFSCORRUPTED;
		goto err_cancel;
	}

	if (work->bi_type == XFS_BMAP_MAP)
		iext_delta = XFS_IEXT_ADD_NOSPLIT_CNT;
	else
		iext_delta = XFS_IEXT_PUNCH_HOLE_CNT;

	error = xfs_iext_count_extend(tp, ip, work->bi_whichfork, iext_delta);
	if (error)
		goto err_cancel;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inode
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	if (error)
		goto err_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
err_rele:
	xfs_irele(ip);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_bmap_relog_intent(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	struct xfs_log_item	*done_item)
{
	struct xfs_bui_log_item	*buip;
	struct xfs_map_extent	*map;
	unsigned int		count;

	count = BUI_ITEM(intent)->bui_format.bui_nextents;
	map = BUI_ITEM(intent)->bui_format.bui_extents;

	buip = xfs_bui_init(tp->t_mountp);
	memcpy(buip->bui_format.bui_extents, map, count * sizeof(*map));
	atomic_set(&buip->bui_next_extent, count);

	return &buip->bui_item;
}

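/* Deferred work type for bmap update intent items. */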
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.name		= "bmap",
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
	.recover_work	= xfs_bmap_recover_work,
	.relog_intent	= xfs_bmap_relog_intent,
};

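/* Is this log item the BUI with the given intent id? */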
STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}

static const struct xfs_item_ops xfs_bui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_match	= xfs_bui_item_match,
};

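/* Copy the header and all logged mappings from one BUI format to another. */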
static inline void
xfs_bui_copy_format(
	struct xfs_bui_log_format	*dst,
	const struct xfs_bui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_bui_log_format, bui_extents));

	for (i = 0; i < src->bui_nextents; i++)
		memcpy(&dst->bui_extents[i], &src->bui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;
	size_t				len;

	bui_formatp = item->ri_buf[0].iov_base;

	if (item->ri_buf[0].iov_len < xfs_bui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	len = xfs_bui_log_format_sizeof(bui_formatp->bui_nextents);
	if (item->ri_buf[0].iov_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	buip = xfs_bui_init(mp);
	xfs_bui_copy_format(&buip->bui_format, bui_formatp);
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);

	xlog_recover_intent_item(log, &buip->bui_item, lsn,
			&xfs_bmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};

/*
 * This routine is called when a BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_bud_log_format	*bud_formatp;

	bud_formatp = item->ri_buf[0].iov_base;
	if (item->ri_buf[0].iov_len != sizeof(struct xfs_bud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};