// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache *xfs_cui_cache;
struct kmem_cache *xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

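/* Pull the embedded CUI out of a generic log item. */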
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cui_log_item, cui_item);
}

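/* Free a CUI and its shadow log vector buffer. */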
STATIC void
xfs_cui_item_free(
        struct xfs_cui_log_item *cuip)
{
        kvfree(cuip->cui_item.li_lv_shadow);
        if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
                kfree(cuip);
        else
                kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
        struct xfs_cui_log_item *cuip)
{
        ASSERT(atomic_read(&cuip->cui_refcount) > 0);
        if (!atomic_dec_and_test(&cuip->cui_refcount))
                return;

        xfs_trans_ail_delete(&cuip->cui_item, 0);
        xfs_cui_item_free(cuip);
}

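/* Report the number of log iovecs and the space needed to log a CUI. */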
STATIC void
xfs_cui_item_size(
        struct xfs_log_item *lip,
        int *nvecs,
        int *nbytes)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        *nvecs += 1;
        *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
        struct xfs_log_item *lip,
        struct xfs_log_vec *lv)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
        struct xfs_log_iovec *vecp = NULL;

        ASSERT(atomic_read(&cuip->cui_next_extent) ==
                        cuip->cui_format.cui_nextents);
        ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

        cuip->cui_format.cui_type = lip->li_type;
        cuip->cui_format.cui_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
                        xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
        struct xfs_log_item *lip,
        int remove)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
        struct xfs_log_item *lip)
{
        xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
        struct xfs_mount *mp,
        unsigned short item_type,
        uint nextents)
{
        struct xfs_cui_log_item *cuip;

        ASSERT(nextents > 0);
        ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT);

        if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
                cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
                                GFP_KERNEL | __GFP_NOFAIL);
        else
                cuip = kmem_cache_zalloc(xfs_cui_cache,
                                GFP_KERNEL | __GFP_NOFAIL);

        xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops);
        cuip->cui_format.cui_nextents = nextents;
        cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
        atomic_set(&cuip->cui_next_extent, 0);
        atomic_set(&cuip->cui_refcount, 2);

        return cuip;
}

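/* Pull the embedded CUD out of a generic log item. */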
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cud_log_item, cud_item);
}

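/* Report the number of log iovecs and the space needed to log a CUD. */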
STATIC void
xfs_cud_item_size(
        struct xfs_log_item *lip,
        int *nvecs,
        int *nbytes)
{
        *nvecs += 1;
        *nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
        struct xfs_log_item *lip,
        struct xfs_log_vec *lv)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
        struct xfs_log_iovec *vecp = NULL;

        ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT);

        cudp->cud_format.cud_type = lip->li_type;
        cudp->cud_format.cud_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
                        sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
        struct xfs_log_item *lip)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);

        xfs_cui_release(cudp->cud_cuip);
        kvfree(cudp->cud_item.li_lv_shadow);
        kmem_cache_free(xfs_cud_cache, cudp);
}

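/* Point the CUD back at the CUI that it retires. */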
static struct xfs_log_item *
xfs_cud_item_intent(
        struct xfs_log_item *lip)
{
        return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
        .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED |
                 XFS_ITEM_INTENT_DONE,
        .iop_size = xfs_cud_item_size,
        .iop_format = xfs_cud_item_format,
        .iop_release = xfs_cud_item_release,
        .iop_intent = xfs_cud_item_intent,
};

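/* Pull a refcount update intent out of its list entry. */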
static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
{
        return list_entry(e, struct xfs_refcount_intent, ri_list);
}

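/* Does this CUI describe extents in the realtime section? */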
static inline bool
xfs_cui_item_isrt(const struct xfs_log_item *lip)
{
        ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

        return lip->li_type == XFS_LI_CUI_RT;
}

/* Sort refcount intents by group. */
static int
xfs_refcount_update_diff_items(
        void *priv,
        const struct list_head *a,
        const struct list_head *b)
{
        struct xfs_refcount_intent *ra = ci_entry(a);
        struct xfs_refcount_intent *rb = ci_entry(b);

        return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
        struct xfs_trans *tp,
        struct xfs_cui_log_item *cuip,
        struct xfs_refcount_intent *ri)
{
        uint next_extent;
        struct xfs_phys_extent *pmap;

        /*
         * atomic_inc_return gives us the value after the increment;
         * we want to use it as an array index so we need to subtract 1 from
         * it.
         */
        next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
        ASSERT(next_extent < cuip->cui_format.cui_nextents);
        pmap = &cuip->cui_format.cui_extents[next_extent];
        pmap->pe_startblock = ri->ri_startblock;
        pmap->pe_len = ri->ri_blockcount;

        pmap->pe_flags = 0;
        switch (ri->ri_type) {
        case XFS_REFCOUNT_INCREASE:
        case XFS_REFCOUNT_DECREASE:
        case XFS_REFCOUNT_ALLOC_COW:
        case XFS_REFCOUNT_FREE_COW:
                pmap->pe_flags |= ri->ri_type;
                break;
        default:
                ASSERT(0);
        }
}

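/*
 * Create a CUI of the given type, optionally sort the incoming intents by
 * group, and record each of them in the intent item.
 */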
static struct xfs_log_item *
__xfs_refcount_update_create_intent(
        struct xfs_trans *tp,
        struct list_head *items,
        unsigned int count,
        bool sort,
        unsigned short item_type)
{
        struct xfs_mount *mp = tp->t_mountp;
        struct xfs_cui_log_item *cuip;
        struct xfs_refcount_intent *ri;

        ASSERT(count > 0);

        cuip = xfs_cui_init(mp, item_type, count);
        if (sort)
                list_sort(mp, items, xfs_refcount_update_diff_items);
        list_for_each_entry(ri, items, ri_list)
                xfs_refcount_update_log_item(tp, cuip, ri);
        return &cuip->cui_item;
}

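/* Create a CUI covering deferred refcount updates to the data device. */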
static struct xfs_log_item *
xfs_refcount_update_create_intent(
        struct xfs_trans *tp,
        struct list_head *items,
        unsigned int count,
        bool sort)
{
        return __xfs_refcount_update_create_intent(tp, items, count, sort,
                        XFS_LI_CUI);
}

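/* Compute the CUD type (realtime or data) that matches this CUI. */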
static inline unsigned short
xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip)
{
        return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
        struct xfs_trans *tp,
        struct xfs_log_item *intent,
        unsigned int count)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(intent);
        struct xfs_cud_log_item *cudp;

        cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
        xfs_log_item_init(tp->t_mountp, &cudp->cud_item,
                        xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops);
        cudp->cud_cuip = cuip;
        cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

        return &cudp->cud_item;
}

/* Add this deferred CUI to the transaction. */
void
xfs_refcount_defer_add(
        struct xfs_trans *tp,
        struct xfs_refcount_intent *ri)
{
        struct xfs_mount *mp = tp->t_mountp;

        /*
         * Deferred refcount updates for the realtime and data sections must
         * use separate transactions to finish deferred work because updates to
         * realtime metadata files can lock AGFs to allocate btree blocks and
         * we don't want that mixing with the AGF locks taken to finish data
         * section updates.
         */
        ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
                        ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

        trace_xfs_refcount_defer(mp, ri);
        xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
                        &xfs_rtrefcount_update_defer_type :
                        &xfs_refcount_update_defer_type);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
        struct list_head *item)
{
        struct xfs_refcount_intent *ri = ci_entry(item);

        xfs_group_intent_put(ri->ri_group);
        kmem_cache_free(xfs_refcount_intent_cache, ri);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
        struct xfs_trans *tp,
        struct xfs_log_item *done,
        struct list_head *item,
        struct xfs_btree_cur **state)
{
        struct xfs_refcount_intent *ri = ci_entry(item);
        int error;

        /* Did we run out of reservation? Requeue what we didn't finish. */
        error = xfs_refcount_finish_one(tp, ri, state);
        if (!error && ri->ri_blockcount > 0) {
                ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
                       ri->ri_type == XFS_REFCOUNT_DECREASE);
                return -EAGAIN;
        }

        xfs_refcount_update_cancel_item(item);
        return error;
}

/* Clean up after calling xfs_refcount_finish_one. */
STATIC void
xfs_refcount_finish_one_cleanup(
        struct xfs_trans *tp,
        struct xfs_btree_cur *rcur,
        int error)
{
        struct xfs_buf *agbp;

        if (rcur == NULL)
                return;
        agbp = rcur->bc_ag.agbp;
        xfs_btree_del_cursor(rcur, error);
        if (error && agbp)
                xfs_trans_brelse(tp, agbp);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
        struct xfs_log_item *intent)
{
        xfs_cui_release(CUI_ITEM(intent));
}

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
        struct xfs_mount *mp,
        bool isrt,
        struct xfs_phys_extent *pmap)
{
        if (!xfs_has_reflink(mp))
                return false;

        if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
                return false;

        switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
        case XFS_REFCOUNT_INCREASE:
        case XFS_REFCOUNT_DECREASE:
        case XFS_REFCOUNT_ALLOC_COW:
        case XFS_REFCOUNT_FREE_COW:
                break;
        default:
                return false;
        }

        if (isrt)
                return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);

        return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

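/* Turn a recovered log extent into an incore refcount intent item. */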
static inline void
xfs_cui_recover_work(
        struct xfs_mount *mp,
        struct xfs_defer_pending *dfp,
        bool isrt,
        struct xfs_phys_extent *pmap)
{
        struct xfs_refcount_intent *ri;

        ri = kmem_cache_alloc(xfs_refcount_intent_cache,
                        GFP_KERNEL | __GFP_NOFAIL);
        ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
        ri->ri_startblock = pmap->pe_startblock;
        ri->ri_blockcount = pmap->pe_len;
        ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
                        isrt ? XG_TYPE_RTG : XG_TYPE_AG);
        ri->ri_realtime = isrt;

        xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_refcount_recover_work(
        struct xfs_defer_pending *dfp,
        struct list_head *capture_list)
{
        struct xfs_trans_res resv;
        struct xfs_log_item *lip = dfp->dfp_intent;
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
        struct xfs_trans *tp;
        struct xfs_mount *mp = lip->li_log->l_mp;
        bool isrt = xfs_cui_item_isrt(lip);
        int i;
        int error = 0;

        /*
         * First check the validity of the extents described by the
         * CUI. If any are bad, then assume that all are bad and
         * just toss the CUI.
         */
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                if (!xfs_cui_validate_phys(mp, isrt,
                                &cuip->cui_format.cui_extents[i])) {
                        XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                        &cuip->cui_format,
                                        sizeof(cuip->cui_format));
                        return -EFSCORRUPTED;
                }

                xfs_cui_recover_work(mp, dfp, isrt,
                                &cuip->cui_format.cui_extents[i]);
        }

        /*
         * Under normal operation, refcount updates are deferred, so we
         * wouldn't be adding them directly to a transaction. All
         * refcount updates manage reservation usage internally and
         * dynamically by deferring work that won't fit in the
         * transaction. Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update. However, we're in log recovery here, so we
         * use the passed-in defer_ops to finish up any work that
         * doesn't fit. We need to reserve enough blocks to handle a
         * full btree split on either end of the refcount range.
         */
        resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
        error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
                        XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        error = xlog_recover_finish_intent(tp, dfp);
        if (error == -EFSCORRUPTED)
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                &cuip->cui_format,
                                sizeof(cuip->cui_format));
        if (error)
                goto abort_error;

        return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
        xfs_trans_cancel(tp);
        return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_refcount_relog_intent(
        struct xfs_trans *tp,
        struct xfs_log_item *intent,
        struct xfs_log_item *done_item)
{
        struct xfs_cui_log_item *cuip;
        struct xfs_phys_extent *pmap;
        unsigned int count;

        ASSERT(intent->li_type == XFS_LI_CUI ||
               intent->li_type == XFS_LI_CUI_RT);

        count = CUI_ITEM(intent)->cui_format.cui_nextents;
        pmap = CUI_ITEM(intent)->cui_format.cui_extents;

        cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count);
        memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
        atomic_set(&cuip->cui_next_extent, count);

        return &cuip->cui_item;
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
        .name = "refcount",
        .max_items = XFS_CUI_MAX_FAST_EXTENTS,
        .create_intent = xfs_refcount_update_create_intent,
        .abort_intent = xfs_refcount_update_abort_intent,
        .create_done = xfs_refcount_update_create_done,
        .finish_item = xfs_refcount_update_finish_item,
        .finish_cleanup = xfs_refcount_finish_one_cleanup,
        .cancel_item = xfs_refcount_update_cancel_item,
        .recover_work = xfs_refcount_recover_work,
        .relog_intent = xfs_refcount_relog_intent,
};

#ifdef CONFIG_XFS_RT
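/* Create a CUI covering deferred refcount updates to the realtime device. */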
static struct xfs_log_item *
xfs_rtrefcount_update_create_intent(
        struct xfs_trans *tp,
        struct list_head *items,
        unsigned int count,
        bool sort)
{
        return __xfs_refcount_update_create_intent(tp, items, count, sort,
                        XFS_LI_CUI_RT);
}

/* Process a deferred realtime refcount update. */
STATIC int
xfs_rtrefcount_update_finish_item(
        struct xfs_trans *tp,
        struct xfs_log_item *done,
        struct list_head *item,
        struct xfs_btree_cur **state)
{
        struct xfs_refcount_intent *ri = ci_entry(item);
        int error;

        error = xfs_rtrefcount_finish_one(tp, ri, state);

        /* Did we run out of reservation? Requeue what we didn't finish. */
        if (!error && ri->ri_blockcount > 0) {
                ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
                       ri->ri_type == XFS_REFCOUNT_DECREASE);
                return -EAGAIN;
        }

        xfs_refcount_update_cancel_item(item);
        return error;
}

/* Clean up after calling xfs_rtrefcount_finish_one. */
STATIC void
xfs_rtrefcount_finish_one_cleanup(
        struct xfs_trans *tp,
        struct xfs_btree_cur *rcur,
        int error)
{
        if (rcur)
                xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
        .name = "rtrefcount",
        .max_items = XFS_CUI_MAX_FAST_EXTENTS,
        .create_intent = xfs_rtrefcount_update_create_intent,
        .abort_intent = xfs_refcount_update_abort_intent,
        .create_done = xfs_refcount_update_create_done,
        .finish_item = xfs_rtrefcount_update_finish_item,
        .finish_cleanup = xfs_rtrefcount_finish_one_cleanup,
        .cancel_item = xfs_refcount_update_cancel_item,
        .recover_work = xfs_refcount_recover_work,
        .relog_intent = xfs_refcount_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
        .name = "rtrefcount",
};
#endif /* CONFIG_XFS_RT */

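/* Does this log item match the given recovery intent id? */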
STATIC bool
xfs_cui_item_match(
        struct xfs_log_item *lip,
        uint64_t intent_id)
{
        return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
        .flags = XFS_ITEM_INTENT,
        .iop_size = xfs_cui_item_size,
        .iop_format = xfs_cui_item_format,
        .iop_unpin = xfs_cui_item_unpin,
        .iop_release = xfs_cui_item_release,
        .iop_match = xfs_cui_item_match,
};

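/* Copy a recovered CUI log format into the in-core CUI, one extent at a time. */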
static inline void
xfs_cui_copy_format(
        struct xfs_cui_log_format *dst,
        const struct xfs_cui_log_format *src)
{
        unsigned int i;

        memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

        for (i = 0; i < src->cui_nextents; i++)
                memcpy(&dst->cui_extents[i], &src->cui_extents[i],
                                sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        struct xfs_mount *mp = log->l_mp;
        struct xfs_cui_log_item *cuip;
        struct xfs_cui_log_format *cui_formatp;
        size_t len;

        cui_formatp = item->ri_buf[0].i_addr;

        if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
        if (item->ri_buf[0].i_len != len) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
        xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
        atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

        xlog_recover_intent_item(log, &cuip->cui_item, lsn,
                        &xfs_refcount_update_defer_type);
        return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
        .item_type = XFS_LI_CUI,
        .commit_pass2 = xlog_recover_cui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
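/* Recover a realtime refcount update intent item from the log. */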
STATIC int
xlog_recover_rtcui_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        struct xfs_mount *mp = log->l_mp;
        struct xfs_cui_log_item *cuip;
        struct xfs_cui_log_format *cui_formatp;
        size_t len;

        cui_formatp = item->ri_buf[0].i_addr;

        if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
        if (item->ri_buf[0].i_len != len) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
        xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
        atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

        xlog_recover_intent_item(log, &cuip->cui_item, lsn,
                        &xfs_rtrefcount_update_defer_type);
        return 0;
}
#else
STATIC int
xlog_recover_rtcui_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
                        item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
        return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtcui_item_ops = {
        .item_type = XFS_LI_CUI_RT,
        .commit_pass2 = xlog_recover_rtcui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        struct xfs_cud_log_format *cud_formatp;

        cud_formatp = item->ri_buf[0].i_addr;
        if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
        return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
        .item_type = XFS_LI_CUD,
        .commit_pass2 = xlog_recover_cud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
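/* Cancel the realtime CUI named by a CUD_RT found in a committed transaction. */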
STATIC int
xlog_recover_rtcud_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        struct xfs_cud_log_format *cud_formatp;

        cud_formatp = item->ri_buf[0].i_addr;
        if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
                                item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
                return -EFSCORRUPTED;
        }

        xlog_recover_release_intent(log, XFS_LI_CUI_RT,
                        cud_formatp->cud_cui_id);
        return 0;
}
#else
# define xlog_recover_rtcud_commit_pass2 xlog_recover_rtcui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtcud_item_ops = {
        .item_type = XFS_LI_CUD_RT,
        .commit_pass2 = xlog_recover_rtcud_commit_pass2,
};