// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

unsigned int xfs_rui_log_space(unsigned int nr)
{
	return xlog_item_space(1, xfs_rui_log_format_sizeof(nr));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xlog_format_buf	*lfb)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	ruip->rui_format.rui_type = lip->li_type;
	ruip->rui_format.rui_size = 1;

	xlog_format_copy(lfb, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}
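/*
 * Illustrative sketch (an addition for this edit, not in the original
 * source): a caller sizing a transaction that will log one RUI carrying
 * @nr extents plus the RUD that later retires it could combine the two
 * space helpers in this file:
 *
 *	unsigned int	res = xfs_rui_log_space(nr) + xfs_rud_log_space();
 *
 * Both helpers fold the per-iovec log overhead in via xlog_item_space().
 */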
/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the RUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the RUI to either
 * construct and commit the RUD or drop the RUD's reference in the event of
 * error. Simply drop the log's RUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount	*mp,
	unsigned short		item_type,
	uint			nextents)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT);

	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

unsigned int xfs_rud_log_space(void)
{
	return xlog_item_space(1, sizeof(struct xfs_rud_log_format));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xlog_format_buf	*lfb)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT);

	rudp->rud_format.rud_type = lip->li_type;
	rudp->rud_format.rud_size = 1;

	xlog_format_copy(lfb, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}
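/*
 * A hedged reading of the code above (added commentary, not original
 * text): the refcount of 2 set in xfs_rui_init() corresponds to two
 * independent release paths.  Roughly:
 *
 *	xfs_rui_item_unpin()			log I/O completes
 *	  -> xfs_rui_release(ruip)		drops reference #1
 *	xfs_rud_item_release()			RUD retires the RUI (below)
 *	  -> xfs_rui_release(rudp->rud_ruip)	drops reference #2
 *
 * On abort, xfs_rui_item_release() and xfs_rmap_update_abort_intent()
 * play the same roles.  Whichever caller drops the count to zero removes
 * the RUI from the AIL and frees it.
 */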
/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_rmap_intent, ri_list);
}

static inline bool
xfs_rui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	return lip->li_type == XFS_LI_RUI_RT;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_rmap_intent	*ra = ri_entry(a);
	struct xfs_rmap_intent	*rb = ri_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}
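/*
 * Aside (an assumption based on general XFS locking conventions, not
 * stated in this file): sorting intents by xg_gno means the per-group
 * structures needed to finish them are always taken in ascending group
 * order, the usual XFS pattern for avoiding ABBA deadlocks when a single
 * chain of transactions touches several AGs.
 */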
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip,
	struct xfs_rmap_intent	*ri)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;

	map->me_flags = 0;
	if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (ri->ri_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (ri->ri_type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

static struct xfs_log_item *
__xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort,
	unsigned short		item_type)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rui_log_item	*ruip;
	struct xfs_rmap_intent	*ri;

	ASSERT(count > 0);

	ruip = xfs_rui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI);
}

static inline unsigned short
xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip)
{
	return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	unsigned int		count)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item	*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item,
			xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}
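/*
 * Rough lifecycle sketch (an illustration of how the defer machinery is
 * expected to drive the hooks above; the actual call sites live in
 * xfs_defer.c, not here):
 *
 *	lip  = xfs_rmap_update_create_intent(tp, &items, count, true);
 *	... intent transaction commits, RUI reaches the log ...
 *	done = xfs_rmap_update_create_done(tp2, lip, count);
 *	error = xfs_rmap_update_finish_item(tp2, done, item, &rcur);
 *
 * with xfs_rmap_update_abort_intent() releasing the RUI on failure paths.
 */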
/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_rmap_intent	*ri)
{
	struct xfs_mount	*mp = tp->t_mountp;

	/*
	 * Deferred rmap updates for the realtime and data sections must use
	 * separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_rmap_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrmap_update_defer_type :
			&xfs_rmap_update_defer_type);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_rmap_intent	*ri = ri_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_rmap_intent	*ri = ri_entry(item);
	int			error;

	error = xfs_rmap_finish_one(tp, ri, state);

	xfs_rmap_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp = NULL;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount	*mp,
	bool			isrt,
	struct xfs_map_extent	*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	if (isrt)
		return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}
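/*
 * Worked example (hypothetical values, added for illustration): a
 * recovered map such as
 *
 *	map->me_owner      = 128;		// a valid inode number
 *	map->me_startoff   = 0;
 *	map->me_startblock = 1000;		// an in-range fsblock
 *	map->me_len        = 8;
 *	map->me_flags      = XFS_RMAP_EXTENT_MAP;
 *
 * passes xfs_rui_validate_map() on an rmapbt filesystem, while any unknown
 * flag bit or out-of-range extent causes the whole RUI to be tossed.
 */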
static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	ri->ri_group = xfs_group_intent_get(mp, map->me_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_rui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp, isrt,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}

		xfs_rui_recover_work(mp, dfp, isrt,
				&ruip->rui_format.rui_extents[i]);
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}
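/*
 * Recovery summary (added paraphrase of the function above): replaying one
 * RUI means rebuilding an xfs_rmap_intent for every logged extent,
 * allocating a transaction with an itruncate-sized reservation, finishing
 * the intent, and then letting xfs_defer_ops_capture_and_commit() carry
 * any follow-on deferred work past the end of log recovery.
 */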
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	struct xfs_log_item	*done_item)
{
	struct xfs_rui_log_item	*ruip;
	struct xfs_map_extent	*map;
	unsigned int		count;

	ASSERT(intent->li_type == XFS_LI_RUI ||
	       intent->li_type == XFS_LI_RUI_RT);

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};

#ifdef CONFIG_XFS_RT
static struct xfs_log_item *
xfs_rtrmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI_RT);
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rtrmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rtrmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
};
#endif

STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};

static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}
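/*
 * Relogging sketch (an illustration, not a call sequence from this file):
 * when a pinned RUI holds back the log tail, the defer machinery is
 * expected to do roughly
 *
 *	done = xfs_rmap_update_create_done(tp, old_intent, count);
 *	new  = xfs_rmap_relog_intent(tp, old_intent, done);
 *
 * so the unfinished extents reappear in a fresh RUI at the log head and
 * the old item can be retired.
 */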
/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].iov_base;

	if (item->ri_buf[0].iov_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].iov_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].iov_base;

	if (item->ri_buf[0].iov_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].iov_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rtrmap_update_defer_type);
	return 0;
}
#else
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].iov_base, item->ri_buf[0].iov_len);
	return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtrui_item_ops = {
	.item_type		= XFS_LI_RUI_RT,
	.commit_pass2		= xlog_recover_rtrui_commit_pass2,
};
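/*
 * Example of the length checks above (hypothetical numbers, added for
 * illustration): a recovered RUI region claiming rui_nextents == 2 must
 * arrive as exactly xfs_rui_log_format_sizeof(2) bytes; anything shorter
 * or longer is treated as on-disk corruption rather than being truncated
 * or padded.
 */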
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if
 * it was still in the log. To do this it searches the AIL for the RUI with an
 * id equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].iov_base;
	if (item->ri_buf[0].iov_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtrud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].iov_base;
	if (item->ri_buf[0].iov_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI_RT,
			rud_formatp->rud_rui_id);
	return 0;
}
#else
# define xlog_recover_rtrud_commit_pass2	xlog_recover_rtrui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtrud_item_ops = {
	.item_type		= XFS_LI_RUD_RT,
	.commit_pass2		= xlog_recover_rtrud_commit_pass2,
};