1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include <linux/stddef.h> 8 #include <linux/errno.h> 9 #include <linux/gfp.h> 10 #include <linux/pagemap.h> 11 #include <linux/init.h> 12 #include <linux/vmalloc.h> 13 #include <linux/bio.h> 14 #include <linux/sysctl.h> 15 #include <linux/proc_fs.h> 16 #include <linux/workqueue.h> 17 #include <linux/percpu.h> 18 #include <linux/blkdev.h> 19 #include <linux/hash.h> 20 #include <linux/kthread.h> 21 #include <linux/migrate.h> 22 #include <linux/backing-dev.h> 23 #include <linux/freezer.h> 24 25 #include "xfs_format.h" 26 #include "xfs_log_format.h" 27 #include "xfs_trans_resv.h" 28 #include "xfs_sb.h" 29 #include "xfs_mount.h" 30 #include "xfs_trace.h" 31 #include "xfs_log.h" 32 #include "xfs_errortag.h" 33 #include "xfs_error.h" 34 35 static kmem_zone_t *xfs_buf_zone; 36 37 #define xb_to_gfp(flags) \ 38 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) 39 40 41 static inline int 42 xfs_buf_is_vmapped( 43 struct xfs_buf *bp) 44 { 45 /* 46 * Return true if the buffer is vmapped. 47 * 48 * b_addr is null if the buffer is not mapped, but the code is clever 49 * enough to know it doesn't have to map a single page, so the check has 50 * to be both for b_addr and bp->b_page_count > 1. 51 */ 52 return bp->b_addr && bp->b_page_count > 1; 53 } 54 55 static inline int 56 xfs_buf_vmap_len( 57 struct xfs_buf *bp) 58 { 59 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; 60 } 61 62 /* 63 * Bump the I/O in flight count on the buftarg if we haven't yet done so for 64 * this buffer. The count is incremented once per buffer (per hold cycle) 65 * because the corresponding decrement is deferred to buffer release. Buffers 66 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O 67 * tracking adds unnecessary overhead. This is used for sychronization purposes 68 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of 69 * in-flight buffers. 70 * 71 * Buffers that are never released (e.g., superblock, iclog buffers) must set 72 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count 73 * never reaches zero and unmount hangs indefinitely. 74 */ 75 static inline void 76 xfs_buf_ioacct_inc( 77 struct xfs_buf *bp) 78 { 79 if (bp->b_flags & XBF_NO_IOACCT) 80 return; 81 82 ASSERT(bp->b_flags & XBF_ASYNC); 83 spin_lock(&bp->b_lock); 84 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { 85 bp->b_state |= XFS_BSTATE_IN_FLIGHT; 86 percpu_counter_inc(&bp->b_target->bt_io_count); 87 } 88 spin_unlock(&bp->b_lock); 89 } 90 91 /* 92 * Clear the in-flight state on a buffer about to be released to the LRU or 93 * freed and unaccount from the buftarg. 94 */ 95 static inline void 96 __xfs_buf_ioacct_dec( 97 struct xfs_buf *bp) 98 { 99 lockdep_assert_held(&bp->b_lock); 100 101 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { 102 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; 103 percpu_counter_dec(&bp->b_target->bt_io_count); 104 } 105 } 106 107 static inline void 108 xfs_buf_ioacct_dec( 109 struct xfs_buf *bp) 110 { 111 spin_lock(&bp->b_lock); 112 __xfs_buf_ioacct_dec(bp); 113 spin_unlock(&bp->b_lock); 114 } 115 116 /* 117 * When we mark a buffer stale, we remove the buffer from the LRU and clear the 118 * b_lru_ref count so that the buffer is freed immediately when the buffer 119 * reference count falls to zero. If the buffer is already on the LRU, we need 120 * to remove the reference that LRU holds on the buffer. 
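 *
 * As an illustrative sketch (not lifted from any single call site), a
 * caller invalidating a cached block typically does:
 *
 *	bp = xfs_buf_incore(target, blkno, numblks, XBF_TRYLOCK);
 *	if (bp) {
 *		xfs_buf_stale(bp);
 *		xfs_buf_relse(bp);
 *	}
 *
 * so the stale buffer is torn down as soon as the last hold is dropped.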
121 * 122 * This prevents build-up of stale buffers on the LRU. 123 */ 124 void 125 xfs_buf_stale( 126 struct xfs_buf *bp) 127 { 128 ASSERT(xfs_buf_islocked(bp)); 129 130 bp->b_flags |= XBF_STALE; 131 132 /* 133 * Clear the delwri status so that a delwri queue walker will not 134 * flush this buffer to disk now that it is stale. The delwri queue has 135 * a reference to the buffer, so this is safe to do. 136 */ 137 bp->b_flags &= ~_XBF_DELWRI_Q; 138 139 /* 140 * Once the buffer is marked stale and unlocked, a subsequent lookup 141 * could reset b_flags. There is no guarantee that the buffer is 142 * unaccounted (released to LRU) before that occurs. Drop in-flight 143 * status now to preserve accounting consistency. 144 */ 145 spin_lock(&bp->b_lock); 146 __xfs_buf_ioacct_dec(bp); 147 148 atomic_set(&bp->b_lru_ref, 0); 149 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && 150 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) 151 atomic_dec(&bp->b_hold); 152 153 ASSERT(atomic_read(&bp->b_hold) >= 1); 154 spin_unlock(&bp->b_lock); 155 } 156 157 static int 158 xfs_buf_get_maps( 159 struct xfs_buf *bp, 160 int map_count) 161 { 162 ASSERT(bp->b_maps == NULL); 163 bp->b_map_count = map_count; 164 165 if (map_count == 1) { 166 bp->b_maps = &bp->__b_map; 167 return 0; 168 } 169 170 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), 171 KM_NOFS); 172 if (!bp->b_maps) 173 return -ENOMEM; 174 return 0; 175 } 176 177 /* 178 * Frees b_pages if it was allocated. 179 */ 180 static void 181 xfs_buf_free_maps( 182 struct xfs_buf *bp) 183 { 184 if (bp->b_maps != &bp->__b_map) { 185 kmem_free(bp->b_maps); 186 bp->b_maps = NULL; 187 } 188 } 189 190 struct xfs_buf * 191 _xfs_buf_alloc( 192 struct xfs_buftarg *target, 193 struct xfs_buf_map *map, 194 int nmaps, 195 xfs_buf_flags_t flags) 196 { 197 struct xfs_buf *bp; 198 int error; 199 int i; 200 201 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); 202 if (unlikely(!bp)) 203 return NULL; 204 205 /* 206 * We don't want certain flags to appear in b_flags unless they are 207 * specifically set by later operations on the buffer. 208 */ 209 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); 210 211 atomic_set(&bp->b_hold, 1); 212 atomic_set(&bp->b_lru_ref, 1); 213 init_completion(&bp->b_iowait); 214 INIT_LIST_HEAD(&bp->b_lru); 215 INIT_LIST_HEAD(&bp->b_list); 216 INIT_LIST_HEAD(&bp->b_li_list); 217 sema_init(&bp->b_sema, 0); /* held, no waiters */ 218 spin_lock_init(&bp->b_lock); 219 bp->b_target = target; 220 bp->b_flags = flags; 221 222 /* 223 * Set length and io_length to the same value initially. 224 * I/O routines should use io_length, which will be the same in 225 * most cases but may be reset (e.g. XFS recovery). 226 */ 227 error = xfs_buf_get_maps(bp, nmaps); 228 if (error) { 229 kmem_zone_free(xfs_buf_zone, bp); 230 return NULL; 231 } 232 233 bp->b_bn = map[0].bm_bn; 234 bp->b_length = 0; 235 for (i = 0; i < nmaps; i++) { 236 bp->b_maps[i].bm_bn = map[i].bm_bn; 237 bp->b_maps[i].bm_len = map[i].bm_len; 238 bp->b_length += map[i].bm_len; 239 } 240 bp->b_io_length = bp->b_length; 241 242 atomic_set(&bp->b_pin_count, 0); 243 init_waitqueue_head(&bp->b_waiters); 244 245 XFS_STATS_INC(target->bt_mount, xb_create); 246 trace_xfs_buf_init(bp, _RET_IP_); 247 248 return bp; 249 } 250 251 /* 252 * Allocate a page array capable of holding a specified number 253 * of pages, and point the page buf at it. 
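 *
 * As a rough example (assuming 4k pages): an 8k buffer needs a two entry
 * page array, while a sub-page buffer allocated from the heap still uses a
 * single entry pointing at the page backing the kmem allocation. Callers
 * normally size the array as
 *
 *	page_count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *
 * as xfs_buf_get_uncached() below does.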
254 */ 255 STATIC int 256 _xfs_buf_get_pages( 257 xfs_buf_t *bp, 258 int page_count) 259 { 260 /* Make sure that we have a page list */ 261 if (bp->b_pages == NULL) { 262 bp->b_page_count = page_count; 263 if (page_count <= XB_PAGES) { 264 bp->b_pages = bp->b_page_array; 265 } else { 266 bp->b_pages = kmem_alloc(sizeof(struct page *) * 267 page_count, KM_NOFS); 268 if (bp->b_pages == NULL) 269 return -ENOMEM; 270 } 271 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); 272 } 273 return 0; 274 } 275 276 /* 277 * Frees b_pages if it was allocated. 278 */ 279 STATIC void 280 _xfs_buf_free_pages( 281 xfs_buf_t *bp) 282 { 283 if (bp->b_pages != bp->b_page_array) { 284 kmem_free(bp->b_pages); 285 bp->b_pages = NULL; 286 } 287 } 288 289 /* 290 * Releases the specified buffer. 291 * 292 * The modification state of any associated pages is left unchanged. 293 * The buffer must not be on any hash - use xfs_buf_rele instead for 294 * hashed and refcounted buffers 295 */ 296 void 297 xfs_buf_free( 298 xfs_buf_t *bp) 299 { 300 trace_xfs_buf_free(bp, _RET_IP_); 301 302 ASSERT(list_empty(&bp->b_lru)); 303 304 if (bp->b_flags & _XBF_PAGES) { 305 uint i; 306 307 if (xfs_buf_is_vmapped(bp)) 308 vm_unmap_ram(bp->b_addr - bp->b_offset, 309 bp->b_page_count); 310 311 for (i = 0; i < bp->b_page_count; i++) { 312 struct page *page = bp->b_pages[i]; 313 314 __free_page(page); 315 } 316 } else if (bp->b_flags & _XBF_KMEM) 317 kmem_free(bp->b_addr); 318 _xfs_buf_free_pages(bp); 319 xfs_buf_free_maps(bp); 320 kmem_zone_free(xfs_buf_zone, bp); 321 } 322 323 /* 324 * Allocates all the pages for buffer in question and builds it's page list. 325 */ 326 STATIC int 327 xfs_buf_allocate_memory( 328 xfs_buf_t *bp, 329 uint flags) 330 { 331 size_t size; 332 size_t nbytes, offset; 333 gfp_t gfp_mask = xb_to_gfp(flags); 334 unsigned short page_count, i; 335 xfs_off_t start, end; 336 int error; 337 338 /* 339 * for buffers that are contained within a single page, just allocate 340 * the memory from the heap - there's no need for the complexity of 341 * page arrays to keep allocation down to order 0. 342 */ 343 size = BBTOB(bp->b_length); 344 if (size < PAGE_SIZE) { 345 bp->b_addr = kmem_alloc(size, KM_NOFS); 346 if (!bp->b_addr) { 347 /* low memory - use alloc_page loop instead */ 348 goto use_alloc_page; 349 } 350 351 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != 352 ((unsigned long)bp->b_addr & PAGE_MASK)) { 353 /* b_addr spans two pages - use alloc_page instead */ 354 kmem_free(bp->b_addr); 355 bp->b_addr = NULL; 356 goto use_alloc_page; 357 } 358 bp->b_offset = offset_in_page(bp->b_addr); 359 bp->b_pages = bp->b_page_array; 360 bp->b_pages[0] = virt_to_page(bp->b_addr); 361 bp->b_page_count = 1; 362 bp->b_flags |= _XBF_KMEM; 363 return 0; 364 } 365 366 use_alloc_page: 367 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; 368 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) 369 >> PAGE_SHIFT; 370 page_count = end - start; 371 error = _xfs_buf_get_pages(bp, page_count); 372 if (unlikely(error)) 373 return error; 374 375 offset = bp->b_offset; 376 bp->b_flags |= _XBF_PAGES; 377 378 for (i = 0; i < bp->b_page_count; i++) { 379 struct page *page; 380 uint retries = 0; 381 retry: 382 page = alloc_page(gfp_mask); 383 if (unlikely(page == NULL)) { 384 if (flags & XBF_READ_AHEAD) { 385 bp->b_page_count = i; 386 error = -ENOMEM; 387 goto out_free_pages; 388 } 389 390 /* 391 * This could deadlock. 
392 * 393 * But until all the XFS lowlevel code is revamped to 394 * handle buffer allocation failures we can't do much. 395 */ 396 if (!(++retries % 100)) 397 xfs_err(NULL, 398 "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)", 399 current->comm, current->pid, 400 __func__, gfp_mask); 401 402 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries); 403 congestion_wait(BLK_RW_ASYNC, HZ/50); 404 goto retry; 405 } 406 407 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found); 408 409 nbytes = min_t(size_t, size, PAGE_SIZE - offset); 410 size -= nbytes; 411 bp->b_pages[i] = page; 412 offset = 0; 413 } 414 return 0; 415 416 out_free_pages: 417 for (i = 0; i < bp->b_page_count; i++) 418 __free_page(bp->b_pages[i]); 419 bp->b_flags &= ~_XBF_PAGES; 420 return error; 421 } 422 423 /* 424 * Map buffer into kernel address-space if necessary. 425 */ 426 STATIC int 427 _xfs_buf_map_pages( 428 xfs_buf_t *bp, 429 uint flags) 430 { 431 ASSERT(bp->b_flags & _XBF_PAGES); 432 if (bp->b_page_count == 1) { 433 /* A single page buffer is always mappable */ 434 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 435 } else if (flags & XBF_UNMAPPED) { 436 bp->b_addr = NULL; 437 } else { 438 int retried = 0; 439 unsigned nofs_flag; 440 441 /* 442 * vm_map_ram() will allocate auxillary structures (e.g. 443 * pagetables) with GFP_KERNEL, yet we are likely to be under 444 * GFP_NOFS context here. Hence we need to tell memory reclaim 445 * that we are in such a context via PF_MEMALLOC_NOFS to prevent 446 * memory reclaim re-entering the filesystem here and 447 * potentially deadlocking. 448 */ 449 nofs_flag = memalloc_nofs_save(); 450 do { 451 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, 452 -1, PAGE_KERNEL); 453 if (bp->b_addr) 454 break; 455 vm_unmap_aliases(); 456 } while (retried++ <= 1); 457 memalloc_nofs_restore(nofs_flag); 458 459 if (!bp->b_addr) 460 return -ENOMEM; 461 bp->b_addr += bp->b_offset; 462 } 463 464 return 0; 465 } 466 467 /* 468 * Finding and Reading Buffers 469 */ 470 static int 471 _xfs_buf_obj_cmp( 472 struct rhashtable_compare_arg *arg, 473 const void *obj) 474 { 475 const struct xfs_buf_map *map = arg->key; 476 const struct xfs_buf *bp = obj; 477 478 /* 479 * The key hashing in the lookup path depends on the key being the 480 * first element of the compare_arg, make sure to assert this. 481 */ 482 BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0); 483 484 if (bp->b_bn != map->bm_bn) 485 return 1; 486 487 if (unlikely(bp->b_length != map->bm_len)) { 488 /* 489 * found a block number match. If the range doesn't 490 * match, the only way this is allowed is if the buffer 491 * in the cache is stale and the transaction that made 492 * it stale has not yet committed. i.e. we are 493 * reallocating a busy extent. Skip this buffer and 494 * continue searching for an exact match. 
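 *
 * For example (hypothetical numbers): an 8 block buffer at this daddr was
 * staled by a transaction that freed its extent, and before that
 * transaction commits the extent is reallocated and looked up as a
 * 4 block buffer at the same daddr. The stale 8 block buffer must not be
 * returned for the 4 block lookup.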
495 */ 496 ASSERT(bp->b_flags & XBF_STALE); 497 return 1; 498 } 499 return 0; 500 } 501 502 static const struct rhashtable_params xfs_buf_hash_params = { 503 .min_size = 32, /* empty AGs have minimal footprint */ 504 .nelem_hint = 16, 505 .key_len = sizeof(xfs_daddr_t), 506 .key_offset = offsetof(struct xfs_buf, b_bn), 507 .head_offset = offsetof(struct xfs_buf, b_rhash_head), 508 .automatic_shrinking = true, 509 .obj_cmpfn = _xfs_buf_obj_cmp, 510 }; 511 512 int 513 xfs_buf_hash_init( 514 struct xfs_perag *pag) 515 { 516 spin_lock_init(&pag->pag_buf_lock); 517 return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params); 518 } 519 520 void 521 xfs_buf_hash_destroy( 522 struct xfs_perag *pag) 523 { 524 rhashtable_destroy(&pag->pag_buf_hash); 525 } 526 527 /* 528 * Look up a buffer in the buffer cache and return it referenced and locked 529 * in @found_bp. 530 * 531 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the 532 * cache. 533 * 534 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return 535 * -EAGAIN if we fail to lock it. 536 * 537 * Return values are: 538 * -EFSCORRUPTED if have been supplied with an invalid address 539 * -EAGAIN on trylock failure 540 * -ENOENT if we fail to find a match and @new_bp was NULL 541 * 0, with @found_bp: 542 * - @new_bp if we inserted it into the cache 543 * - the buffer we found and locked. 544 */ 545 static int 546 xfs_buf_find( 547 struct xfs_buftarg *btp, 548 struct xfs_buf_map *map, 549 int nmaps, 550 xfs_buf_flags_t flags, 551 struct xfs_buf *new_bp, 552 struct xfs_buf **found_bp) 553 { 554 struct xfs_perag *pag; 555 xfs_buf_t *bp; 556 struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn }; 557 xfs_daddr_t eofs; 558 int i; 559 560 *found_bp = NULL; 561 562 for (i = 0; i < nmaps; i++) 563 cmap.bm_len += map[i].bm_len; 564 565 /* Check for IOs smaller than the sector size / not sector aligned */ 566 ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize)); 567 ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask)); 568 569 /* 570 * Corrupted block numbers can get through to here, unfortunately, so we 571 * have to check that the buffer falls within the filesystem bounds. 
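 *
 * For example, a daddr taken from a corrupted on-disk block pointer can
 * point beyond the last block of the filesystem; the check below catches
 * that and fails the lookup with -EFSCORRUPTED instead of feeding garbage
 * to the perag/rhashtable lookup.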
572 */ 573 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); 574 if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) { 575 xfs_alert(btp->bt_mount, 576 "%s: daddr 0x%llx out of range, EOFS 0x%llx", 577 __func__, cmap.bm_bn, eofs); 578 WARN_ON(1); 579 return -EFSCORRUPTED; 580 } 581 582 pag = xfs_perag_get(btp->bt_mount, 583 xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn)); 584 585 spin_lock(&pag->pag_buf_lock); 586 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap, 587 xfs_buf_hash_params); 588 if (bp) { 589 atomic_inc(&bp->b_hold); 590 goto found; 591 } 592 593 /* No match found */ 594 if (!new_bp) { 595 XFS_STATS_INC(btp->bt_mount, xb_miss_locked); 596 spin_unlock(&pag->pag_buf_lock); 597 xfs_perag_put(pag); 598 return -ENOENT; 599 } 600 601 /* the buffer keeps the perag reference until it is freed */ 602 new_bp->b_pag = pag; 603 rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head, 604 xfs_buf_hash_params); 605 spin_unlock(&pag->pag_buf_lock); 606 *found_bp = new_bp; 607 return 0; 608 609 found: 610 spin_unlock(&pag->pag_buf_lock); 611 xfs_perag_put(pag); 612 613 if (!xfs_buf_trylock(bp)) { 614 if (flags & XBF_TRYLOCK) { 615 xfs_buf_rele(bp); 616 XFS_STATS_INC(btp->bt_mount, xb_busy_locked); 617 return -EAGAIN; 618 } 619 xfs_buf_lock(bp); 620 XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited); 621 } 622 623 /* 624 * if the buffer is stale, clear all the external state associated with 625 * it. We need to keep flags such as how we allocated the buffer memory 626 * intact here. 627 */ 628 if (bp->b_flags & XBF_STALE) { 629 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 630 ASSERT(bp->b_iodone == NULL); 631 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; 632 bp->b_ops = NULL; 633 } 634 635 trace_xfs_buf_find(bp, flags, _RET_IP_); 636 XFS_STATS_INC(btp->bt_mount, xb_get_locked); 637 *found_bp = bp; 638 return 0; 639 } 640 641 struct xfs_buf * 642 xfs_buf_incore( 643 struct xfs_buftarg *target, 644 xfs_daddr_t blkno, 645 size_t numblks, 646 xfs_buf_flags_t flags) 647 { 648 struct xfs_buf *bp; 649 int error; 650 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); 651 652 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); 653 if (error) 654 return NULL; 655 return bp; 656 } 657 658 /* 659 * Assembles a buffer covering the specified range. The code is optimised for 660 * cache hits, as metadata intensive workloads will see 3 orders of magnitude 661 * more hits than misses. 662 */ 663 struct xfs_buf * 664 xfs_buf_get_map( 665 struct xfs_buftarg *target, 666 struct xfs_buf_map *map, 667 int nmaps, 668 xfs_buf_flags_t flags) 669 { 670 struct xfs_buf *bp; 671 struct xfs_buf *new_bp; 672 int error = 0; 673 674 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); 675 676 switch (error) { 677 case 0: 678 /* cache hit */ 679 goto found; 680 case -EAGAIN: 681 /* cache hit, trylock failure, caller handles failure */ 682 ASSERT(flags & XBF_TRYLOCK); 683 return NULL; 684 case -ENOENT: 685 /* cache miss, go for insert */ 686 break; 687 case -EFSCORRUPTED: 688 default: 689 /* 690 * None of the higher layers understand failure types 691 * yet, so return NULL to signal a fatal lookup error. 
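 *
 * A sketch of what callers therefore look like (illustrative only):
 *
 *	bp = xfs_buf_get_map(target, map, nmaps, flags);
 *	if (!bp)
 *		return -ENOMEM;
 *
 * with -ENOMEM standing in for every flavour of lookup failure.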
692	 */
693		return NULL;
694	}
695
696	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
697	if (unlikely(!new_bp))
698		return NULL;
699
700	error = xfs_buf_allocate_memory(new_bp, flags);
701	if (error) {
702		xfs_buf_free(new_bp);
703		return NULL;
704	}
705
706	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
707	if (error) {
708		xfs_buf_free(new_bp);
709		return NULL;
710	}
711
712	if (bp != new_bp)
713		xfs_buf_free(new_bp);
714
715 found:
716	if (!bp->b_addr) {
717		error = _xfs_buf_map_pages(bp, flags);
718		if (unlikely(error)) {
719			xfs_warn(target->bt_mount,
720				"%s: failed to map pages\n", __func__);
721			xfs_buf_relse(bp);
722			return NULL;
723		}
724	}
725
726	/*
727	 * Clear b_error if this is a lookup from a caller that doesn't expect
728	 * valid data to be found in the buffer.
729	 */
730	if (!(flags & XBF_READ))
731		xfs_buf_ioerror(bp, 0);
732
733	XFS_STATS_INC(target->bt_mount, xb_get);
734	trace_xfs_buf_get(bp, flags, _RET_IP_);
735	return bp;
736 }
737
738 STATIC int
739 _xfs_buf_read(
740	xfs_buf_t		*bp,
741	xfs_buf_flags_t		flags)
742 {
743	ASSERT(!(flags & XBF_WRITE));
744	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
745
746	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
747	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
748
749	return xfs_buf_submit(bp);
750 }
751
752 xfs_buf_t *
753 xfs_buf_read_map(
754	struct xfs_buftarg	*target,
755	struct xfs_buf_map	*map,
756	int			nmaps,
757	xfs_buf_flags_t		flags,
758	const struct xfs_buf_ops *ops)
759 {
760	struct xfs_buf		*bp;
761
762	flags |= XBF_READ;
763
764	bp = xfs_buf_get_map(target, map, nmaps, flags);
765	if (bp) {
766		trace_xfs_buf_read(bp, flags, _RET_IP_);
767
768		if (!(bp->b_flags & XBF_DONE)) {
769			XFS_STATS_INC(target->bt_mount, xb_get_read);
770			bp->b_ops = ops;
771			_xfs_buf_read(bp, flags);
772		} else if (flags & XBF_ASYNC) {
773			/*
774			 * Read ahead call which is already satisfied,
775			 * drop the buffer
776			 */
777			xfs_buf_relse(bp);
778			return NULL;
779		} else {
780			/* We do not want read in the flags */
781			bp->b_flags &= ~XBF_READ;
782		}
783	}
784
785	return bp;
786 }
787
788 /*
789  * If we are not low on memory then do the readahead in a deadlock
790  * safe manner.
791  */
792 void
793 xfs_buf_readahead_map(
794	struct xfs_buftarg	*target,
795	struct xfs_buf_map	*map,
796	int			nmaps,
797	const struct xfs_buf_ops *ops)
798 {
799	if (bdi_read_congested(target->bt_bdev->bd_bdi))
800		return;
801
802	xfs_buf_read_map(target, map, nmaps,
803		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
804 }
805
806 /*
807  * Read an uncached buffer from disk. Allocates and returns a locked
808  * buffer containing the disk contents or nothing.
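 *
 * Typical usage is a one-off read of a known location during mount or
 * growfs, along the lines of (sketch only, error handling trimmed):
 *
 *	error = xfs_buf_read_uncached(btp, daddr, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	(examine bp->b_addr)
 *	xfs_buf_relse(bp);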
809 */ 810 int 811 xfs_buf_read_uncached( 812 struct xfs_buftarg *target, 813 xfs_daddr_t daddr, 814 size_t numblks, 815 int flags, 816 struct xfs_buf **bpp, 817 const struct xfs_buf_ops *ops) 818 { 819 struct xfs_buf *bp; 820 821 *bpp = NULL; 822 823 bp = xfs_buf_get_uncached(target, numblks, flags); 824 if (!bp) 825 return -ENOMEM; 826 827 /* set up the buffer for a read IO */ 828 ASSERT(bp->b_map_count == 1); 829 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ 830 bp->b_maps[0].bm_bn = daddr; 831 bp->b_flags |= XBF_READ; 832 bp->b_ops = ops; 833 834 xfs_buf_submit(bp); 835 if (bp->b_error) { 836 int error = bp->b_error; 837 xfs_buf_relse(bp); 838 return error; 839 } 840 841 *bpp = bp; 842 return 0; 843 } 844 845 /* 846 * Return a buffer allocated as an empty buffer and associated to external 847 * memory via xfs_buf_associate_memory() back to it's empty state. 848 */ 849 void 850 xfs_buf_set_empty( 851 struct xfs_buf *bp, 852 size_t numblks) 853 { 854 if (bp->b_pages) 855 _xfs_buf_free_pages(bp); 856 857 bp->b_pages = NULL; 858 bp->b_page_count = 0; 859 bp->b_addr = NULL; 860 bp->b_length = numblks; 861 bp->b_io_length = numblks; 862 863 ASSERT(bp->b_map_count == 1); 864 bp->b_bn = XFS_BUF_DADDR_NULL; 865 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; 866 bp->b_maps[0].bm_len = bp->b_length; 867 } 868 869 static inline struct page * 870 mem_to_page( 871 void *addr) 872 { 873 if ((!is_vmalloc_addr(addr))) { 874 return virt_to_page(addr); 875 } else { 876 return vmalloc_to_page(addr); 877 } 878 } 879 880 int 881 xfs_buf_associate_memory( 882 xfs_buf_t *bp, 883 void *mem, 884 size_t len) 885 { 886 int rval; 887 int i = 0; 888 unsigned long pageaddr; 889 unsigned long offset; 890 size_t buflen; 891 int page_count; 892 893 pageaddr = (unsigned long)mem & PAGE_MASK; 894 offset = (unsigned long)mem - pageaddr; 895 buflen = PAGE_ALIGN(len + offset); 896 page_count = buflen >> PAGE_SHIFT; 897 898 /* Free any previous set of page pointers */ 899 if (bp->b_pages) 900 _xfs_buf_free_pages(bp); 901 902 bp->b_pages = NULL; 903 bp->b_addr = mem; 904 905 rval = _xfs_buf_get_pages(bp, page_count); 906 if (rval) 907 return rval; 908 909 bp->b_offset = offset; 910 911 for (i = 0; i < bp->b_page_count; i++) { 912 bp->b_pages[i] = mem_to_page((void *)pageaddr); 913 pageaddr += PAGE_SIZE; 914 } 915 916 bp->b_io_length = BTOBB(len); 917 bp->b_length = BTOBB(buflen); 918 919 return 0; 920 } 921 922 xfs_buf_t * 923 xfs_buf_get_uncached( 924 struct xfs_buftarg *target, 925 size_t numblks, 926 int flags) 927 { 928 unsigned long page_count; 929 int error, i; 930 struct xfs_buf *bp; 931 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); 932 933 /* flags might contain irrelevant bits, pass only what we care about */ 934 bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT); 935 if (unlikely(bp == NULL)) 936 goto fail; 937 938 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; 939 error = _xfs_buf_get_pages(bp, page_count); 940 if (error) 941 goto fail_free_buf; 942 943 for (i = 0; i < page_count; i++) { 944 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); 945 if (!bp->b_pages[i]) 946 goto fail_free_mem; 947 } 948 bp->b_flags |= _XBF_PAGES; 949 950 error = _xfs_buf_map_pages(bp, 0); 951 if (unlikely(error)) { 952 xfs_warn(target->bt_mount, 953 "%s: failed to map pages", __func__); 954 goto fail_free_mem; 955 } 956 957 trace_xfs_buf_get_uncached(bp, _RET_IP_); 958 return bp; 959 960 fail_free_mem: 961 while (--i >= 0) 962 __free_page(bp->b_pages[i]); 963 _xfs_buf_free_pages(bp); 964 
fail_free_buf: 965 xfs_buf_free_maps(bp); 966 kmem_zone_free(xfs_buf_zone, bp); 967 fail: 968 return NULL; 969 } 970 971 /* 972 * Increment reference count on buffer, to hold the buffer concurrently 973 * with another thread which may release (free) the buffer asynchronously. 974 * Must hold the buffer already to call this function. 975 */ 976 void 977 xfs_buf_hold( 978 xfs_buf_t *bp) 979 { 980 trace_xfs_buf_hold(bp, _RET_IP_); 981 atomic_inc(&bp->b_hold); 982 } 983 984 /* 985 * Release a hold on the specified buffer. If the hold count is 1, the buffer is 986 * placed on LRU or freed (depending on b_lru_ref). 987 */ 988 void 989 xfs_buf_rele( 990 xfs_buf_t *bp) 991 { 992 struct xfs_perag *pag = bp->b_pag; 993 bool release; 994 bool freebuf = false; 995 996 trace_xfs_buf_rele(bp, _RET_IP_); 997 998 if (!pag) { 999 ASSERT(list_empty(&bp->b_lru)); 1000 if (atomic_dec_and_test(&bp->b_hold)) { 1001 xfs_buf_ioacct_dec(bp); 1002 xfs_buf_free(bp); 1003 } 1004 return; 1005 } 1006 1007 ASSERT(atomic_read(&bp->b_hold) > 0); 1008 1009 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); 1010 spin_lock(&bp->b_lock); 1011 if (!release) { 1012 /* 1013 * Drop the in-flight state if the buffer is already on the LRU 1014 * and it holds the only reference. This is racy because we 1015 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT 1016 * ensures the decrement occurs only once per-buf. 1017 */ 1018 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) 1019 __xfs_buf_ioacct_dec(bp); 1020 goto out_unlock; 1021 } 1022 1023 /* the last reference has been dropped ... */ 1024 __xfs_buf_ioacct_dec(bp); 1025 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { 1026 /* 1027 * If the buffer is added to the LRU take a new reference to the 1028 * buffer for the LRU and clear the (now stale) dispose list 1029 * state flag 1030 */ 1031 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { 1032 bp->b_state &= ~XFS_BSTATE_DISPOSE; 1033 atomic_inc(&bp->b_hold); 1034 } 1035 spin_unlock(&pag->pag_buf_lock); 1036 } else { 1037 /* 1038 * most of the time buffers will already be removed from the 1039 * LRU, so optimise that case by checking for the 1040 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer 1041 * was on was the disposal list 1042 */ 1043 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { 1044 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); 1045 } else { 1046 ASSERT(list_empty(&bp->b_lru)); 1047 } 1048 1049 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); 1050 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, 1051 xfs_buf_hash_params); 1052 spin_unlock(&pag->pag_buf_lock); 1053 xfs_perag_put(pag); 1054 freebuf = true; 1055 } 1056 1057 out_unlock: 1058 spin_unlock(&bp->b_lock); 1059 1060 if (freebuf) 1061 xfs_buf_free(bp); 1062 } 1063 1064 1065 /* 1066 * Lock a buffer object, if it is not already locked. 1067 * 1068 * If we come across a stale, pinned, locked buffer, we know that we are 1069 * being asked to lock a buffer that has been reallocated. Because it is 1070 * pinned, we know that the log has not been pushed to disk and hence it 1071 * will still be locked. Rather than continuing to have trylock attempts 1072 * fail until someone else pushes the log, push it ourselves before 1073 * returning. This means that the xfsaild will not get stuck trying 1074 * to push on stale inode buffers. 
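 *
 * Callers that must not block simply skip buffers they cannot lock; the
 * async delwri submission path below, for instance, does (sketch):
 *
 *	if (!xfs_buf_trylock(bp))
 *		continue;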
1075 */ 1076 int 1077 xfs_buf_trylock( 1078 struct xfs_buf *bp) 1079 { 1080 int locked; 1081 1082 locked = down_trylock(&bp->b_sema) == 0; 1083 if (locked) 1084 trace_xfs_buf_trylock(bp, _RET_IP_); 1085 else 1086 trace_xfs_buf_trylock_fail(bp, _RET_IP_); 1087 return locked; 1088 } 1089 1090 /* 1091 * Lock a buffer object. 1092 * 1093 * If we come across a stale, pinned, locked buffer, we know that we 1094 * are being asked to lock a buffer that has been reallocated. Because 1095 * it is pinned, we know that the log has not been pushed to disk and 1096 * hence it will still be locked. Rather than sleeping until someone 1097 * else pushes the log, push it ourselves before trying to get the lock. 1098 */ 1099 void 1100 xfs_buf_lock( 1101 struct xfs_buf *bp) 1102 { 1103 trace_xfs_buf_lock(bp, _RET_IP_); 1104 1105 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 1106 xfs_log_force(bp->b_target->bt_mount, 0); 1107 down(&bp->b_sema); 1108 1109 trace_xfs_buf_lock_done(bp, _RET_IP_); 1110 } 1111 1112 void 1113 xfs_buf_unlock( 1114 struct xfs_buf *bp) 1115 { 1116 ASSERT(xfs_buf_islocked(bp)); 1117 1118 up(&bp->b_sema); 1119 trace_xfs_buf_unlock(bp, _RET_IP_); 1120 } 1121 1122 STATIC void 1123 xfs_buf_wait_unpin( 1124 xfs_buf_t *bp) 1125 { 1126 DECLARE_WAITQUEUE (wait, current); 1127 1128 if (atomic_read(&bp->b_pin_count) == 0) 1129 return; 1130 1131 add_wait_queue(&bp->b_waiters, &wait); 1132 for (;;) { 1133 set_current_state(TASK_UNINTERRUPTIBLE); 1134 if (atomic_read(&bp->b_pin_count) == 0) 1135 break; 1136 io_schedule(); 1137 } 1138 remove_wait_queue(&bp->b_waiters, &wait); 1139 set_current_state(TASK_RUNNING); 1140 } 1141 1142 /* 1143 * Buffer Utility Routines 1144 */ 1145 1146 void 1147 xfs_buf_ioend( 1148 struct xfs_buf *bp) 1149 { 1150 bool read = bp->b_flags & XBF_READ; 1151 1152 trace_xfs_buf_iodone(bp, _RET_IP_); 1153 1154 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1155 1156 /* 1157 * Pull in IO completion errors now. We are guaranteed to be running 1158 * single threaded, so we don't need the lock to read b_io_error. 
1159 */ 1160 if (!bp->b_error && bp->b_io_error) 1161 xfs_buf_ioerror(bp, bp->b_io_error); 1162 1163 /* Only validate buffers that were read without errors */ 1164 if (read && !bp->b_error && bp->b_ops) { 1165 ASSERT(!bp->b_iodone); 1166 bp->b_ops->verify_read(bp); 1167 } 1168 1169 if (!bp->b_error) 1170 bp->b_flags |= XBF_DONE; 1171 1172 if (bp->b_iodone) 1173 (*(bp->b_iodone))(bp); 1174 else if (bp->b_flags & XBF_ASYNC) 1175 xfs_buf_relse(bp); 1176 else 1177 complete(&bp->b_iowait); 1178 } 1179 1180 static void 1181 xfs_buf_ioend_work( 1182 struct work_struct *work) 1183 { 1184 struct xfs_buf *bp = 1185 container_of(work, xfs_buf_t, b_ioend_work); 1186 1187 xfs_buf_ioend(bp); 1188 } 1189 1190 static void 1191 xfs_buf_ioend_async( 1192 struct xfs_buf *bp) 1193 { 1194 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); 1195 queue_work(bp->b_ioend_wq, &bp->b_ioend_work); 1196 } 1197 1198 void 1199 __xfs_buf_ioerror( 1200 xfs_buf_t *bp, 1201 int error, 1202 xfs_failaddr_t failaddr) 1203 { 1204 ASSERT(error <= 0 && error >= -1000); 1205 bp->b_error = error; 1206 trace_xfs_buf_ioerror(bp, error, failaddr); 1207 } 1208 1209 void 1210 xfs_buf_ioerror_alert( 1211 struct xfs_buf *bp, 1212 const char *func) 1213 { 1214 xfs_alert(bp->b_target->bt_mount, 1215 "metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d", 1216 func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length, 1217 -bp->b_error); 1218 } 1219 1220 int 1221 xfs_bwrite( 1222 struct xfs_buf *bp) 1223 { 1224 int error; 1225 1226 ASSERT(xfs_buf_islocked(bp)); 1227 1228 bp->b_flags |= XBF_WRITE; 1229 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | 1230 XBF_WRITE_FAIL | XBF_DONE); 1231 1232 error = xfs_buf_submit(bp); 1233 if (error) { 1234 xfs_force_shutdown(bp->b_target->bt_mount, 1235 SHUTDOWN_META_IO_ERROR); 1236 } 1237 return error; 1238 } 1239 1240 static void 1241 xfs_buf_bio_end_io( 1242 struct bio *bio) 1243 { 1244 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; 1245 1246 /* 1247 * don't overwrite existing errors - otherwise we can lose errors on 1248 * buffers that require multiple bios to complete. 1249 */ 1250 if (bio->bi_status) { 1251 int error = blk_status_to_errno(bio->bi_status); 1252 1253 cmpxchg(&bp->b_io_error, 0, error); 1254 } 1255 1256 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1257 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); 1258 1259 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) 1260 xfs_buf_ioend_async(bp); 1261 bio_put(bio); 1262 } 1263 1264 static void 1265 xfs_buf_ioapply_map( 1266 struct xfs_buf *bp, 1267 int map, 1268 int *buf_offset, 1269 int *count, 1270 int op, 1271 int op_flags) 1272 { 1273 int page_index; 1274 int total_nr_pages = bp->b_page_count; 1275 int nr_pages; 1276 struct bio *bio; 1277 sector_t sector = bp->b_maps[map].bm_bn; 1278 int size; 1279 int offset; 1280 1281 /* skip the pages in the buffer before the start offset */ 1282 page_index = 0; 1283 offset = *buf_offset; 1284 while (offset >= PAGE_SIZE) { 1285 page_index++; 1286 offset -= PAGE_SIZE; 1287 } 1288 1289 /* 1290 * Limit the IO size to the length of the current vector, and update the 1291 * remaining IO count for the next time around. 
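 *
 * For example (illustrative numbers): a two vector buffer with maps of
 * 8 and 16 sectors and *count == BBTOB(24) issues BBTOB(8) == 4096 bytes
 * for the first map here and leaves BBTOB(16) for the next call.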
1292 */ 1293 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); 1294 *count -= size; 1295 *buf_offset += size; 1296 1297 next_chunk: 1298 atomic_inc(&bp->b_io_remaining); 1299 nr_pages = min(total_nr_pages, BIO_MAX_PAGES); 1300 1301 bio = bio_alloc(GFP_NOIO, nr_pages); 1302 bio_set_dev(bio, bp->b_target->bt_bdev); 1303 bio->bi_iter.bi_sector = sector; 1304 bio->bi_end_io = xfs_buf_bio_end_io; 1305 bio->bi_private = bp; 1306 bio_set_op_attrs(bio, op, op_flags); 1307 1308 for (; size && nr_pages; nr_pages--, page_index++) { 1309 int rbytes, nbytes = PAGE_SIZE - offset; 1310 1311 if (nbytes > size) 1312 nbytes = size; 1313 1314 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, 1315 offset); 1316 if (rbytes < nbytes) 1317 break; 1318 1319 offset = 0; 1320 sector += BTOBB(nbytes); 1321 size -= nbytes; 1322 total_nr_pages--; 1323 } 1324 1325 if (likely(bio->bi_iter.bi_size)) { 1326 if (xfs_buf_is_vmapped(bp)) { 1327 flush_kernel_vmap_range(bp->b_addr, 1328 xfs_buf_vmap_len(bp)); 1329 } 1330 submit_bio(bio); 1331 if (size) 1332 goto next_chunk; 1333 } else { 1334 /* 1335 * This is guaranteed not to be the last io reference count 1336 * because the caller (xfs_buf_submit) holds a count itself. 1337 */ 1338 atomic_dec(&bp->b_io_remaining); 1339 xfs_buf_ioerror(bp, -EIO); 1340 bio_put(bio); 1341 } 1342 1343 } 1344 1345 STATIC void 1346 _xfs_buf_ioapply( 1347 struct xfs_buf *bp) 1348 { 1349 struct blk_plug plug; 1350 int op; 1351 int op_flags = 0; 1352 int offset; 1353 int size; 1354 int i; 1355 1356 /* 1357 * Make sure we capture only current IO errors rather than stale errors 1358 * left over from previous use of the buffer (e.g. failed readahead). 1359 */ 1360 bp->b_error = 0; 1361 1362 /* 1363 * Initialize the I/O completion workqueue if we haven't yet or the 1364 * submitter has not opted to specify a custom one. 1365 */ 1366 if (!bp->b_ioend_wq) 1367 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; 1368 1369 if (bp->b_flags & XBF_WRITE) { 1370 op = REQ_OP_WRITE; 1371 if (bp->b_flags & XBF_SYNCIO) 1372 op_flags = REQ_SYNC; 1373 if (bp->b_flags & XBF_FUA) 1374 op_flags |= REQ_FUA; 1375 if (bp->b_flags & XBF_FLUSH) 1376 op_flags |= REQ_PREFLUSH; 1377 1378 /* 1379 * Run the write verifier callback function if it exists. If 1380 * this function fails it will mark the buffer with an error and 1381 * the IO should not be dispatched. 1382 */ 1383 if (bp->b_ops) { 1384 bp->b_ops->verify_write(bp); 1385 if (bp->b_error) { 1386 xfs_force_shutdown(bp->b_target->bt_mount, 1387 SHUTDOWN_CORRUPT_INCORE); 1388 return; 1389 } 1390 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { 1391 struct xfs_mount *mp = bp->b_target->bt_mount; 1392 1393 /* 1394 * non-crc filesystems don't attach verifiers during 1395 * log recovery, so don't warn for such filesystems. 1396 */ 1397 if (xfs_sb_version_hascrc(&mp->m_sb)) { 1398 xfs_warn(mp, 1399 "%s: no buf ops on daddr 0x%llx len %d", 1400 __func__, bp->b_bn, bp->b_length); 1401 xfs_hex_dump(bp->b_addr, 1402 XFS_CORRUPTION_DUMP_LEN); 1403 dump_stack(); 1404 } 1405 } 1406 } else if (bp->b_flags & XBF_READ_AHEAD) { 1407 op = REQ_OP_READ; 1408 op_flags = REQ_RAHEAD; 1409 } else { 1410 op = REQ_OP_READ; 1411 } 1412 1413 /* we only use the buffer cache for meta-data */ 1414 op_flags |= REQ_META; 1415 1416 /* 1417 * Walk all the vectors issuing IO on them. Set up the initial offset 1418 * into the buffer and the desired IO size before we start - 1419 * _xfs_buf_ioapply_vec() will modify them appropriately for each 1420 * subsequent call. 
1421 */ 1422 offset = bp->b_offset; 1423 size = BBTOB(bp->b_io_length); 1424 blk_start_plug(&plug); 1425 for (i = 0; i < bp->b_map_count; i++) { 1426 xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags); 1427 if (bp->b_error) 1428 break; 1429 if (size <= 0) 1430 break; /* all done */ 1431 } 1432 blk_finish_plug(&plug); 1433 } 1434 1435 /* 1436 * Wait for I/O completion of a sync buffer and return the I/O error code. 1437 */ 1438 static int 1439 xfs_buf_iowait( 1440 struct xfs_buf *bp) 1441 { 1442 ASSERT(!(bp->b_flags & XBF_ASYNC)); 1443 1444 trace_xfs_buf_iowait(bp, _RET_IP_); 1445 wait_for_completion(&bp->b_iowait); 1446 trace_xfs_buf_iowait_done(bp, _RET_IP_); 1447 1448 return bp->b_error; 1449 } 1450 1451 /* 1452 * Buffer I/O submission path, read or write. Asynchronous submission transfers 1453 * the buffer lock ownership and the current reference to the IO. It is not 1454 * safe to reference the buffer after a call to this function unless the caller 1455 * holds an additional reference itself. 1456 */ 1457 int 1458 __xfs_buf_submit( 1459 struct xfs_buf *bp, 1460 bool wait) 1461 { 1462 int error = 0; 1463 1464 trace_xfs_buf_submit(bp, _RET_IP_); 1465 1466 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); 1467 1468 /* on shutdown we stale and complete the buffer immediately */ 1469 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { 1470 xfs_buf_ioerror(bp, -EIO); 1471 bp->b_flags &= ~XBF_DONE; 1472 xfs_buf_stale(bp); 1473 if (bp->b_flags & XBF_ASYNC) 1474 xfs_buf_ioend(bp); 1475 return -EIO; 1476 } 1477 1478 /* 1479 * Grab a reference so the buffer does not go away underneath us. For 1480 * async buffers, I/O completion drops the callers reference, which 1481 * could occur before submission returns. 1482 */ 1483 xfs_buf_hold(bp); 1484 1485 if (bp->b_flags & XBF_WRITE) 1486 xfs_buf_wait_unpin(bp); 1487 1488 /* clear the internal error state to avoid spurious errors */ 1489 bp->b_io_error = 0; 1490 1491 /* 1492 * Set the count to 1 initially, this will stop an I/O completion 1493 * callout which happens before we have started all the I/O from calling 1494 * xfs_buf_ioend too early. 1495 */ 1496 atomic_set(&bp->b_io_remaining, 1); 1497 if (bp->b_flags & XBF_ASYNC) 1498 xfs_buf_ioacct_inc(bp); 1499 _xfs_buf_ioapply(bp); 1500 1501 /* 1502 * If _xfs_buf_ioapply failed, we can get back here with only the IO 1503 * reference we took above. If we drop it to zero, run completion so 1504 * that we don't return to the caller with completion still pending. 1505 */ 1506 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1507 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) 1508 xfs_buf_ioend(bp); 1509 else 1510 xfs_buf_ioend_async(bp); 1511 } 1512 1513 if (wait) 1514 error = xfs_buf_iowait(bp); 1515 1516 /* 1517 * Release the hold that keeps the buffer referenced for the entire 1518 * I/O. Note that if the buffer is async, it is not safe to reference 1519 * after this release. 1520 */ 1521 xfs_buf_rele(bp); 1522 return error; 1523 } 1524 1525 void * 1526 xfs_buf_offset( 1527 struct xfs_buf *bp, 1528 size_t offset) 1529 { 1530 struct page *page; 1531 1532 if (bp->b_addr) 1533 return bp->b_addr + offset; 1534 1535 offset += bp->b_offset; 1536 page = bp->b_pages[offset >> PAGE_SHIFT]; 1537 return page_address(page) + (offset & (PAGE_SIZE-1)); 1538 } 1539 1540 /* 1541 * Move data into or out of a buffer. 
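 *
 * For example, zeroing a byte range of a buffer is simply (sketch):
 *
 *	xfs_buf_iomove(bp, boff, numbytes, NULL, XBRW_ZERO);
 *
 * while XBRW_READ and XBRW_WRITE copy data out of and into the buffer
 * respectively.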
1542 */ 1543 void 1544 xfs_buf_iomove( 1545 xfs_buf_t *bp, /* buffer to process */ 1546 size_t boff, /* starting buffer offset */ 1547 size_t bsize, /* length to copy */ 1548 void *data, /* data address */ 1549 xfs_buf_rw_t mode) /* read/write/zero flag */ 1550 { 1551 size_t bend; 1552 1553 bend = boff + bsize; 1554 while (boff < bend) { 1555 struct page *page; 1556 int page_index, page_offset, csize; 1557 1558 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; 1559 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; 1560 page = bp->b_pages[page_index]; 1561 csize = min_t(size_t, PAGE_SIZE - page_offset, 1562 BBTOB(bp->b_io_length) - boff); 1563 1564 ASSERT((csize + page_offset) <= PAGE_SIZE); 1565 1566 switch (mode) { 1567 case XBRW_ZERO: 1568 memset(page_address(page) + page_offset, 0, csize); 1569 break; 1570 case XBRW_READ: 1571 memcpy(data, page_address(page) + page_offset, csize); 1572 break; 1573 case XBRW_WRITE: 1574 memcpy(page_address(page) + page_offset, data, csize); 1575 } 1576 1577 boff += csize; 1578 data += csize; 1579 } 1580 } 1581 1582 /* 1583 * Handling of buffer targets (buftargs). 1584 */ 1585 1586 /* 1587 * Wait for any bufs with callbacks that have been submitted but have not yet 1588 * returned. These buffers will have an elevated hold count, so wait on those 1589 * while freeing all the buffers only held by the LRU. 1590 */ 1591 static enum lru_status 1592 xfs_buftarg_wait_rele( 1593 struct list_head *item, 1594 struct list_lru_one *lru, 1595 spinlock_t *lru_lock, 1596 void *arg) 1597 1598 { 1599 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); 1600 struct list_head *dispose = arg; 1601 1602 if (atomic_read(&bp->b_hold) > 1) { 1603 /* need to wait, so skip it this pass */ 1604 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); 1605 return LRU_SKIP; 1606 } 1607 if (!spin_trylock(&bp->b_lock)) 1608 return LRU_SKIP; 1609 1610 /* 1611 * clear the LRU reference count so the buffer doesn't get 1612 * ignored in xfs_buf_rele(). 1613 */ 1614 atomic_set(&bp->b_lru_ref, 0); 1615 bp->b_state |= XFS_BSTATE_DISPOSE; 1616 list_lru_isolate_move(lru, item, dispose); 1617 spin_unlock(&bp->b_lock); 1618 return LRU_REMOVED; 1619 } 1620 1621 void 1622 xfs_wait_buftarg( 1623 struct xfs_buftarg *btp) 1624 { 1625 LIST_HEAD(dispose); 1626 int loop = 0; 1627 1628 /* 1629 * First wait on the buftarg I/O count for all in-flight buffers to be 1630 * released. This is critical as new buffers do not make the LRU until 1631 * they are released. 1632 * 1633 * Next, flush the buffer workqueue to ensure all completion processing 1634 * has finished. Just waiting on buffer locks is not sufficient for 1635 * async IO as the reference count held over IO is not released until 1636 * after the buffer lock is dropped. Hence we need to ensure here that 1637 * all reference counts have been dropped before we start walking the 1638 * LRU list. 1639 */ 1640 while (percpu_counter_sum(&btp->bt_io_count)) 1641 delay(100); 1642 flush_workqueue(btp->bt_mount->m_buf_workqueue); 1643 1644 /* loop until there is nothing left on the lru list. 
*/ 1645 while (list_lru_count(&btp->bt_lru)) { 1646 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele, 1647 &dispose, LONG_MAX); 1648 1649 while (!list_empty(&dispose)) { 1650 struct xfs_buf *bp; 1651 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1652 list_del_init(&bp->b_lru); 1653 if (bp->b_flags & XBF_WRITE_FAIL) { 1654 xfs_alert(btp->bt_mount, 1655 "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!", 1656 (long long)bp->b_bn); 1657 xfs_alert(btp->bt_mount, 1658 "Please run xfs_repair to determine the extent of the problem."); 1659 } 1660 xfs_buf_rele(bp); 1661 } 1662 if (loop++ != 0) 1663 delay(100); 1664 } 1665 } 1666 1667 static enum lru_status 1668 xfs_buftarg_isolate( 1669 struct list_head *item, 1670 struct list_lru_one *lru, 1671 spinlock_t *lru_lock, 1672 void *arg) 1673 { 1674 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); 1675 struct list_head *dispose = arg; 1676 1677 /* 1678 * we are inverting the lru lock/bp->b_lock here, so use a trylock. 1679 * If we fail to get the lock, just skip it. 1680 */ 1681 if (!spin_trylock(&bp->b_lock)) 1682 return LRU_SKIP; 1683 /* 1684 * Decrement the b_lru_ref count unless the value is already 1685 * zero. If the value is already zero, we need to reclaim the 1686 * buffer, otherwise it gets another trip through the LRU. 1687 */ 1688 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { 1689 spin_unlock(&bp->b_lock); 1690 return LRU_ROTATE; 1691 } 1692 1693 bp->b_state |= XFS_BSTATE_DISPOSE; 1694 list_lru_isolate_move(lru, item, dispose); 1695 spin_unlock(&bp->b_lock); 1696 return LRU_REMOVED; 1697 } 1698 1699 static unsigned long 1700 xfs_buftarg_shrink_scan( 1701 struct shrinker *shrink, 1702 struct shrink_control *sc) 1703 { 1704 struct xfs_buftarg *btp = container_of(shrink, 1705 struct xfs_buftarg, bt_shrinker); 1706 LIST_HEAD(dispose); 1707 unsigned long freed; 1708 1709 freed = list_lru_shrink_walk(&btp->bt_lru, sc, 1710 xfs_buftarg_isolate, &dispose); 1711 1712 while (!list_empty(&dispose)) { 1713 struct xfs_buf *bp; 1714 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1715 list_del_init(&bp->b_lru); 1716 xfs_buf_rele(bp); 1717 } 1718 1719 return freed; 1720 } 1721 1722 static unsigned long 1723 xfs_buftarg_shrink_count( 1724 struct shrinker *shrink, 1725 struct shrink_control *sc) 1726 { 1727 struct xfs_buftarg *btp = container_of(shrink, 1728 struct xfs_buftarg, bt_shrinker); 1729 return list_lru_shrink_count(&btp->bt_lru, sc); 1730 } 1731 1732 void 1733 xfs_free_buftarg( 1734 struct xfs_buftarg *btp) 1735 { 1736 unregister_shrinker(&btp->bt_shrinker); 1737 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); 1738 percpu_counter_destroy(&btp->bt_io_count); 1739 list_lru_destroy(&btp->bt_lru); 1740 1741 xfs_blkdev_issue_flush(btp); 1742 1743 kmem_free(btp); 1744 } 1745 1746 int 1747 xfs_setsize_buftarg( 1748 xfs_buftarg_t *btp, 1749 unsigned int sectorsize) 1750 { 1751 /* Set up metadata sector size info */ 1752 btp->bt_meta_sectorsize = sectorsize; 1753 btp->bt_meta_sectormask = sectorsize - 1; 1754 1755 if (set_blocksize(btp->bt_bdev, sectorsize)) { 1756 xfs_warn(btp->bt_mount, 1757 "Cannot set_blocksize to %u on device %pg", 1758 sectorsize, btp->bt_bdev); 1759 return -EINVAL; 1760 } 1761 1762 /* Set up device logical sector size mask */ 1763 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev); 1764 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1; 1765 1766 return 0; 1767 } 1768 1769 /* 1770 * When allocating the initial buffer target we have not 
yet 1771  * read in the superblock, so don't know what sized sectors
1772  * are being used at this early stage. Play safe.
1773  */
1774 STATIC int
1775 xfs_setsize_buftarg_early(
1776	xfs_buftarg_t		*btp,
1777	struct block_device	*bdev)
1778 {
1779	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1780 }
1781
1782 xfs_buftarg_t *
1783 xfs_alloc_buftarg(
1784	struct xfs_mount	*mp,
1785	struct block_device	*bdev,
1786	struct dax_device	*dax_dev)
1787 {
1788	xfs_buftarg_t		*btp;
1789
1790	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1791
1792	btp->bt_mount = mp;
1793	btp->bt_dev = bdev->bd_dev;
1794	btp->bt_bdev = bdev;
1795	btp->bt_daxdev = dax_dev;
1796
1797	if (xfs_setsize_buftarg_early(btp, bdev))
1798		goto error_free;
1799
1800	if (list_lru_init(&btp->bt_lru))
1801		goto error_free;
1802
1803	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1804		goto error_lru;
1805
1806	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1807	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1808	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1809	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1810	if (register_shrinker(&btp->bt_shrinker))
1811		goto error_pcpu;
1812	return btp;
1813
1814 error_pcpu:
1815	percpu_counter_destroy(&btp->bt_io_count);
1816 error_lru:
1817	list_lru_destroy(&btp->bt_lru);
1818 error_free:
1819	kmem_free(btp);
1820	return NULL;
1821 }
1822
1823 /*
1824  * Cancel a delayed write list.
1825  *
1826  * Remove each buffer from the list, clear the delwri queue flag and drop the
1827  * associated buffer reference.
1828  */
1829 void
1830 xfs_buf_delwri_cancel(
1831	struct list_head	*list)
1832 {
1833	struct xfs_buf		*bp;
1834
1835	while (!list_empty(list)) {
1836		bp = list_first_entry(list, struct xfs_buf, b_list);
1837
1838		xfs_buf_lock(bp);
1839		bp->b_flags &= ~_XBF_DELWRI_Q;
1840		list_del_init(&bp->b_list);
1841		xfs_buf_relse(bp);
1842	}
1843 }
1844
1845 /*
1846  * Add a buffer to the delayed write list.
1847  *
1848  * This queues a buffer for writeout if it hasn't already been. Note that
1849  * neither this routine nor the buffer list submission functions perform
1850  * any internal synchronization. It is expected that the lists are thread-local
1851  * to the callers.
1852  *
1853  * Returns true if we queued up the buffer, or false if it already had
1854  * been on the buffer list.
1855  */
1856 bool
1857 xfs_buf_delwri_queue(
1858	struct xfs_buf		*bp,
1859	struct list_head	*list)
1860 {
1861	ASSERT(xfs_buf_islocked(bp));
1862	ASSERT(!(bp->b_flags & XBF_READ));
1863
1864	/*
1865	 * If the buffer is already marked delwri it is already queued up
1866	 * by someone else for immediate writeout. Just ignore it in that
1867	 * case.
1868	 */
1869	if (bp->b_flags & _XBF_DELWRI_Q) {
1870		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1871		return false;
1872	}
1873
1874	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1875
1876	/*
1877	 * If a buffer gets written out synchronously or marked stale while it
1878	 * is on a delwri list we lazily remove it. To do this, the other party
1879	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1880	 * It remains referenced and on the list. In a rare corner case it
1881	 * might get re-added to a delwri list after the synchronous writeout, in
1882	 * which case we just need to re-add the flag here.
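	 *
	 * Sketch of that corner case: a buffer on a delwri list is written
	 * synchronously via xfs_bwrite(), which clears _XBF_DELWRI_Q but
	 * leaves b_list alone; if the buffer is dirtied and queued again
	 * before the list is submitted, we find it still on the list and
	 * only need to restore the flag.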
1883 */ 1884 bp->b_flags |= _XBF_DELWRI_Q; 1885 if (list_empty(&bp->b_list)) { 1886 atomic_inc(&bp->b_hold); 1887 list_add_tail(&bp->b_list, list); 1888 } 1889 1890 return true; 1891 } 1892 1893 /* 1894 * Compare function is more complex than it needs to be because 1895 * the return value is only 32 bits and we are doing comparisons 1896 * on 64 bit values 1897 */ 1898 static int 1899 xfs_buf_cmp( 1900 void *priv, 1901 struct list_head *a, 1902 struct list_head *b) 1903 { 1904 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); 1905 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); 1906 xfs_daddr_t diff; 1907 1908 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; 1909 if (diff < 0) 1910 return -1; 1911 if (diff > 0) 1912 return 1; 1913 return 0; 1914 } 1915 1916 /* 1917 * Submit buffers for write. If wait_list is specified, the buffers are 1918 * submitted using sync I/O and placed on the wait list such that the caller can 1919 * iowait each buffer. Otherwise async I/O is used and the buffers are released 1920 * at I/O completion time. In either case, buffers remain locked until I/O 1921 * completes and the buffer is released from the queue. 1922 */ 1923 static int 1924 xfs_buf_delwri_submit_buffers( 1925 struct list_head *buffer_list, 1926 struct list_head *wait_list) 1927 { 1928 struct xfs_buf *bp, *n; 1929 LIST_HEAD (submit_list); 1930 int pinned = 0; 1931 struct blk_plug plug; 1932 1933 list_sort(NULL, buffer_list, xfs_buf_cmp); 1934 1935 blk_start_plug(&plug); 1936 list_for_each_entry_safe(bp, n, buffer_list, b_list) { 1937 if (!wait_list) { 1938 if (xfs_buf_ispinned(bp)) { 1939 pinned++; 1940 continue; 1941 } 1942 if (!xfs_buf_trylock(bp)) 1943 continue; 1944 } else { 1945 xfs_buf_lock(bp); 1946 } 1947 1948 /* 1949 * Someone else might have written the buffer synchronously or 1950 * marked it stale in the meantime. In that case only the 1951 * _XBF_DELWRI_Q flag got cleared, and we have to drop the 1952 * reference and remove it from the list here. 1953 */ 1954 if (!(bp->b_flags & _XBF_DELWRI_Q)) { 1955 list_del_init(&bp->b_list); 1956 xfs_buf_relse(bp); 1957 continue; 1958 } 1959 1960 trace_xfs_buf_delwri_split(bp, _RET_IP_); 1961 1962 /* 1963 * If we have a wait list, each buffer (and associated delwri 1964 * queue reference) transfers to it and is submitted 1965 * synchronously. Otherwise, drop the buffer from the delwri 1966 * queue and submit async. 1967 */ 1968 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); 1969 bp->b_flags |= XBF_WRITE; 1970 if (wait_list) { 1971 bp->b_flags &= ~XBF_ASYNC; 1972 list_move_tail(&bp->b_list, wait_list); 1973 } else { 1974 bp->b_flags |= XBF_ASYNC; 1975 list_del_init(&bp->b_list); 1976 } 1977 __xfs_buf_submit(bp, false); 1978 } 1979 blk_finish_plug(&plug); 1980 1981 return pinned; 1982 } 1983 1984 /* 1985 * Write out a buffer list asynchronously. 1986 * 1987 * This will take the @buffer_list, write all non-locked and non-pinned buffers 1988 * out and not wait for I/O completion on any of the buffers. This interface 1989 * is only safely useable for callers that can track I/O completion by higher 1990 * level means, e.g. AIL pushing as the @buffer_list is consumed in this 1991 * function. 1992 */ 1993 int 1994 xfs_buf_delwri_submit_nowait( 1995 struct list_head *buffer_list) 1996 { 1997 return xfs_buf_delwri_submit_buffers(buffer_list, NULL); 1998 } 1999 2000 /* 2001 * Write out a buffer list synchronously. 
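 *
 * The expected calling pattern is (illustrative sketch, each buffer locked
 * by the caller when it is queued):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	(queue more buffers)
 *	error = xfs_buf_delwri_submit(&buffer_list);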
2002 * 2003 * This will take the @buffer_list, write all buffers out and wait for I/O 2004 * completion on all of the buffers. @buffer_list is consumed by the function, 2005 * so callers must have some other way of tracking buffers if they require such 2006 * functionality. 2007 */ 2008 int 2009 xfs_buf_delwri_submit( 2010 struct list_head *buffer_list) 2011 { 2012 LIST_HEAD (wait_list); 2013 int error = 0, error2; 2014 struct xfs_buf *bp; 2015 2016 xfs_buf_delwri_submit_buffers(buffer_list, &wait_list); 2017 2018 /* Wait for IO to complete. */ 2019 while (!list_empty(&wait_list)) { 2020 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 2021 2022 list_del_init(&bp->b_list); 2023 2024 /* 2025 * Wait on the locked buffer, check for errors and unlock and 2026 * release the delwri queue reference. 2027 */ 2028 error2 = xfs_buf_iowait(bp); 2029 xfs_buf_relse(bp); 2030 if (!error) 2031 error = error2; 2032 } 2033 2034 return error; 2035 } 2036 2037 /* 2038 * Push a single buffer on a delwri queue. 2039 * 2040 * The purpose of this function is to submit a single buffer of a delwri queue 2041 * and return with the buffer still on the original queue. The waiting delwri 2042 * buffer submission infrastructure guarantees transfer of the delwri queue 2043 * buffer reference to a temporary wait list. We reuse this infrastructure to 2044 * transfer the buffer back to the original queue. 2045 * 2046 * Note the buffer transitions from the queued state, to the submitted and wait 2047 * listed state and back to the queued state during this call. The buffer 2048 * locking and queue management logic between _delwri_pushbuf() and 2049 * _delwri_queue() guarantee that the buffer cannot be queued to another list 2050 * before returning. 2051 */ 2052 int 2053 xfs_buf_delwri_pushbuf( 2054 struct xfs_buf *bp, 2055 struct list_head *buffer_list) 2056 { 2057 LIST_HEAD (submit_list); 2058 int error; 2059 2060 ASSERT(bp->b_flags & _XBF_DELWRI_Q); 2061 2062 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); 2063 2064 /* 2065 * Isolate the buffer to a new local list so we can submit it for I/O 2066 * independently from the rest of the original list. 2067 */ 2068 xfs_buf_lock(bp); 2069 list_move(&bp->b_list, &submit_list); 2070 xfs_buf_unlock(bp); 2071 2072 /* 2073 * Delwri submission clears the DELWRI_Q buffer flag and returns with 2074 * the buffer on the wait list with the original reference. Rather than 2075 * bounce the buffer from a local wait list back to the original list 2076 * after I/O completion, reuse the original list as the wait list. 2077 */ 2078 xfs_buf_delwri_submit_buffers(&submit_list, buffer_list); 2079 2080 /* 2081 * The buffer is now locked, under I/O and wait listed on the original 2082 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and 2083 * return with the buffer unlocked and on the original queue. 2084 */ 2085 error = xfs_buf_iowait(bp); 2086 bp->b_flags |= _XBF_DELWRI_Q; 2087 xfs_buf_unlock(bp); 2088 2089 return error; 2090 } 2091 2092 int __init 2093 xfs_buf_init(void) 2094 { 2095 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", 2096 KM_ZONE_HWALIGN, NULL); 2097 if (!xfs_buf_zone) 2098 goto out; 2099 2100 return 0; 2101 2102 out: 2103 return -ENOMEM; 2104 } 2105 2106 void 2107 xfs_buf_terminate(void) 2108 { 2109 kmem_zone_destroy(xfs_buf_zone); 2110 } 2111 2112 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) 2113 { 2114 /* 2115 * Set the lru reference count to 0 based on the error injection tag. 
2116 * This allows userspace to disrupt buffer caching for debug/testing 2117 * purposes. 2118 */ 2119 if (XFS_TEST_ERROR(false, bp->b_target->bt_mount, 2120 XFS_ERRTAG_BUF_LRU_REF)) 2121 lru_ref = 0; 2122 2123 atomic_set(&bp->b_lru_ref, lru_ref); 2124 } 2125
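/*
 * Illustrative example (not part of this file's API): a typical cached,
 * verified metadata read built from the functions above. The function name
 * "example_read_block" and the ops structure "example_buf_ops" are
 * hypothetical placeholders.
 *
 *	static int
 *	example_read_block(
 *		struct xfs_buftarg	*target,
 *		xfs_daddr_t		blkno,
 *		size_t			numblks)
 *	{
 *		DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *		struct xfs_buf		*bp;
 *		int			error;
 *
 *		bp = xfs_buf_read_map(target, &map, 1, 0, &example_buf_ops);
 *		if (!bp)
 *			return -ENOMEM;
 *		if (bp->b_error) {
 *			error = bp->b_error;
 *			xfs_buf_relse(bp);
 *			return error;
 *		}
 *		(use bp->b_addr, then release the locked buffer)
 *		xfs_buf_relse(bp);
 *		return 0;
 *	}
 */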