/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check
	 * has to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				 KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned noio_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		noio_flag = memalloc_noio_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_noio_restore(noio_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno < 0 || blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
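
/*
 * Illustrative sketch only, not part of this file: a hypothetical caller that
 * wants a locked buffer for a single contiguous range would typically build a
 * one-entry map and release the buffer with xfs_buf_relse() when finished.
 * The names "btp", "blkno" and "numblks" below are assumed to be supplied by
 * that caller:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get_map(btp, &map, 1, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	... use or modify bp->b_addr ...
 *	xfs_buf_relse(bp);
 *
 * xfs_buf_get_map() returns the buffer locked with a hold taken;
 * xfs_buf_relse() unlocks the buffer and drops that hold.
 */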

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a
 * deadlock-safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;	/* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			/*
			 * If the buffer is added to the LRU take a new
			 * reference to the buffer for the LRU and clear the
			 * (now stale) dispose list state flag
			 */
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			}
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			/*
			 * most of the time buffers will already be removed from
			 * the LRU, so optimise that case by checking for the
			 * XFS_BSTATE_DISPOSE flag indicating the last list the
			 * buffer was on was the disposal list
			 */
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
			} else {
				ASSERT(list_empty(&bp->b_lru));
			}
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (error) {
		spin_lock(&bp->b_lock);
		if (!bp->b_io_error)
			bp->b_io_error = error;
		spin_unlock(&bp->b_lock);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;


	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}

}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no ops on block 0x%llx/0x%x",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr, 64);
				dump_stack();
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership and
 * the current reference to the IO. It is not safe to reference the buffer after
 * a call to this function unless the caller holds an additional reference
 * itself.
 */
void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our IO reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error)
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	xfs_buf_rele(bp);
	/* Note: it is not safe to reference bp now we've dropped our ref */
}

/*
 * Synchronous buffer IO submission path, read or write.
 */
int
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
{
	int		error;

	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitter's reference
	 * count, nor the buffer lock. Hence we cannot release the reference we
	 * are about to take until we've waited for all IO completion to occur,
	 * including any xfs_buf_ioend_async() work that may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp);

	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
}

void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)

{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int loop = 0;

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
"Please run xfs_repair to determine the extent of the problem.",
					(long long)bp->b_bn);
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
}

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_shrink_count(&btp->bt_lru, sc);
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	list_lru_destroy(&btp->bt_lru);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s",
			sectorsize, name);
		return -EINVAL;
	}

	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;

	if (list_lru_init(&btp->bt_lru))
		goto error;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it is already queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get readded to a delwri list after the synchronous writeout, in
	 * which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;

		/*
		 * we do all IO submission async. This means if we need to wait
		 * for IO completion we need to take an extra reference so the
		 * buffer is still valid on the other side.
		 */
		if (wait)
			xfs_buf_hold(bp);
		else
			list_del_init(&bp->b_list);

		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}
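
/*
 * Illustrative sketch only, not part of this file: the delwri helpers above
 * operate on a caller-owned, thread-local list.  A hypothetical caller that
 * holds locked buffers "bp1" and "bp2" might batch them like this:
 *
 *	LIST_HEAD(buffer_list);
 *	int	error;
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_relse(bp1);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	xfs_buf_relse(bp2);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_queue() takes its own hold on each buffer, so the caller can
 * drop its lock and reference straight away; xfs_buf_delwri_submit() then
 * consumes the list and waits for all of the IO to complete.
 */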

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	return 0;

out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_zone_destroy(xfs_buf_zone);
}
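
/*
 * Usage note, illustrative only: xfs_buf_init() and xfs_buf_terminate() do no
 * more than create and destroy the xfs_buf_zone slab cache, so they are
 * expected to be paired once around the lifetime of the module, e.g. from the
 * filesystem module's init/exit path:
 *
 *	error = xfs_buf_init();
 *	if (error)
 *		return error;
 *	...
 *	xfs_buf_terminate();
 */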