/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into
 * the kernel's virtual memory space, while scattered ABD data chunks are
 * allocated as physical pages and then mapped in only while they are actually
 * being accessed through one of the abd_* library functions. Using scattered
 * ABDs provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space. See abd_alloc_chunks() for details.
 */
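/*
 * Illustrative sketch (not part of this file): from a consumer's point of
 * view the scatter/linear distinction is hidden behind the abd_* interfaces
 * declared in sys/abd.h, roughly:
 *
 *	abd_t *abd = abd_alloc(131072, B_FALSE);	// scatter by default
 *	abd_copy_from_buf(abd, src_buf, 131072);	// copied chunk by chunk
 *	(void) abd_iterate_func(abd, 0, 131072, my_cb, my_arg);
 *	abd_free(abd);
 *
 * See sys/abd.h for the exact prototypes; the point is only that callers
 * never map pages themselves, the abd_* functions do it per chunk.
 */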

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};
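/*
 * Note (added for clarity): these named counters are registered with the
 * kstat facility by abd_init() below under "zfs/abdstats", so on Linux they
 * can typically be read at runtime from /proc/spl/kstat/zfs/abdstats.
 */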

static struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's. Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page). Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations. This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
static int zfs_abd_scatter_min_size = 512 * 3;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page. This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

struct page;
/*
 * _KERNEL   - Will point to ZERO_PAGE if it is available or it will be
 *             an allocated zero'd PAGESIZE buffer.
 * Userspace - Will be an allocated zero'd PAGESIZE buffer.
 *
 * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
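/*
 * Worked example (added for illustration, assuming a 4KB PAGESIZE): the
 * chunk count is simply the allocation size rounded up to whole pages, so
 *
 *	abd_chunkcnt_for_bytes(512)    == 1	(7/8 of the page is waste)
 *	abd_chunkcnt_for_bytes(4096)   == 1
 *	abd_chunkcnt_for_bytes(6144)   == 2	(2KB of waste in the last page)
 *	abd_chunkcnt_for_bytes(131072) == 32
 *
 * which is why allocations below zfs_abd_scatter_min_size (1.5KB by default)
 * are steered to linear ABDs instead.
 */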

abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	(void) size;
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}

#ifdef _KERNEL
static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
	put_page(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM	__GFP_WAIT
#endif

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer. All single-page (4K) ABD's can be
		 * represented this way. Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages). This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization cannot be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}
#else

/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */
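/*
 * Worked example (added for illustration, assuming 4KB pages and the default
 * zfs_abd_scatter_max_order): for a 128KB ABD, nr_pages is 32, so the
 * non-HIGHMEM allocator above first requests a single order-5 compound page
 * (highbit64(32) - 1 == 5). If that fails, max_order drops to 4 and the
 * request is retried, degenerating to 32 individual order-0 pages under
 * heavy fragmentation. Each successful allocation becomes one scatterlist
 * entry, which is why a lightly fragmented system often ends up with
 * table.nents == 1 and the linear-page optimization.
 */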

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}

/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);
#else
	abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT	(highbit64(PAGESIZE)-1)
#endif

#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i)	\
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		struct page *p = nth_page(sg_page(sg), 0);
		umem_free_aligned(p, PAGESIZE);
	}
	abd_free_sg_table(abd);
}

static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */

boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}
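/*
 * Worked example (added for illustration, 4KB pages): a 6KB scatter ABD is
 * backed by two pages, so abd_update_scatter_stats() charges
 * P2ROUNDUP(6144, 4096) - 6144 = 2048 bytes to scatter_chunk_waste and to
 * ARC_SPACE_ABD_CHUNK_WASTE; the same amount is returned when the ABD is
 * freed with ABDSTAT_DECR.
 */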

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
	umem_free_aligned(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}

static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we
 * don't plan to store this ABD in memory for a long period of time, so we
 * should allocate the ABD type that requires the least data copying to do
 * the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages, thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}
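/*
 * For reference (illustrative sketch only): the zero-copy alternative the
 * comment above describes would look roughly like the following, using the
 * existing abd_get_offset_size() interface to wrap the original pages
 * instead of allocating and copying into a new buffer:
 *
 *	abd_t *io_abd = abd_get_offset_size(src_abd, off, io_size);
 *	// ... submit io_abd to the block layer ...
 *	abd_free(io_abd);	// drops the view; pages stay with src_abd
 *
 * Nothing in this file currently does this; abd_alloc_for_io() simply falls
 * back to abd_alloc() as noted above.
 */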

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	(void) size;
	int i = 0;
	struct scatterlist *sg = NULL;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been
 * exhausted, in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}
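/*
 * Usage sketch (added for illustration; the real consumer of this pattern is
 * abd_iterate_func() in the common abd.c): a caller walks an ABD one mapped
 * chunk at a time, never holding a mapping across abd_iter_advance():
 *
 *	struct abd_iter aiter;
 *	abd_iter_init(&aiter, abd);
 *	abd_iter_advance(&aiter, off);
 *	while (!abd_iter_at_end(&aiter)) {
 *		abd_iter_map(&aiter);
 *		// use aiter.iter_mapaddr for aiter.iter_mapsize bytes
 *		size_t len = aiter.iter_mapsize;
 *		abd_iter_unmap(&aiter);
 *		abd_iter_advance(&aiter, len);
 *	}
 */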

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * aiter has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}

static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network related block devices use tcp_sendpage, which
		 * doesn't behave well when given 0-count pages; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}
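/*
 * Usage sketch (added for illustration; vdev_disk.c is the real caller):
 * abd_nr_pages_off() sizes a bio's vector array and abd_bio_map_off() then
 * attaches the ABD's pages to it. A non-zero return value is the number of
 * bytes that did not fit and still need another bio:
 *
 *	unsigned int nr_vecs = abd_nr_pages_off(zio->io_abd, io_size, off);
 *	// bio allocation elided; nr_vecs bounds bio->bi_max_vecs
 *	unsigned int resid = abd_bio_map_off(bio, zio->io_abd, io_size, off);
 *	// resid > 0 means the caller must map the rest into a follow-up bio
 */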

/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif