xref: /freebsd/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c (revision e47161e5f1f01ef300c6e7efdb9c92e3a6c497ff)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
23  * Copyright (c) 2019 by Delphix. All rights reserved.
24  * Copyright (c) 2023, 2024, Klara Inc.
25  */
26 
27 /*
28  * See abd.c for a general overview of the arc buffered data (ABD).
29  *
30  * Linear buffers act exactly like normal buffers and are always mapped into the
31  * kernel's virtual memory space, while scattered ABD data chunks are allocated
32  * as physical pages and then mapped in only while they are actually being
33  * accessed through one of the abd_* library functions. Using scattered ABDs
34  * provides several benefits:
35  *
36  *  (1) They avoid use of kmem_*, preventing performance problems where running
37  *      kmem_reap on very large memory systems never finishes and causes
38  *      constant TLB shootdowns.
39  *
40  *  (2) Fragmentation is less of an issue since when we are at the limit of
41  *      allocatable space, we won't have to search around for a long free
42  *      hole in the VA space for large ARC allocations. Each chunk is mapped in
43  *      individually, so even if we are using HIGHMEM (see next point) we
44  *      wouldn't need to worry about finding a contiguous address range.
45  *
46  *  (3) If we are not using HIGHMEM, then all physical memory is always
47  *      mapped into the kernel's address space, so we also avoid the map /
48  *      unmap costs on each ABD access.
49  *
50  * If we are not using HIGHMEM, scattered buffers which have only one chunk
51  * can be treated as linear buffers, because they are contiguous in the
52  * kernel's virtual address space.  See abd_alloc_chunks() for details.
53  */
54 
55 #include <sys/abd_impl.h>
56 #include <sys/param.h>
57 #include <sys/zio.h>
58 #include <sys/arc.h>
59 #include <sys/zfs_context.h>
60 #include <sys/zfs_znode.h>
61 #include <linux/kmap_compat.h>
62 #include <linux/mm_compat.h>
63 #include <linux/scatterlist.h>
64 #include <linux/version.h>
65 
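/*
 * Newer kernels renamed MAX_ORDER to MAX_PAGE_ORDER, so accept whichever
 * of the two the running kernel provides.
 */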
66 #if defined(MAX_ORDER)
67 #define	ABD_MAX_ORDER	(MAX_ORDER)
68 #elif defined(MAX_PAGE_ORDER)
69 #define	ABD_MAX_ORDER	(MAX_PAGE_ORDER)
70 #endif
71 
72 typedef struct abd_stats {
73 	kstat_named_t abdstat_struct_size;
74 	kstat_named_t abdstat_linear_cnt;
75 	kstat_named_t abdstat_linear_data_size;
76 	kstat_named_t abdstat_scatter_cnt;
77 	kstat_named_t abdstat_scatter_data_size;
78 	kstat_named_t abdstat_scatter_chunk_waste;
79 	kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
80 	kstat_named_t abdstat_scatter_page_multi_chunk;
81 	kstat_named_t abdstat_scatter_page_multi_zone;
82 	kstat_named_t abdstat_scatter_page_alloc_retry;
83 	kstat_named_t abdstat_scatter_sg_table_retry;
84 } abd_stats_t;
85 
86 static abd_stats_t abd_stats = {
87 	/* Amount of memory occupied by all of the abd_t struct allocations */
88 	{ "struct_size",			KSTAT_DATA_UINT64 },
89 	/*
90 	 * The number of linear ABDs which are currently allocated, excluding
91 	 * ABDs which don't own their data (for instance the ones which were
92 	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
93 	 * ABD takes ownership of its buf then it will become tracked.
94 	 */
95 	{ "linear_cnt",				KSTAT_DATA_UINT64 },
96 	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
97 	{ "linear_data_size",			KSTAT_DATA_UINT64 },
98 	/*
99 	 * The number of scatter ABDs which are currently allocated, excluding
100 	 * ABDs which don't own their data (for instance the ones which were
101 	 * allocated through abd_get_offset()).
102 	 */
103 	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
104 	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
105 	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
106 	/*
107 	 * The amount of space wasted at the end of the last chunk across all
108 	 * scatter ABDs tracked by scatter_cnt.
109 	 */
110 	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
111 	/*
112 	 * The number of compound allocations of a given order.  These
113 	 * allocations are spread over all currently allocated ABDs, and
114 	 * act as a measure of memory fragmentation.
115 	 */
116 	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
117 	/*
118 	 * The number of scatter ABDs which contain multiple chunks.
119 	 * ABDs are preferentially allocated from the minimum number of
120 	 * contiguous multi-page chunks; a single chunk is optimal.
121 	 */
122 	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
123 	/*
124 	 * The number of scatter ABDs which are split across memory zones.
125 	 * ABDs are preferentially allocated using pages from a single zone.
126 	 */
127 	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
128 	/*
129 	 *  The total number of retries encountered when attempting to
130 	 *  allocate the pages to populate the scatter ABD.
131 	 */
132 	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
133 	/*
134 	 *  The total number of retries encountered when attempting to
135 	 *  allocate the sg table for an ABD.
136 	 */
137 	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
138 };
139 
140 static struct {
141 	wmsum_t abdstat_struct_size;
142 	wmsum_t abdstat_linear_cnt;
143 	wmsum_t abdstat_linear_data_size;
144 	wmsum_t abdstat_scatter_cnt;
145 	wmsum_t abdstat_scatter_data_size;
146 	wmsum_t abdstat_scatter_chunk_waste;
147 	wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
148 	wmsum_t abdstat_scatter_page_multi_chunk;
149 	wmsum_t abdstat_scatter_page_multi_zone;
150 	wmsum_t abdstat_scatter_page_alloc_retry;
151 	wmsum_t abdstat_scatter_sg_table_retry;
152 } abd_sums;
153 
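/* Iterate over the scatterlist entries backing a scatter ABD. */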
154 #define	abd_for_each_sg(abd, sg, n, i)	\
155 	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
156 
157 /*
158  * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
159  * ABD's.  Smaller allocations will use linear ABD's, which use
160  * zio_[data_]buf_alloc().
161  *
162  * Scatter ABD's use at least one page each, so sub-page allocations waste
163  * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
164  * half of each page).  Using linear ABD's for small allocations means that
165  * they will be put on slabs which contain many allocations.  This can
166  * improve memory efficiency, but it also makes it much harder for ARC
167  * evictions to actually free pages, because all the buffers on one slab need
168  * to be freed in order for the slab (and underlying pages) to be freed.
169  * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
170  * possible for them to actually waste more memory than scatter (one page per
171  * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
172  *
173  * Spill blocks are typically 512B and are heavily used on systems running
174  * selinux with the default dnode size and the `xattr=sa` property set.
175  *
176  * By default we use linear allocations for 512B and 1KB, and scatter
177  * allocations for larger (1.5KB and up).
178  */
179 static int zfs_abd_scatter_min_size = 512 * 3;
180 
181 /*
182  * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
183  * just a single zero'd page. This allows us to conserve memory by
184  * only using a single zero page for the scatterlist.
185  */
186 abd_t *abd_zero_scatter = NULL;
187 
188 struct page;
189 /*
190  * abd_zero_page is assigned to each of the pages of abd_zero_scatter. It will
191  * point to ZERO_PAGE if it is available or it will be an allocated zero'd
192  * PAGESIZE buffer.
193  */
194 static struct page *abd_zero_page = NULL;
195 
196 static kmem_cache_t *abd_cache = NULL;
197 static kstat_t *abd_ksp;
198 
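/*
 * Number of PAGESIZE-sized chunks needed to hold "size" bytes.
 */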
199 static uint_t
200 abd_chunkcnt_for_bytes(size_t size)
201 {
202 	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
203 }
204 
205 abd_t *
206 abd_alloc_struct_impl(size_t size)
207 {
208 	/*
209 	 * In Linux we do not use the size passed in during ABD
210 	 * allocation, so we just ignore it.
211 	 */
212 	(void) size;
213 	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
214 	ASSERT3P(abd, !=, NULL);
215 	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
216 
217 	return (abd);
218 }
219 
220 void
221 abd_free_struct_impl(abd_t *abd)
222 {
223 	kmem_cache_free(abd_cache, abd);
224 	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
225 }
226 
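/*
 * Largest compound page order to attempt when allocating scatter ABD
 * chunks.  Tunable via the zfs_abd_scatter_max_order module parameter.
 */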
227 static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
228 
229 /*
230  * Mark zfs data pages so they can be excluded from kernel crash dumps
231  */
232 #ifdef _LP64
233 #define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E
234 
235 static inline void
236 abd_mark_zfs_page(struct page *page)
237 {
238 	get_page(page);
239 	SetPagePrivate(page);
240 	set_page_private(page, ABD_FILE_CACHE_PAGE);
241 }
242 
243 static inline void
244 abd_unmark_zfs_page(struct page *page)
245 {
246 	set_page_private(page, 0UL);
247 	ClearPagePrivate(page);
248 	put_page(page);
249 }
250 #else
251 #define	abd_mark_zfs_page(page)
252 #define	abd_unmark_zfs_page(page)
253 #endif /* _LP64 */
254 
255 #ifndef CONFIG_HIGHMEM
256 
257 #ifndef __GFP_RECLAIM
258 #define	__GFP_RECLAIM		__GFP_WAIT
259 #endif
260 
261 /*
262  * The goal is to minimize fragmentation by preferentially populating ABDs
263  * with higher order compound pages from a single zone.  Allocation size is
264  * progressively decreased until it can be satisfied without performing
265  * reclaim or compaction.  When necessary this function will degenerate to
266  * allocating individual pages and allowing reclaim to satisfy allocations.
267  */
268 void
269 abd_alloc_chunks(abd_t *abd, size_t size)
270 {
271 	struct list_head pages;
272 	struct sg_table table;
273 	struct scatterlist *sg;
274 	struct page *page, *tmp_page = NULL;
275 	gfp_t gfp = __GFP_RECLAIMABLE | __GFP_NOWARN | GFP_NOIO;
276 	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
277 	unsigned int max_order = MIN(zfs_abd_scatter_max_order,
278 	    ABD_MAX_ORDER - 1);
279 	unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
280 	unsigned int chunks = 0, zones = 0;
281 	size_t remaining_size;
282 	int nid = NUMA_NO_NODE;
283 	unsigned int alloc_pages = 0;
284 
285 	INIT_LIST_HEAD(&pages);
286 
287 	ASSERT3U(alloc_pages, <, nr_pages);
288 
289 	while (alloc_pages < nr_pages) {
290 		unsigned int chunk_pages;
291 		unsigned int order;
292 
293 		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
294 		chunk_pages = (1U << order);
295 
296 		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
297 		if (page == NULL) {
298 			if (order == 0) {
299 				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
300 				schedule_timeout_interruptible(1);
301 			} else {
302 				max_order = MAX(0, order - 1);
303 			}
304 			continue;
305 		}
306 
307 		list_add_tail(&page->lru, &pages);
308 
309 		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
310 			zones++;
311 
312 		nid = page_to_nid(page);
313 		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
314 		chunks++;
315 		alloc_pages += chunk_pages;
316 	}
317 
318 	ASSERT3S(alloc_pages, ==, nr_pages);
319 
320 	while (sg_alloc_table(&table, chunks, gfp)) {
321 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
322 		schedule_timeout_interruptible(1);
323 	}
324 
325 	sg = table.sgl;
326 	remaining_size = size;
327 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
328 		size_t sg_size = MIN(PAGESIZE << compound_order(page),
329 		    remaining_size);
330 		sg_set_page(sg, page, sg_size, 0);
331 		abd_mark_zfs_page(page);
332 		remaining_size -= sg_size;
333 
334 		sg = sg_next(sg);
335 		list_del(&page->lru);
336 	}
337 
338 	/*
339 	 * These conditions ensure that a possible transformation to a linear
340 	 * ABD would be valid.
341 	 */
342 	ASSERT(!PageHighMem(sg_page(table.sgl)));
343 	ASSERT0(ABD_SCATTER(abd).abd_offset);
344 
345 	if (table.nents == 1) {
346 		/*
347 		 * Since there is only one entry, this ABD can be represented
348 		 * as a linear buffer.  All single-page (4K) ABD's can be
349 		 * represented this way.  Some multi-page ABD's can also be
350 		 * represented this way, if we were able to allocate a single
351 		 * "chunk" (higher-order "page" which represents a power-of-2
352 		 * series of physically-contiguous pages).  This is often the
353 		 * case for 2-page (8K) ABD's.
354 		 *
355 		 * Representing a single-entry scatter ABD as a linear ABD
356 		 * has the performance advantage of avoiding the copy (and
357 		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
358 		 * A performance increase of around 5% has been observed for
359 		 * ARC-cached reads (of small blocks which can take advantage
360 		 * of this).
361 		 *
362 		 * Note that this optimization is only possible because the
363 		 * pages are always mapped into the kernel's address space.
364 		 * This is not the case for highmem pages, so the
365 		 * optimization can not be made there.
366 		 */
367 		abd->abd_flags |= ABD_FLAG_LINEAR;
368 		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
369 		abd->abd_u.abd_linear.abd_sgl = table.sgl;
370 		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
371 	} else if (table.nents > 1) {
372 		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
373 		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
374 
375 		if (zones) {
376 			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
377 			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
378 		}
379 
380 		ABD_SCATTER(abd).abd_sgl = table.sgl;
381 		ABD_SCATTER(abd).abd_nents = table.nents;
382 	}
383 }
384 #else
385 
386 /*
387  * Allocate N individual pages to construct a scatter ABD.  This function
388  * makes no attempt to request contiguous pages and requires the minimal
389  * number of kernel interfaces.  It's designed for maximum compatibility.
390  */
391 void
392 abd_alloc_chunks(abd_t *abd, size_t size)
393 {
394 	struct scatterlist *sg = NULL;
395 	struct sg_table table;
396 	struct page *page;
397 	gfp_t gfp = __GFP_RECLAIMABLE | __GFP_NOWARN | GFP_NOIO;
398 	int nr_pages = abd_chunkcnt_for_bytes(size);
399 	int i = 0;
400 
401 	while (sg_alloc_table(&table, nr_pages, gfp)) {
402 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
403 		schedule_timeout_interruptible(1);
404 	}
405 
406 	ASSERT3U(table.nents, ==, nr_pages);
407 	ABD_SCATTER(abd).abd_sgl = table.sgl;
408 	ABD_SCATTER(abd).abd_nents = nr_pages;
409 
410 	abd_for_each_sg(abd, sg, nr_pages, i) {
411 		while ((page = __page_cache_alloc(gfp)) == NULL) {
412 			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
413 			schedule_timeout_interruptible(1);
414 		}
415 
416 		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
417 		sg_set_page(sg, page, PAGESIZE, 0);
418 		abd_mark_zfs_page(page);
419 	}
420 
421 	if (nr_pages > 1) {
422 		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
423 		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
424 	}
425 }
426 #endif /* !CONFIG_HIGHMEM */
427 
428 /*
429  * This must be called if any of the sg_table allocation functions
430  * are called.
431  */
432 static void
433 abd_free_sg_table(abd_t *abd)
434 {
435 	struct sg_table table;
436 
437 	table.sgl = ABD_SCATTER(abd).abd_sgl;
438 	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
439 	sg_free_table(&table);
440 }
441 
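/*
 * Unmark and free the pages referenced by this scatter ABD's scatterlist,
 * then release the sg table itself.
 */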
442 void
443 abd_free_chunks(abd_t *abd)
444 {
445 	struct scatterlist *sg = NULL;
446 	struct page *page;
447 	int nr_pages = ABD_SCATTER(abd).abd_nents;
448 	int order, i = 0;
449 
450 	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
451 		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);
452 
453 	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
454 		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
455 
456 	abd_for_each_sg(abd, sg, nr_pages, i) {
457 		page = sg_page(sg);
458 		abd_unmark_zfs_page(page);
459 		order = compound_order(page);
460 		__free_pages(page, order);
461 		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
462 		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
463 	}
464 	abd_free_sg_table(abd);
465 }
466 
467 /*
468  * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
469  * the scatterlist will be set to the zero'd out buffer abd_zero_page.
470  */
471 static void
472 abd_alloc_zero_scatter(void)
473 {
474 	struct scatterlist *sg = NULL;
475 	struct sg_table table;
476 	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
477 	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
478 	int i = 0;
479 
480 #if defined(HAVE_ZERO_PAGE_GPL_ONLY)
481 	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
482 	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
483 		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
484 		schedule_timeout_interruptible(1);
485 	}
486 	abd_mark_zfs_page(abd_zero_page);
487 #else
488 	abd_zero_page = ZERO_PAGE(0);
489 #endif /* HAVE_ZERO_PAGE_GPL_ONLY */
490 
491 	while (sg_alloc_table(&table, nr_pages, gfp)) {
492 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
493 		schedule_timeout_interruptible(1);
494 	}
495 	ASSERT3U(table.nents, ==, nr_pages);
496 
497 	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
498 	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
499 	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
500 	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
501 	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
502 	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
503 	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK;
504 
505 	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
506 		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
507 	}
508 
509 	ABDSTAT_BUMP(abdstat_scatter_cnt);
510 	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
511 	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
512 }
513 
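/*
 * Decide whether an allocation of "size" bytes should be linear: either
 * scatter ABDs are disabled or the size is below zfs_abd_scatter_min_size.
 */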
514 boolean_t
515 abd_size_alloc_linear(size_t size)
516 {
517 	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
518 }
519 
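/*
 * Update the scatter kstats (count, data size, chunk waste) and the matching
 * ARC space accounting when a scatter ABD is allocated or freed.
 */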
520 void
521 abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
522 {
523 	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
524 	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
525 	if (op == ABDSTAT_INCR) {
526 		ABDSTAT_BUMP(abdstat_scatter_cnt);
527 		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
528 		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
529 		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
530 	} else {
531 		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
532 		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
533 		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
534 		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
535 	}
536 }
537 
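/* Update the linear ABD kstats on allocation or free. */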
538 void
539 abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
540 {
541 	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
542 	if (op == ABDSTAT_INCR) {
543 		ABDSTAT_BUMP(abdstat_linear_cnt);
544 		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
545 	} else {
546 		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
547 		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
548 	}
549 }
550 
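/*
 * Sanity check a scatter ABD: it must have at least one scatterlist entry,
 * a starting offset within the first entry, and a page behind every entry.
 */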
551 void
552 abd_verify_scatter(abd_t *abd)
553 {
554 	size_t n;
555 	int i = 0;
556 	struct scatterlist *sg = NULL;
557 
558 	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
559 	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
560 	    ABD_SCATTER(abd).abd_sgl->length);
561 	n = ABD_SCATTER(abd).abd_nents;
562 	abd_for_each_sg(abd, sg, n, i) {
563 		ASSERT3P(sg_page(sg), !=, NULL);
564 	}
565 }
566 
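/*
 * Tear down abd_zero_scatter, and free abd_zero_page if it was allocated
 * rather than borrowed from ZERO_PAGE.
 */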
567 static void
568 abd_free_zero_scatter(void)
569 {
570 	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
571 	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
572 	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
573 
574 	abd_free_sg_table(abd_zero_scatter);
575 	abd_free_struct(abd_zero_scatter);
576 	abd_zero_scatter = NULL;
577 	ASSERT3P(abd_zero_page, !=, NULL);
578 #if defined(HAVE_ZERO_PAGE_GPL_ONLY)
579 	abd_unmark_zfs_page(abd_zero_page);
580 	__free_page(abd_zero_page);
581 #endif /* HAVE_ZERO_PAGE_GPL_ONLY */
582 }
583 
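/*
 * kstat update callback: fold the wmsum counters into the named values
 * reported by the abdstats kstat.
 */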
584 static int
585 abd_kstats_update(kstat_t *ksp, int rw)
586 {
587 	abd_stats_t *as = ksp->ks_data;
588 
589 	if (rw == KSTAT_WRITE)
590 		return (EACCES);
591 	as->abdstat_struct_size.value.ui64 =
592 	    wmsum_value(&abd_sums.abdstat_struct_size);
593 	as->abdstat_linear_cnt.value.ui64 =
594 	    wmsum_value(&abd_sums.abdstat_linear_cnt);
595 	as->abdstat_linear_data_size.value.ui64 =
596 	    wmsum_value(&abd_sums.abdstat_linear_data_size);
597 	as->abdstat_scatter_cnt.value.ui64 =
598 	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
599 	as->abdstat_scatter_data_size.value.ui64 =
600 	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
601 	as->abdstat_scatter_chunk_waste.value.ui64 =
602 	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
603 	for (int i = 0; i < ABD_MAX_ORDER; i++) {
604 		as->abdstat_scatter_orders[i].value.ui64 =
605 		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
606 	}
607 	as->abdstat_scatter_page_multi_chunk.value.ui64 =
608 	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
609 	as->abdstat_scatter_page_multi_zone.value.ui64 =
610 	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
611 	as->abdstat_scatter_page_alloc_retry.value.ui64 =
612 	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
613 	as->abdstat_scatter_sg_table_retry.value.ui64 =
614 	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
615 	return (0);
616 }
617 
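/*
 * Create the abd_t kmem cache, the wmsum counters and abdstats kstat, and
 * the shared zero-filled scatter ABD.
 */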
618 void
619 abd_init(void)
620 {
621 	int i;
622 
623 	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
624 	    0, NULL, NULL, NULL, NULL, NULL, KMC_RECLAIMABLE);
625 
626 	wmsum_init(&abd_sums.abdstat_struct_size, 0);
627 	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
628 	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
629 	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
630 	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
631 	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
632 	for (i = 0; i < ABD_MAX_ORDER; i++)
633 		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
634 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
635 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
636 	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
637 	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
638 
639 	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
640 	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
641 	if (abd_ksp != NULL) {
642 		for (i = 0; i < ABD_MAX_ORDER; i++) {
643 			snprintf(abd_stats.abdstat_scatter_orders[i].name,
644 			    KSTAT_STRLEN, "scatter_order_%d", i);
645 			abd_stats.abdstat_scatter_orders[i].data_type =
646 			    KSTAT_DATA_UINT64;
647 		}
648 		abd_ksp->ks_data = &abd_stats;
649 		abd_ksp->ks_update = abd_kstats_update;
650 		kstat_install(abd_ksp);
651 	}
652 
653 	abd_alloc_zero_scatter();
654 }
655 
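/*
 * Undo abd_init(): release the zero scatter ABD, the kstat, the wmsum
 * counters, and the abd_t kmem cache.
 */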
656 void
657 abd_fini(void)
658 {
659 	abd_free_zero_scatter();
660 
661 	if (abd_ksp != NULL) {
662 		kstat_delete(abd_ksp);
663 		abd_ksp = NULL;
664 	}
665 
666 	wmsum_fini(&abd_sums.abdstat_struct_size);
667 	wmsum_fini(&abd_sums.abdstat_linear_cnt);
668 	wmsum_fini(&abd_sums.abdstat_linear_data_size);
669 	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
670 	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
671 	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
672 	for (int i = 0; i < ABD_MAX_ORDER; i++)
673 		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
674 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
675 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
676 	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
677 	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
678 
679 	if (abd_cache) {
680 		kmem_cache_destroy(abd_cache);
681 		abd_cache = NULL;
682 	}
683 }
684 
685 void
686 abd_free_linear_page(abd_t *abd)
687 {
688 	/* Transform it back into a scatter ABD for freeing */
689 	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
690 	abd->abd_flags &= ~ABD_FLAG_LINEAR;
691 	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
692 	ABD_SCATTER(abd).abd_nents = 1;
693 	ABD_SCATTER(abd).abd_offset = 0;
694 	ABD_SCATTER(abd).abd_sgl = sg;
695 	abd_free_chunks(abd);
696 
697 	abd_update_scatter_stats(abd, ABDSTAT_DECR);
698 }
699 
700 /*
701  * If we're going to use this ABD for doing I/O using the block layer, the
702  * consumer of the ABD data doesn't care if it's scattered or not, and we don't
703  * plan to store this ABD in memory for a long period of time, we should
704  * allocate the ABD type that requires the least data copying to do the I/O.
705  *
706  * On Linux the optimal thing to do would be to use abd_get_offset() and
707  * construct a new ABD which shares the original pages thereby eliminating
708  * the copy.  But for the moment a new linear ABD is allocated until this
709  * performance optimization can be implemented.
710  */
711 abd_t *
712 abd_alloc_for_io(size_t size, boolean_t is_metadata)
713 {
714 	return (abd_alloc(size, is_metadata));
715 }
716 
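/*
 * Fill in (or allocate) an ABD that refers to the portion of sabd starting
 * at byte offset "off", sharing sabd's scatterlist rather than copying data.
 */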
717 abd_t *
718 abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
719     size_t size)
720 {
721 	(void) size;
722 	int i = 0;
723 	struct scatterlist *sg = NULL;
724 
725 	abd_verify(sabd);
726 	ASSERT3U(off, <=, sabd->abd_size);
727 
728 	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
729 
730 	if (abd == NULL)
731 		abd = abd_alloc_struct(0);
732 
733 	/*
734 	 * Even if this buf is filesystem metadata, we only track that
735 	 * if we own the underlying data buffer, which is not true in
736 	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
737 	 */
738 
739 	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
740 		if (new_offset < sg->length)
741 			break;
742 		new_offset -= sg->length;
743 	}
744 
745 	ABD_SCATTER(abd).abd_sgl = sg;
746 	ABD_SCATTER(abd).abd_offset = new_offset;
747 	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
748 
749 	return (abd);
750 }
751 
752 /*
753  * Initialize the abd_iter.
754  */
755 void
756 abd_iter_init(struct abd_iter *aiter, abd_t *abd)
757 {
758 	ASSERT(!abd_is_gang(abd));
759 	abd_verify(abd);
760 	memset(aiter, 0, sizeof (struct abd_iter));
761 	aiter->iter_abd = abd;
762 	if (!abd_is_linear(abd)) {
763 		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
764 		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
765 	}
766 }
767 
768 /*
769  * This is just a helper function to see if we have exhausted the
770  * abd_iter and reached the end.
771  */
772 boolean_t
773 abd_iter_at_end(struct abd_iter *aiter)
774 {
775 	ASSERT3U(aiter->iter_pos, <=, aiter->iter_abd->abd_size);
776 	return (aiter->iter_pos == aiter->iter_abd->abd_size);
777 }
778 
779 /*
780  * Advance the iterator by a certain amount. Cannot be called when a chunk is
781  * in use. This can be safely called when the aiter is already exhausted, in
782  * which case this does nothing.
783  */
784 void
785 abd_iter_advance(struct abd_iter *aiter, size_t amount)
786 {
787 	/*
788 	 * Ensure that last chunk is not in use. abd_iterate_*() must clear
789 	 * this state (directly or via abd_iter_unmap()) before advancing.
790 	 */
791 	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
792 	ASSERT0(aiter->iter_mapsize);
793 	ASSERT3P(aiter->iter_page, ==, NULL);
794 	ASSERT0(aiter->iter_page_doff);
795 	ASSERT0(aiter->iter_page_dsize);
796 
797 	/* There's nothing left to advance to, so do nothing */
798 	if (abd_iter_at_end(aiter))
799 		return;
800 
801 	aiter->iter_pos += amount;
802 	aiter->iter_offset += amount;
803 	if (!abd_is_linear(aiter->iter_abd)) {
804 		while (aiter->iter_offset >= aiter->iter_sg->length) {
805 			aiter->iter_offset -= aiter->iter_sg->length;
806 			aiter->iter_sg = sg_next(aiter->iter_sg);
807 			if (aiter->iter_sg == NULL) {
808 				ASSERT0(aiter->iter_offset);
809 				break;
810 			}
811 		}
812 	}
813 }
814 
815 /*
816  * Map the current chunk into aiter. This can be safely called when the aiter
817  * is already exhausted, in which case this does nothing.
818  */
819 void
820 abd_iter_map(struct abd_iter *aiter)
821 {
822 	void *paddr;
823 	size_t offset = 0;
824 
825 	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
826 	ASSERT0(aiter->iter_mapsize);
827 
828 	/* There's nothing left to iterate over, so do nothing */
829 	if (abd_iter_at_end(aiter))
830 		return;
831 
832 	if (abd_is_linear(aiter->iter_abd)) {
833 		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
834 		offset = aiter->iter_offset;
835 		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
836 		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
837 	} else {
838 		offset = aiter->iter_offset;
839 		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
840 		    aiter->iter_abd->abd_size - aiter->iter_pos);
841 
842 		paddr = zfs_kmap_local(sg_page(aiter->iter_sg));
843 	}
844 
845 	aiter->iter_mapaddr = (char *)paddr + offset;
846 }
847 
848 /*
849  * Unmap the current chunk from aiter. This can be safely called when the aiter
850  * is already exhausted, in which case this does nothing.
851  */
852 void
853 abd_iter_unmap(struct abd_iter *aiter)
854 {
855 	/* There's nothing left to unmap, so do nothing */
856 	if (abd_iter_at_end(aiter))
857 		return;
858 
859 	if (!abd_is_linear(aiter->iter_abd)) {
860 		/* LINTED E_FUNC_SET_NOT_USED */
861 		zfs_kunmap_local(aiter->iter_mapaddr - aiter->iter_offset);
862 	}
863 
864 	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
865 	ASSERT3U(aiter->iter_mapsize, >, 0);
866 
867 	aiter->iter_mapaddr = NULL;
868 	aiter->iter_mapsize = 0;
869 }
870 
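/*
 * Nothing to do here; on Linux the kernel reclaims slab memory as needed.
 */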
871 void
872 abd_cache_reap_now(void)
873 {
874 }
875 
876 /*
877  * This is abd_iter_page(), the function underneath abd_iterate_page_func().
878  * It yields the next page struct and data offset and size within it, without
879  * mapping it into the address space.
880  */
881 
882 /*
883  * "Compound pages" are a group of pages that can be referenced from a single
884  * struct page *. It's organised as a "head" page, followed by a series of
885  * "tail" pages.
886  *
887  * In OpenZFS, compound pages are allocated using the __GFP_COMP flag, which we
888  * get from scatter ABDs and SPL vmalloc slabs (i.e. >16K allocations). So a
889  * great many of the IO buffers we get are going to be of this type.
890  *
891  * The tail pages are just regular PAGESIZE pages, and can be safely used
892  * as-is. However, the head page has length covering itself and all the tail
893  * pages. If the ABD chunk spans multiple pages, then we can use the head page
894  * and a >PAGESIZE length, which is far more efficient.
895  *
896  * Before kernel 4.5 however, compound page heads were refcounted separately
897  * from tail pages, such that moving back to the head page would require us to
898  * take a reference to it and release it once we're completely finished with
899  * it. In practice, that is not until our caller is done with the ABD, which we
900  * have no insight into from here. Rather than contort this API to track head
901  * page references on such ancient kernels, we disable this special compound
902  * page handling on those kernels, instead just treating each page within it as a
903  * regular PAGESIZE page (which it is). This is slightly less efficient, but
904  * makes everything far simpler.
905  *
906  * The below test sets/clears ABD_ITER_COMPOUND_PAGES to enable/disable the
907  * special handling, and also defines the ABD_ITER_PAGE_SIZE(page) macro to
908  * understand compound pages, or not, as required.
909  */
910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
911 #define	ABD_ITER_COMPOUND_PAGES		1
912 #define	ABD_ITER_PAGE_SIZE(page)	\
913 	(PageCompound(page) ? page_size(page) : PAGESIZE)
914 #else
915 #undef ABD_ITER_COMPOUND_PAGES
916 #define	ABD_ITER_PAGE_SIZE(page)	(PAGESIZE)
917 #endif
918 
919 void
920 abd_iter_page(struct abd_iter *aiter)
921 {
922 	if (abd_iter_at_end(aiter)) {
923 		aiter->iter_page = NULL;
924 		aiter->iter_page_doff = 0;
925 		aiter->iter_page_dsize = 0;
926 		return;
927 	}
928 
929 	struct page *page;
930 	size_t doff, dsize;
931 
932 	/*
933 	 * Find the page, and the start of the data within it. This is computed
934 	 * differently for linear and scatter ABDs; linear is referenced by
935 	 * virtual memory location, while scatter is referenced by page
936 	 * pointer.
937 	 */
938 	if (abd_is_linear(aiter->iter_abd)) {
939 		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
940 
941 		/* memory address at iter_pos */
942 		void *paddr = ABD_LINEAR_BUF(aiter->iter_abd) + aiter->iter_pos;
943 
944 		/* struct page for address */
945 		page = is_vmalloc_addr(paddr) ?
946 		    vmalloc_to_page(paddr) : virt_to_page(paddr);
947 
948 		/* offset of address within the page */
949 		doff = offset_in_page(paddr);
950 	} else {
951 		ASSERT(!abd_is_gang(aiter->iter_abd));
952 
953 		/* current scatter page */
954 		page = nth_page(sg_page(aiter->iter_sg),
955 		    aiter->iter_offset >> PAGE_SHIFT);
956 
957 		/* position within page */
958 		doff = aiter->iter_offset & (PAGESIZE - 1);
959 	}
960 
961 #ifdef ABD_ITER_COMPOUND_PAGES
962 	if (PageTail(page)) {
963 		/*
964 		 * If this is a compound tail page, move back to the head, and
965 		 * adjust the offset to match. This may let us yield a much
966 		 * larger amount of data from a single logical page, and so
967 		 * leave our caller with fewer pages to process.
968 		 */
969 		struct page *head = compound_head(page);
970 		doff += ((page - head) * PAGESIZE);
971 		page = head;
972 	}
973 #endif
974 
975 	ASSERT(page);
976 
977 	/*
978 	 * Compute the maximum amount of data we can take from this page. This
979 	 * is the smaller of:
980 	 * - the remaining space in the page
981 	 * - the remaining space in this scatterlist entry (which may not cover
982 	 *   the entire page)
983 	 * - the remaining space in the abd (which may not cover the entire
984 	 *   scatterlist entry)
985 	 */
986 	dsize = MIN(ABD_ITER_PAGE_SIZE(page) - doff,
987 	    aiter->iter_abd->abd_size - aiter->iter_pos);
988 	if (!abd_is_linear(aiter->iter_abd))
989 		dsize = MIN(dsize, aiter->iter_sg->length - aiter->iter_offset);
990 	ASSERT3U(dsize, >, 0);
991 
992 	/* final iterator outputs */
993 	aiter->iter_page = page;
994 	aiter->iter_page_doff = doff;
995 	aiter->iter_page_dsize = dsize;
996 }
997 
998 /*
999  * Note: ABD BIO functions are only needed to support vdev_classic. See comments in
1000  * vdev_disk.c.
1001  */
1002 
1003 /*
1004  * bio_nr_pages for ABD.
1005  * @off is the offset in @abd
1006  */
1007 unsigned long
1008 abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
1009 {
1010 	unsigned long pos;
1011 
1012 	if (abd_is_gang(abd)) {
1013 		unsigned long count = 0;
1014 
1015 		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1016 		    cabd != NULL && size != 0;
1017 		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1018 			ASSERT3U(off, <, cabd->abd_size);
1019 			int mysize = MIN(size, cabd->abd_size - off);
1020 			count += abd_nr_pages_off(cabd, mysize, off);
1021 			size -= mysize;
1022 			off = 0;
1023 		}
1024 		return (count);
1025 	}
1026 
1027 	if (abd_is_linear(abd))
1028 		pos = (unsigned long)abd_to_buf(abd) + off;
1029 	else
1030 		pos = ABD_SCATTER(abd).abd_offset + off;
1031 
1032 	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
1033 	    (pos >> PAGE_SHIFT));
1034 }
1035 
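/*
 * Add up to bio_size bytes of the linear buffer at buf_ptr to the bio, one
 * page-sized segment at a time.  Returns the number of bytes not added.
 */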
1036 static unsigned int
1037 bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
1038 {
1039 	unsigned int offset, size, i;
1040 	struct page *page;
1041 
1042 	offset = offset_in_page(buf_ptr);
1043 	for (i = 0; i < bio->bi_max_vecs; i++) {
1044 		size = PAGE_SIZE - offset;
1045 
1046 		if (bio_size <= 0)
1047 			break;
1048 
1049 		if (size > bio_size)
1050 			size = bio_size;
1051 
1052 		if (is_vmalloc_addr(buf_ptr))
1053 			page = vmalloc_to_page(buf_ptr);
1054 		else
1055 			page = virt_to_page(buf_ptr);
1056 
1057 		/*
1058 		 * Some network-related block devices use tcp_sendpage, which
1059 		 * doesn't behave well when given a 0-count page; this is a
1060 		 * safety net to catch them.
1061 		 */
1062 		ASSERT3S(page_count(page), >, 0);
1063 
1064 		if (bio_add_page(bio, page, size, offset) != size)
1065 			break;
1066 
1067 		buf_ptr += size;
1068 		bio_size -= size;
1069 		offset = 0;
1070 	}
1071 
1072 	return (bio_size);
1073 }
1074 
1075 /*
1076  * bio_map for gang ABD.
1077  */
1078 static unsigned int
1079 abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
1080     unsigned int io_size, size_t off)
1081 {
1082 	ASSERT(abd_is_gang(abd));
1083 
1084 	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1085 	    cabd != NULL;
1086 	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1087 		ASSERT3U(off, <, cabd->abd_size);
1088 		int size = MIN(io_size, cabd->abd_size - off);
1089 		int remainder = abd_bio_map_off(bio, cabd, size, off);
1090 		io_size -= (size - remainder);
1091 		if (io_size == 0 || remainder > 0)
1092 			return (io_size);
1093 		off = 0;
1094 	}
1095 	ASSERT0(io_size);
1096 	return (io_size);
1097 }
1098 
1099 /*
1100  * bio_map for ABD.
1101  * @off is the offset in @abd
1102  * Remaining IO size is returned
1103  */
1104 unsigned int
1105 abd_bio_map_off(struct bio *bio, abd_t *abd,
1106     unsigned int io_size, size_t off)
1107 {
1108 	struct abd_iter aiter;
1109 
1110 	ASSERT3U(io_size, <=, abd->abd_size - off);
1111 	if (abd_is_linear(abd))
1112 		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
1113 
1114 	ASSERT(!abd_is_linear(abd));
1115 	if (abd_is_gang(abd))
1116 		return (abd_gang_bio_map_off(bio, abd, io_size, off));
1117 
1118 	abd_iter_init(&aiter, abd);
1119 	abd_iter_advance(&aiter, off);
1120 
1121 	for (int i = 0; i < bio->bi_max_vecs; i++) {
1122 		struct page *pg;
1123 		size_t len, sgoff, pgoff;
1124 		struct scatterlist *sg;
1125 
1126 		if (io_size <= 0)
1127 			break;
1128 
1129 		sg = aiter.iter_sg;
1130 		sgoff = aiter.iter_offset;
1131 		pgoff = sgoff & (PAGESIZE - 1);
1132 		len = MIN(io_size, PAGESIZE - pgoff);
1133 		ASSERT(len > 0);
1134 
1135 		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
1136 		if (bio_add_page(bio, pg, len, pgoff) != len)
1137 			break;
1138 
1139 		io_size -= len;
1140 		abd_iter_advance(&aiter, len);
1141 	}
1142 
1143 	return (io_size);
1144 }
1145 
1146 /* Tunable Parameters */
1147 module_param(zfs_abd_scatter_enabled, int, 0644);
1148 MODULE_PARM_DESC(zfs_abd_scatter_enabled,
1149 	"Toggle whether ABD allocations must be linear.");
1150 module_param(zfs_abd_scatter_min_size, int, 0644);
1151 MODULE_PARM_DESC(zfs_abd_scatter_min_size,
1152 	"Minimum size of scatter allocations.");
1153 /* CSTYLED */
1154 module_param(zfs_abd_scatter_max_order, uint, 0644);
1155 MODULE_PARM_DESC(zfs_abd_scatter_max_order,
1156 	"Maximum order allocation used for a scatter ABD.");
1157