xref: /freebsd/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the ARC buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 *  (1) They avoid use of kmem_*, preventing performance problems where running
 *      kmem_reap on very large memory systems never finishes and causes
 *      constant TLB shootdowns.
 *
 *  (2) Fragmentation is less of an issue since when we are at the limit of
 *      allocatable space, we won't have to search around for a long free
 *      hole in the VA space for large ARC allocations. Each chunk is mapped in
 *      individually, so even if we are using HIGHMEM (see next point) we
 *      wouldn't need to worry about finding a contiguous address range.
 *
 *  (3) If we are not using HIGHMEM, then all physical memory is always
 *      mapped into the kernel's address space, so we also avoid the map /
 *      unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space.  See abd_alloc_chunks() for details.
 */
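
/*
 * Usage sketch (added for illustration; not code from this module):
 * because a single-chunk scatter allocation is published with
 * ABD_FLAG_LINEAR set (see abd_alloc_chunks() below), a consumer can take
 * the zero-copy path without caring how the buffer was allocated:
 *
 *	if (abd_is_linear(abd))
 *		buf = ABD_LINEAR_BUF(abd);	(no copy is made)
 *	else
 *		buf = abd_borrow_buf_copy(abd, abd->abd_size);
 *
 * abd_is_linear(), ABD_LINEAR_BUF() and abd_borrow_buf_copy() are real ABD
 * interfaces used in this file or in abd.c; the fragment above is only an
 * illustration of the calling pattern.
 */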

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order.  These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's.  Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
int zfs_abd_scatter_min_size = 512 * 3;
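
/*
 * Worked example (added for illustration, assuming 4K pages): a 1.5KB
 * buffer meets the 512 * 3 threshold and is allocated as a one-page
 * scatter ABD, wasting 4096 - 1536 = 2560 bytes of that page; a 1KB
 * buffer falls below the threshold and is carved from a 1KB kmem cache
 * slab instead, wasting no space per buf but tying page reclaim to the
 * whole slab as described above.
 */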

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page. This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

struct page;
/*
 * abd_zero_page is an allocated, zero'd PAGESIZE buffer which is assigned
 * as the backing page for each entry of abd_zero_scatter's scatterlist.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
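
/*
 * Example (added for illustration, assuming 4K pages):
 * abd_chunkcnt_for_bytes(6000) rounds 6000 up to 8192 and returns 2;
 * any size from 1 through 4096 returns 1.
 */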

abd_t *
abd_alloc_struct(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	list_link_init(&abd->abd_gang_link);
	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

void
abd_free_struct(abd_t *abd)
{
	mutex_destroy(&abd->abd_mtx);
	ASSERT(!list_link_active(&abd->abd_gang_link));
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}

#ifdef _KERNEL
/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps.
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
	put_page(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */
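
/*
 * Illustration (an assumption, not code from this module): a crash-dump
 * filter could recognize the pages marked above by testing the private
 * cookie, e.g.
 *
 *	PagePrivate(page) && page_private(page) == ABD_FILE_CACHE_PAGE
 *
 * which is exactly the state abd_mark_zfs_page() establishes and
 * abd_unmark_zfs_page() tears down.
 */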

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone.  Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction.  When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer.  All single-page (4K) ABD's can be
		 * represented this way.  Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages).  This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization cannot be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}
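
/*
 * Worked example of the order-decay loop above (added for illustration):
 * for a 40K request with 4K pages, nr_pages = 10.  The first pass tries
 * order = MIN(highbit64(10) - 1, max_order) = 3 and allocates one 8-page
 * compound chunk; the second pass needs 2 more pages, tries
 * order = highbit64(2) - 1 = 1, and finishes with a 2-page chunk.  When a
 * compound allocation fails only max_order is lowered, while an order-0
 * failure sleeps and retries, since single pages can then be satisfied
 * by reclaim.
 */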
#else

/*
 * Allocate N individual pages to construct a scatter ABD.  This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces.  It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}

/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_parent = NULL;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	zfs_refcount_create(&abd_zero_scatter->abd_children);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

#define	zfs_kmap_atomic(chunk, km)	((void *)chunk)
#define	zfs_kunmap_atomic(addr, km)	do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		for (int j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			umem_free(p, PAGESIZE);
		}
	}
	abd_free_sg_table(abd);
}

static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_parent = NULL;
	zfs_refcount_create(&abd_zero_scatter->abd_children);
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */

boolean_t
abd_size_alloc_linear(size_t size)
{
	return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
}
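
/*
 * Example (added for illustration): with the default
 * zfs_abd_scatter_min_size of 1536, a 512B or 1KB request is satisfied
 * linearly from the zio kmem caches, while a 4KB request is allocated as
 * a scatter ABD.
 */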

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}
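
/*
 * Example of the waste term above (added for illustration, assuming 4K
 * pages): a 5000-byte scatter ABD occupies P2ROUNDUP(5000, 4096) = 8192
 * bytes of pages, so 3192 bytes are charged to scatter_chunk_waste (and
 * to ARC_SPACE_ABD_CHUNK_WASTE).
 */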

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

static void
abd_free_zero_scatter(void)
{
	zfs_refcount_destroy(&abd_zero_scatter->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#else
	umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	zfs_refcount_destroy(&abd->abd_children);
	abd_update_scatter_stats(abd, ABDSTAT_DECR);
	abd_free_struct(abd);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we
 * don't plan to store this ABD in memory for a long period of time, then
 * we should allocate the ABD type that requires the least data copying to
 * do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy.  But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *sabd, size_t off)
{
	abd_t *abd = NULL;
	int i = 0;
	struct scatterlist *sg = NULL;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = 0;

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}
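
/*
 * Worked example (added for illustration): for a source ABD whose sgl
 * entries have lengths { 8K, 4K, 4K } and abd_offset 0,
 * abd_get_offset_scatter() with off = 10K walks past the first entry
 * (10K >= 8K), leaving new_offset = 2K.  The child ABD then shares the
 * parent's pages, with abd_sgl pointing at the second entry,
 * abd_offset = 2K, and abd_nents = 2.
 */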

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been
 * exhausted, in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg),
		    km_table[aiter->iter_km]);
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * aiter has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset,
		    km_table[aiter->iter_km]);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}
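
/*
 * Usage sketch (illustration only; the real iteration lives in abd.c's
 * abd_iterate_func()): the primitives above are driven in a
 * map / consume / unmap / advance loop.  This hypothetical helper copies
 * an ABD's contents into a flat buffer.
 */
static inline void
abd_iter_copy_sketch(abd_t *abd, void *buf, size_t size)
{
	struct abd_iter aiter;
	size_t pos = 0, len;

	abd_iter_init(&aiter, abd);
	while (pos < size && !abd_iter_at_end(&aiter)) {
		abd_iter_map(&aiter);
		len = MIN(aiter.iter_mapsize, size - pos);
		memcpy((char *)buf + pos, aiter.iter_mapaddr, len);
		/* The chunk must be unmapped before advancing. */
		abd_iter_unmap(&aiter);
		abd_iter_advance(&aiter, len);
		pos += len;
	}
}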

void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	while (abd_is_gang(abd))
		abd = abd_gang_get_offset(abd, &off);

	ASSERT(!abd_is_gang(abd));
	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT);
}

static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well with 0-count pages; this is a safety
		 * net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}

/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	int i;
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif