/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |     +--------------------------------+
 *         |   abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *              no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |
 *         |   abd_offset = 0  |                           +-----------+
 *         |   abd_chunks[0] ----------------------------->| chunk 0   |
 *         |   abd_chunks[1] ---------------------+        +-----------+
 *         |   ...             |                  |        +-----------+
 *         |   abd_chunks[N-1] ---------+         +------->| chunk 1   |
 *         +-------------------+        |                  +-----------+
 *                                      |                      ...
 *                                      |                  +-----------+
 *                                      +----------------->| chunk N-1 |
 *                                                         +-----------+
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and
 * the original ABD's abd_children refcount is incremented. This tracking
 * ensures that the root ABD isn't freed before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, and you know it's
 * linear (because you allocated it), you can use abd_to_buf() to access the
 * underlying raw buffer. Otherwise, you should use one of the
 * abd_borrow_buf* functions, which will allocate a raw buffer if necessary.
 * Use the abd_return_buf* functions to return any raw buffers that are no
 * longer necessary when you're done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 *
 * As an additional feature, linear and scatter ABDs can be stitched together
 * by using the gang ABD type (abd_alloc_gang_abd()). This allows multiple
 * ABDs to be viewed as a single ABD.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled
 * to B_FALSE.
 */
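
/*
 * Example (illustrative sketch, not part of the original source): the
 * typical allocate/fill/read/free round trip through the public API
 * described above. The 4 KB size and the caller-provided src/dst buffers
 * are assumptions made for the example.
 *
 *	abd_t *abd = abd_alloc(4096, B_FALSE);
 *
 *	abd_copy_from_buf(abd, src, 4096);	fill from a raw buffer
 *	abd_copy_to_buf(dst, abd, 4096);	read the data back out
 *	abd_free(abd);				the owner frees with abd_free()
 */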

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;

boolean_t
abd_is_linear(abd_t *abd)
{
	return ((abd->abd_flags & ABD_FLAG_LINEAR) != 0 ? B_TRUE : B_FALSE);
}

boolean_t
abd_is_linear_page(abd_t *abd)
{
	return ((abd->abd_flags & ABD_FLAG_LINEAR_PAGE) != 0 ?
	    B_TRUE : B_FALSE);
}

boolean_t
abd_is_gang(abd_t *abd)
{
	return ((abd->abd_flags & ABD_FLAG_GANG) != 0 ? B_TRUE :
	    B_FALSE);
}

void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
	    ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
	} else if (abd_is_gang(abd)) {
		uint_t child_sizes = 0;
		for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
		    cabd != NULL;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT(list_link_active(&cabd->abd_gang_link));
			child_sizes += cabd->abd_size;
			abd_verify(cabd);
		}
		ASSERT3U(abd->abd_size, ==, child_sizes);
	} else {
		abd_verify_scatter(abd);
	}
}

uint_t
abd_get_size(abd_t *abd)
{
	abd_verify(abd);
	return (abd->abd_size);
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	if (!zfs_abd_scatter_enabled || abd_size_alloc_linear(size))
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd_t *abd = abd_alloc_struct(size);
	abd->abd_flags = ABD_FLAG_OWNER;
	abd->abd_u.abd_scatter.abd_offset = 0;
	abd_alloc_chunks(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	abd_update_scatter_stats(abd, ABDSTAT_INCR);

	return (abd);
}

static void
abd_free_scatter(abd_t *abd)
{
	abd_free_chunks(abd);

	zfs_refcount_destroy(&abd->abd_children);
	abd_update_scatter_stats(abd, ABDSTAT_DECR);
	abd_free_struct(abd);
}

static void
abd_put_gang_abd(abd_t *abd)
{
	ASSERT(abd_is_gang(abd));
	abd_t *cabd;

	while ((cabd = list_remove_head(&ABD_GANG(abd).abd_gang_chain))
	    != NULL) {
		ASSERT0(cabd->abd_flags & ABD_FLAG_GANG_FREE);
		abd->abd_size -= cabd->abd_size;
		abd_put(cabd);
	}
	ASSERT0(abd->abd_size);
	list_destroy(&ABD_GANG(abd).abd_gang_chain);
}

/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	if (abd == NULL)
		return;

	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	if (abd_is_gang(abd))
		abd_put_gang_abd(abd);

	zfs_refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}
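
/*
 * Example (illustrative sketch, not part of the original source): the
 * lifecycle of a derived "view" ABD. abd_get_offset_size() shares the
 * parent's data, so the view is released with abd_put() while the parent
 * is still freed with abd_free(). The sizes are assumptions.
 *
 *	abd_t *parent = abd_alloc(8192, B_FALSE);
 *	abd_t *view = abd_get_offset_size(parent, 4096, 4096);
 *
 *	... use the last 4 KB of parent through view ...
 *
 *	abd_put(view);		the view does not own the data
 *	abd_free(parent);	only now is the buffer freed
 */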

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	if (is_metadata) {
		ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
	} else {
		ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd_is_linear_page(abd)) {
		abd_free_linear_page(abd);
		return;
	}
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	} else {
		zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	}

	zfs_refcount_destroy(&abd->abd_children);
	abd_update_linear_stats(abd, ABDSTAT_DECR);

	abd_free_struct(abd);
}

static void
abd_free_gang_abd(abd_t *abd)
{
	ASSERT(abd_is_gang(abd));
	abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);

	while (cabd != NULL) {
		/*
		 * We must acquire the child ABD's mutex to ensure that if it
		 * is being added to another gang ABD we set the link as
		 * inactive when removing it from this gang ABD, before it
		 * can be added to the other gang ABD.
		 */
		mutex_enter(&cabd->abd_mtx);
		ASSERT(list_link_active(&cabd->abd_gang_link));
		list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
		mutex_exit(&cabd->abd_mtx);
		abd->abd_size -= cabd->abd_size;
		if (cabd->abd_flags & ABD_FLAG_GANG_FREE) {
			if (cabd->abd_flags & ABD_FLAG_OWNER)
				abd_free(cabd);
			else
				abd_put(cabd);
		}
		cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
	}
	ASSERT0(abd->abd_size);
	list_destroy(&ABD_GANG(abd).abd_gang_chain);
	zfs_refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}

/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc(),
 * abd_alloc_linear(), or abd_alloc_gang_abd().
 */
void
abd_free(abd_t *abd)
{
	if (abd == NULL)
		return;

	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else if (abd_is_gang(abd))
		abd_free_gang_abd(abd);
	else
		abd_free_scatter(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd) &&
	    !abd_is_linear_page(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}


/*
 * Create a gang ABD that will be the head of a list of ABDs. This is used
 * to "chain" scatter/gather lists together when constructing aggregated
 * I/Os. To free this ABD, abd_free() must be called.
 */
abd_t *
abd_alloc_gang_abd(void)
{
	abd_t *abd;

	abd = abd_alloc_struct(0);
	abd->abd_flags = ABD_FLAG_GANG | ABD_FLAG_OWNER;
	abd->abd_size = 0;
	abd->abd_parent = NULL;
	list_create(&ABD_GANG(abd).abd_gang_chain,
	    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
	zfs_refcount_create(&abd->abd_children);
	return (abd);
}
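
/*
 * Example (illustrative sketch, not part of the original source): stitching
 * two independently allocated ABDs into one logical buffer with a gang ABD.
 * With free_on_free set to B_TRUE, abd_free() of the gang also frees the
 * children. The 4 KB sizes are assumptions.
 *
 *	abd_t *gang = abd_alloc_gang_abd();
 *	abd_gang_add(gang, abd_alloc(4096, B_FALSE), B_TRUE);
 *	abd_gang_add(gang, abd_alloc(4096, B_FALSE), B_TRUE);
 *	ASSERT3U(abd_get_size(gang), ==, 8192);
 *
 *	... gang can now be handed to any abd_* consumer ...
 *
 *	abd_free(gang);		frees the gang and both children
 */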

/*
 * Add a child gang ABD to a parent gang ABD's chained list.
 */
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	ASSERT(abd_is_gang(cabd));

	if (free_on_free) {
		/*
		 * If the parent is responsible for freeing the child gang
		 * ABD we will just splice the child's list of children to
		 * the parent's list and immediately free the child gang ABD
		 * struct. The children moved over from the child gang will
		 * retain all of their free_on_free settings after being
		 * added to the parent's list.
		 */
		pabd->abd_size += cabd->abd_size;
		list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
		    &ABD_GANG(cabd).abd_gang_chain);
		ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
		abd_verify(pabd);
		abd_free_struct(cabd);
	} else {
		for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
		    child != NULL;
		    child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
			/*
			 * We always pass B_FALSE for free_on_free as it is
			 * the original child gang ABD's responsibility to
			 * determine whether any of its child ABDs should be
			 * freed in the call to abd_free().
			 */
			abd_gang_add(pabd, child, B_FALSE);
		}
		abd_verify(pabd);
	}
}

/*
 * Add a child ABD to a gang ABD's chained list.
 */
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	abd_t *child_abd = NULL;

	/*
	 * If the child being added is a gang ABD, we will add the
	 * child's ABDs to the parent gang ABD. This allows us to account
	 * for the offset correctly in the parent gang ABD.
	 */
	if (abd_is_gang(cabd)) {
		ASSERT(!list_link_active(&cabd->abd_gang_link));
		ASSERT(!list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
		return (abd_gang_add_gang(pabd, cabd, free_on_free));
	}
	ASSERT(!abd_is_gang(cabd));

	/*
	 * In order to verify that an ABD is not already part of
	 * another gang ABD, we must lock the child ABD's abd_mtx
	 * to check its abd_gang_link status. We unlock the abd_mtx
	 * only after it has been added to a gang ABD, which
	 * will update the abd_gang_link's status. See comment below
	 * for how an ABD can be in multiple gang ABDs simultaneously.
	 */
	mutex_enter(&cabd->abd_mtx);
	if (list_link_active(&cabd->abd_gang_link)) {
		/*
		 * If the child ABD is already part of another
		 * gang ABD then we must allocate a new
		 * ABD to use a separate link. We mark the newly
		 * allocated ABD with ABD_FLAG_GANG_FREE, before
		 * adding it to the gang ABD's list, to make the
		 * gang ABD aware that it is responsible for calling
		 * abd_put(). We use abd_get_offset() in order
		 * to just allocate a new ABD but avoid copying the
		 * data over into the newly allocated ABD.
		 *
		 * An ABD may become part of multiple gang ABDs. For
		 * example, when writing ditto blocks, the same ABD
		 * is used to write 2 or 3 locations with 2 or 3
		 * zio_t's. Each of the zio's may be aggregated with
		 * different adjacent zio's. zio aggregation uses gang
		 * zio's, so the single ABD can become part of multiple
		 * gang zio's.
		 *
		 * The ASSERT below is to make sure that if
		 * free_on_free is passed as B_TRUE, the ABD cannot
		 * be in multiple gang ABDs. The gang ABD
		 * cannot be responsible for cleaning up the child
		 * ABD memory allocation if the ABD can be in
		 * multiple gang ABDs at one time.
		 */
		ASSERT3B(free_on_free, ==, B_FALSE);
		child_abd = abd_get_offset(cabd, 0);
		child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	} else {
		child_abd = cabd;
		if (free_on_free)
			child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	}
	ASSERT3P(child_abd, !=, NULL);

	list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
	mutex_exit(&cabd->abd_mtx);
	pabd->abd_size += child_abd->abd_size;
}

/*
 * Locate the ABD for the supplied offset in the gang ABD.
 * Return a new offset relative to the returned ABD.
 */
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
	abd_t *cabd;

	ASSERT(abd_is_gang(abd));
	ASSERT3U(*off, <, abd->abd_size);
	for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		if (*off >= cabd->abd_size)
			*off -= cabd->abd_size;
		else
			return (cabd);
	}
	VERIFY3P(cabd, !=, NULL);
	return (cabd);
}
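
/*
 * Worked example (illustrative numbers, not from the original source): in
 * a gang ABD whose chain holds a 4096-byte child followed by an 8192-byte
 * child, a call with *off == 6144 skips the first child (6144 >= 4096, so
 * *off becomes 2048) and returns the second child with *off == 2048.
 */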

/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
static abd_t *
abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
{
	abd_t *abd = NULL;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct(0);

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
	} else if (abd_is_gang(sabd)) {
		size_t left = size;
		abd = abd_alloc_gang_abd();
		abd->abd_flags &= ~ABD_FLAG_OWNER;
		for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
		    cabd != NULL && left > 0;
		    cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
			int csize = MIN(left, cabd->abd_size - off);

			abd_t *nabd = abd_get_offset_impl(cabd, off, csize);
			abd_gang_add(abd, nabd, B_FALSE);
			left -= csize;
			off = 0;
		}
		ASSERT3U(left, ==, 0);
	} else {
		abd = abd_get_offset_scatter(sabd, off);
	}

	abd->abd_size = size;
	abd->abd_parent = sabd;
	zfs_refcount_create(&abd->abd_children);
	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
	return (abd);
}

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
	VERIFY3U(size, >, 0);
	return (abd_get_offset_impl(sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);
	return (abd_get_offset_impl(sabd, off, size));
}

/*
 * Return a scatter ABD of the given size that is backed by the zero-filled
 * abd_zero_scatter buffer. In order to free the returned ABD, abd_put()
 * must be called.
 */
abd_t *
abd_get_zeros(size_t size)
{
	ASSERT3P(abd_zero_scatter, !=, NULL);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	return (abd_get_offset_size(abd_zero_scatter, 0, size));
}

/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	ABD_LINEAR_BUF(abd) = buf;

	return (abd);
}
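
/*
 * Example (illustrative sketch, not part of the original source): wrapping
 * a caller-owned buffer so it can be handed to ABD-based interfaces without
 * copying. The wrapper is released with abd_put(); the caller remains
 * responsible for freeing buf itself. buf and len are assumptions.
 *
 *	abd_t *abd = abd_get_from_buf(buf, len);
 *	... pass abd to code that expects an abd_t ...
 *	abd_put(abd);
 */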

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (ABD_LINEAR_BUF(abd));
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
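
/*
 * Example (illustrative sketch, not part of the original source): the
 * borrow/modify/return pattern for code that needs a contiguous buffer.
 * abd_borrow_buf_copy() fills the borrowed buffer with the ABD's current
 * contents, and abd_return_buf_copy() copies any modifications back.
 * modify_in_place() is a hypothetical helper.
 *
 *	void *buf = abd_borrow_buf_copy(abd, size);
 *	modify_in_place(buf, size);
 *	abd_return_buf_copy(abd, buf, size);
 */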

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);

	/*
	 * abd_free() needs to handle LINEAR_PAGE ABDs specially.
	 * Since that flag does not survive the
	 * abd_release_ownership_of_buf() -> abd_get_from_buf() ->
	 * abd_take_ownership_of_buf() sequence, we don't allow releasing
	 * these "linear but not zio_[data_]buf_alloc()'ed" ABDs.
	 */
	ASSERT(!abd_is_linear_page(abd));

	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	abd_update_linear_stats(abd, ABDSTAT_DECR);
}


/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);
}

/*
 * Initializes an abd_iter based on whether the abd is a gang ABD
 * or just a single ABD.
 */
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
	abd_t *cabd = NULL;

	if (abd_is_gang(abd)) {
		cabd = abd_gang_get_offset(abd, &off);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, off);
		}
	} else {
		abd_iter_init(aiter, abd);
		abd_iter_advance(aiter, off);
	}
	return (cabd);
}

/*
 * Advances an abd_iter. We have to be careful with gang ABDs as advancing
 * could mean that we are at the end of a particular child ABD and must
 * grab the next one from the gang ABD's list.
 */
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
    size_t len)
{
	abd_iter_advance(aiter, len);
	if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
		ASSERT3P(cabd, !=, NULL);
		cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, 0);
		}
	}
	return (cabd);
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	struct abd_iter aiter;
	int ret = 0;

	if (size == 0)
		return (0);

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	boolean_t abd_multi = abd_is_gang(abd);
	abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);

	while (size > 0) {
		/* If we are at the end of the gang ABD we are done */
		if (abd_multi && !c_abd)
			break;

		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
	}

	return (ret);
}
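
/*
 * Example (illustrative sketch, not part of the original source): a custom
 * iterator callback that counts the non-zero bytes in an ABD. The callback
 * signature matches the abd_iter_func_t contract used above: it receives
 * each mapped segment in turn and returns 0 to continue iterating.
 *
 *	static int
 *	count_nonzero_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *count = private;
 *
 *		for (size_t i = 0; i < size; i++) {
 *			if (((char *)buf)[i] != 0)
 *				(*count)++;
 *		}
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) abd_iterate_func(abd, 0, abd_get_size(abd),
 *	    count_nonzero_cb, &count);
 */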

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;
	boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
	abd_t *c_dabd, *c_sabd;

	if (size == 0)
		return (0);

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	dabd_is_gang_abd = abd_is_gang(dabd);
	sabd_is_gang_abd = abd_is_gang(sabd);
	c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
	c_sabd = abd_init_abd_iter(sabd, &saiter, soff);

	while (size > 0) {
		/* if we are at the end of the gang ABD we are done */
		if ((dabd_is_gang_abd && !c_dabd) ||
		    (sabd_is_gang_abd && !c_sabd))
			break;

		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		c_dabd =
		    abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
		c_sabd =
		    abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
	}

	return (ret);
}

/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}

/*
 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
 *
 * @cabds          parity ABDs, must have equal size
 * @dabd           data ABD. Can be NULL (in this case @dsize = 0)
 * @func_raidz_gen should be implemented so that its behaviour
 *                 is the same whether it is passed linear or scatter buffers
 */
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
    ssize_t csize, ssize_t dsize, const unsigned parity,
    void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
	int i;
	ssize_t len, dlen;
	struct abd_iter caiters[3];
	struct abd_iter daiter = {0};
	void *caddrs[3];
	unsigned long flags __maybe_unused = 0;
	abd_t *c_cabds[3];
	abd_t *c_dabd = NULL;
	boolean_t cabds_is_gang_abd[3];
	boolean_t dabd_is_gang_abd = B_FALSE;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
		c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
	}

	if (dabd) {
		dabd_is_gang_abd = abd_is_gang(dabd);
		c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
	}

	ASSERT3S(dsize, >=, 0);

	abd_enter_critical(flags);
	while (csize > 0) {
		/* if we are at the end of the gang ABD we are done */
		if (dabd_is_gang_abd && !c_dabd)
			break;

		for (i = 0; i < parity; i++) {
			/*
			 * If we are at the end of the gang ABD we are
			 * done.
			 */
			if (cabds_is_gang_abd[i] && !c_cabds[i])
				break;
			abd_iter_map(&caiters[i]);
			caddrs[i] = caiters[i].iter_mapaddr;
		}

		len = csize;

		if (dabd && dsize > 0)
			abd_iter_map(&daiter);

		switch (parity) {
			case 3:
				len = MIN(caiters[2].iter_mapsize, len);
				/* falls through */
			case 2:
				len = MIN(caiters[1].iter_mapsize, len);
				/* falls through */
			case 1:
				len = MIN(caiters[0].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);

		if (dabd && dsize > 0) {
			/* this needs precise iter.length */
			len = MIN(daiter.iter_mapsize, len);
			dlen = len;
		} else
			dlen = 0;

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&caiters[i]);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &caiters[i], len);
		}

		if (dabd && dsize > 0) {
			abd_iter_unmap(&daiter);
			c_dabd =
			    abd_advance_abd_iter(dabd, c_dabd, &daiter,
			    dlen);
			dsize -= dlen;
		}

		csize -= len;

		ASSERT3S(dsize, >=, 0);
		ASSERT3S(csize, >=, 0);
	}
	abd_exit_critical(flags);
}
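
/*
 * Example (toy sketch, not upstream code): the shape of a func_raidz_gen
 * callback for single parity, XOR-ing one data segment into the parity
 * column. The real implementations live in the vdev_raidz math code; this
 * only shows how the (parity columns, data segment, csize, dsize) arguments
 * are consumed. parity_abd, data_abd, psize, and dsize are assumptions.
 *
 *	static void
 *	xor_gen(void **c, const void *d, size_t csize, size_t dsize)
 *	{
 *		uint8_t *p = c[0];
 *		const uint8_t *src = d;
 *
 *		(void) csize;		only dsize bytes carry data here
 *		for (size_t i = 0; i < dsize; i++)
 *			p[i] ^= src[i];
 *	}
 *
 *	abd_raidz_gen_iterate(&parity_abd, data_abd, psize, dsize, 1,
 *	    xor_gen);
 */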

/*
 * Iterate over code ABDs and data reconstruction target ABDs and call
 * @func_raidz_rec. Function maps at most 6 pages atomically.
 *
 * @cabds           parity ABDs, must have equal size
 * @tabds           rec target ABDs, at most 3
 * @tsize           size of data target columns
 * @func_raidz_rec  expects syndrome data in target columns. Function
 *                  reconstructs data and overwrites target columns.
 */
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
    ssize_t tsize, const unsigned parity,
    void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
    const unsigned *mul),
    const unsigned *mul)
{
	int i;
	ssize_t len;
	struct abd_iter citers[3];
	struct abd_iter xiters[3];
	void *caddrs[3], *xaddrs[3];
	unsigned long flags __maybe_unused = 0;
	boolean_t cabds_is_gang_abd[3];
	boolean_t tabds_is_gang_abd[3];
	abd_t *c_cabds[3];
	abd_t *c_tabds[3];

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
		tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
		c_cabds[i] =
		    abd_init_abd_iter(cabds[i], &citers[i], 0);
		c_tabds[i] =
		    abd_init_abd_iter(tabds[i], &xiters[i], 0);
	}

	abd_enter_critical(flags);
	while (tsize > 0) {

		for (i = 0; i < parity; i++) {
			/*
			 * If we are at the end of the gang ABD we
			 * are done.
			 */
			if (cabds_is_gang_abd[i] && !c_cabds[i])
				break;
			if (tabds_is_gang_abd[i] && !c_tabds[i])
				break;
			abd_iter_map(&citers[i]);
			abd_iter_map(&xiters[i]);
			caddrs[i] = citers[i].iter_mapaddr;
			xaddrs[i] = xiters[i].iter_mapaddr;
		}

		len = tsize;
		switch (parity) {
			case 3:
				len = MIN(xiters[2].iter_mapsize, len);
				len = MIN(citers[2].iter_mapsize, len);
				/* falls through */
			case 2:
				len = MIN(xiters[1].iter_mapsize, len);
				len = MIN(citers[1].iter_mapsize, len);
				/* falls through */
			case 1:
				len = MIN(xiters[0].iter_mapsize, len);
				len = MIN(citers[0].iter_mapsize, len);
		}
		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_rec(xaddrs, len, caddrs, mul);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&xiters[i]);
			abd_iter_unmap(&citers[i]);
			c_tabds[i] =
			    abd_advance_abd_iter(tabds[i], c_tabds[i],
			    &xiters[i], len);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &citers[i], len);
		}

		tsize -= len;
		ASSERT3S(tsize, >=, 0);
	}
	abd_exit_critical(flags);
}
1218