// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         | abd_flags = ...   |
 *         | abd_size = ...    |     +--------------------------------+
 *         | abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *              no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-----------------------+
 *         | ABD (scattered)       |
 *         | abd_flags = ...       |
 *         | abd_size = ...        |
 *         | abd_offset = 0        |                      +-----------+
 *         | abd_chunks[0] ------------------------------>| chunk 0   |
 *         | abd_chunks[1] ---------------------+         +-----------+
 *         | ...                   |            |         +-----------+
 *         | abd_chunks[N-1] ---------+         +-------->| chunk 1   |
 *         +-----------------------+  |                   +-----------+
 *                                    |                       ...
 *                                    |                   +-----------+
 *                                    +------------------>| chunk N-1 |
 *                                                        +-----------+
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and
 * the original ABD's abd_children refcount is incremented. This data allows
 * us to ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, and you know it's
 * linear (because you allocated it), you can use abd_to_buf() to access the
 * underlying raw buffer. Otherwise, use one of the abd_borrow_buf*
 * functions, which will allocate a raw buffer if necessary. Use the
 * abd_return_buf* functions to return any raw buffers that are no longer
 * necessary when you're done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 *
 * As an additional feature, linear and scatter ABDs can be stitched together
 * by using the gang ABD type (abd_alloc_gang()). This allows multiple ABDs
 * to be viewed as a single ABD.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled
 * to B_FALSE.
 */
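
/*
 * Illustrative sketch (not compiled): typical consumer usage of the API
 * described above. The sizes and the src/dst buffers are hypothetical;
 * consumers normally never need to care whether the ABD ended up linear
 * or scattered.
 *
 *	abd_t *abd = abd_alloc(4096, B_FALSE);
 *	abd_copy_from_buf_off(abd, src, 0, 4096);	// fill from raw buf
 *	...					// hand abd to the I/O path
 *	abd_copy_to_buf_off(dst, abd, 0, 4096);	// read contents back out
 *	abd_free(abd);				// frees data and the abd_t
 */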

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;

void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
	if (abd_is_from_pages(abd)) {
		ASSERT3U(abd->abd_size, <=, DMU_MAX_ACCESS);
	} else {
		ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	}
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
	    ABD_FLAG_GANG_FREE | ABD_FLAG_ALLOCD | ABD_FLAG_FROM_PAGES));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3U(abd->abd_size, >, 0);
		ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
	} else if (abd_is_gang(abd)) {
		uint_t child_sizes = 0;
		for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
		    cabd != NULL;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT(list_link_active(&cabd->abd_gang_link));
			child_sizes += cabd->abd_size;
			abd_verify(cabd);
		}
		ASSERT3U(abd->abd_size, ==, child_sizes);
	} else {
		ASSERT3U(abd->abd_size, >, 0);
		abd_verify_scatter(abd);
	}
#endif
}

void
abd_init_struct(abd_t *abd)
{
	list_link_init(&abd->abd_gang_link);
	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
	abd->abd_flags = 0;
#ifdef ZFS_DEBUG
	zfs_refcount_create(&abd->abd_children);
	abd->abd_parent = NULL;
#endif
	abd->abd_size = 0;
}

static void
abd_fini_struct(abd_t *abd)
{
	mutex_destroy(&abd->abd_mtx);
	ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
	zfs_refcount_destroy(&abd->abd_children);
#endif
}

abd_t *
abd_alloc_struct(size_t size)
{
	abd_t *abd = abd_alloc_struct_impl(size);
	abd_init_struct(abd);
	abd->abd_flags |= ABD_FLAG_ALLOCD;
	return (abd);
}

void
abd_free_struct(abd_t *abd)
{
	abd_fini_struct(abd);
	abd_free_struct_impl(abd);
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	if (abd_size_alloc_linear(size))
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd_t *abd = abd_alloc_struct(size);
	abd->abd_flags |= ABD_FLAG_OWNER;
	abd->abd_u.abd_scatter.abd_offset = 0;
	abd_alloc_chunks(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;

	abd_update_scatter_stats(abd, ABDSTAT_INCR);

	return (abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;

	if (is_metadata) {
		ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
	} else {
		ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd_is_linear_page(abd)) {
		abd_free_linear_page(abd);
		return;
	}

	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	} else {
		zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	}

	abd_update_linear_stats(abd, ABDSTAT_DECR);
}

static void
abd_free_gang(abd_t *abd)
{
	ASSERT(abd_is_gang(abd));
	abd_t *cabd;

	while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
		/*
		 * We must acquire the child ABD's mutex to ensure that if it
		 * is being added to another gang ABD we will set the link
		 * as inactive when removing it from this gang ABD and before
		 * adding it to the other gang ABD.
		 */
		mutex_enter(&cabd->abd_mtx);
		ASSERT(list_link_active(&cabd->abd_gang_link));
		list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
		mutex_exit(&cabd->abd_mtx);
		if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
			abd_free(cabd);
	}
	list_destroy(&ABD_GANG(abd).abd_gang_chain);
}

static void
abd_free_scatter(abd_t *abd)
{
	abd_free_chunks(abd);
	if (!abd_is_from_pages(abd))
		abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
 * and abd_get_*(), including abd_get_offset_struct().
 *
 * If the ABD was created with abd_alloc_*(), the underlying data
 * (scatterlist or linear buffer) will also be freed. (Subject to ownership
 * changes via abd_*_ownership_of_buf().)
 *
 * Unless the ABD was created with abd_get_offset_struct(), the abd_t will
 * also be freed.
 */
void
abd_free(abd_t *abd)
{
	if (abd == NULL)
		return;

	abd_verify(abd);
#ifdef ZFS_DEBUG
	IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif

	if (abd_is_gang(abd)) {
		abd_free_gang(abd);
	} else if (abd_is_linear(abd)) {
		if (abd->abd_flags & ABD_FLAG_OWNER)
			abd_free_linear(abd);
	} else {
		if (abd->abd_flags & ABD_FLAG_OWNER)
			abd_free_scatter(abd);
	}

#ifdef ZFS_DEBUG
	if (abd->abd_parent != NULL) {
		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}
#endif

	abd_fini_struct(abd);
	if (abd->abd_flags & ABD_FLAG_ALLOCD)
		abd_free_struct_impl(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd) &&
	    !abd_is_linear_page(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * Create a gang ABD that will be the head of a list of ABDs. This is used
 * to "chain" scatter/gather lists together when constructing aggregated
 * I/Os. To free this abd, abd_free() must be called.
 */
abd_t *
abd_alloc_gang(void)
{
	abd_t *abd = abd_alloc_struct(0);
	abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
	list_create(&ABD_GANG(abd).abd_gang_chain,
	    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
	return (abd);
}
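
/*
 * Illustrative sketch (not compiled) of chaining, assuming two ABDs that
 * back adjacent I/Os. With free_on_free = B_TRUE the gang becomes
 * responsible for its children, so a single abd_free() tears everything
 * down.
 *
 *	abd_t *gang = abd_alloc_gang();
 *	abd_gang_add(gang, abd_alloc(8192, B_FALSE), B_TRUE);
 *	abd_gang_add(gang, abd_alloc(4096, B_FALSE), B_TRUE);
 *	// gang->abd_size == 12288; ops on gang now span both children
 *	abd_free(gang);
 */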

/*
 * Add a child gang ABD to a parent gang ABD's chained list.
 */
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	ASSERT(abd_is_gang(cabd));

	if (free_on_free) {
		/*
		 * If the parent is responsible for freeing the child gang
		 * ABD we will just splice the child's children ABD list to
		 * the parent's list and immediately free the child gang ABD
		 * struct. The children moved over from the child gang will
		 * retain all of their free_on_free settings after being
		 * added to the parent's list.
		 */
#ifdef ZFS_DEBUG
		/*
		 * If cabd had a parent, we have to drop it here. We can't
		 * transfer it to pabd, nor can we clear abd_size and leave
		 * it in place.
		 */
		if (cabd->abd_parent != NULL) {
			(void) zfs_refcount_remove_many(
			    &cabd->abd_parent->abd_children,
			    cabd->abd_size, cabd);
			cabd->abd_parent = NULL;
		}
#endif
		pabd->abd_size += cabd->abd_size;
		cabd->abd_size = 0;
		list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
		    &ABD_GANG(cabd).abd_gang_chain);
		ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
		abd_verify(pabd);
		abd_free(cabd);
	} else {
		for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
		    child != NULL;
		    child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
			/*
			 * We always pass B_FALSE for free_on_free as it is
			 * the original child gang ABD's responsibility to
			 * determine if any of its child ABDs should be freed
			 * on the call to abd_free().
			 */
			abd_gang_add(pabd, child, B_FALSE);
		}
		abd_verify(pabd);
	}
}

/*
 * Add a child ABD to a gang ABD's chained list.
 */
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	abd_t *child_abd = NULL;

	/*
	 * If the child being added is a gang ABD, we will add the
	 * child's ABDs to the parent gang ABD. This allows us to account
	 * for the offset correctly in the parent gang ABD.
	 */
	if (abd_is_gang(cabd)) {
		ASSERT(!list_link_active(&cabd->abd_gang_link));
		return (abd_gang_add_gang(pabd, cabd, free_on_free));
	}
	ASSERT(!abd_is_gang(cabd));

	/*
	 * In order to verify that an ABD is not already part of
	 * another gang ABD, we must lock the child ABD's abd_mtx
	 * to check its abd_gang_link status. We unlock the abd_mtx
	 * only after it has been added to a gang ABD, which
	 * will update the abd_gang_link's status. See the comment below
	 * for how an ABD can be in multiple gang ABDs simultaneously.
	 */
	mutex_enter(&cabd->abd_mtx);
	if (list_link_active(&cabd->abd_gang_link)) {
		/*
		 * If the child ABD is already part of another
		 * gang ABD then we must allocate a new
		 * ABD to use a separate link. We mark the newly
		 * allocated ABD with ABD_FLAG_GANG_FREE, before
		 * adding it to the gang ABD's list, to make the
		 * gang ABD aware that it is responsible to call
		 * abd_free(). We use abd_get_offset() in order
		 * to just allocate a new ABD but avoid copying the
		 * data over into the newly allocated ABD.
		 *
		 * An ABD may become part of multiple gang ABDs. For
		 * example, when writing ditto blocks, the same ABD
		 * is used to write 2 or 3 locations with 2 or 3
		 * zio_t's. Each of the zios may be aggregated with
		 * different adjacent zios. zio aggregation uses gang
		 * zios, so the single ABD can become part of multiple
		 * gang zios.
		 *
		 * The ASSERT below is to make sure that if
		 * free_on_free is passed as B_TRUE, the ABD can
		 * not be in multiple gang ABDs. The gang ABD
		 * can not be responsible for cleaning up the child
		 * ABD memory allocation if the ABD can be in
		 * multiple gang ABDs at one time.
		 */
		ASSERT3B(free_on_free, ==, B_FALSE);
		child_abd = abd_get_offset(cabd, 0);
		child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	} else {
		child_abd = cabd;
		if (free_on_free)
			child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	}
	ASSERT3P(child_abd, !=, NULL);

	list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
	mutex_exit(&cabd->abd_mtx);
	pabd->abd_size += child_abd->abd_size;
}

/*
 * Locate the ABD for the supplied offset in the gang ABD.
 * Return a new offset relative to the returned ABD.
 */
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
	abd_t *cabd;

	ASSERT(abd_is_gang(abd));
	ASSERT3U(*off, <, abd->abd_size);
	for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		if (*off >= cabd->abd_size)
			*off -= cabd->abd_size;
		else
			return (cabd);
	}
	VERIFY3P(cabd, !=, NULL);
	return (cabd);
}

/*
 * Allocate a new ABD, using the provided struct (if non-NULL, and if
 * circumstances allow - otherwise allocate the struct). The returned ABD will
 * point to offset off of sabd. It shares the underlying buffer data with sabd.
 * Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
 */
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
	abd_verify(sabd);
	ASSERT3U(off + size, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		if (abd == NULL)
			abd = abd_alloc_struct(0);
		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;

		/*
		 * User pages from Direct I/O requests may be in a single page
		 * (ABD_FLAG_LINEAR_PAGE), and we must make sure to still flag
		 * that here for abd. This is required because we have to be
		 * careful when borrowing the buffer from the ABD because we
		 * can not place user pages under write protection on Linux.
		 * See the comments in abd_os.c for abd_borrow_buf(),
		 * abd_borrow_buf_copy(), abd_return_buf() and
		 * abd_return_buf_copy().
		 */
		if (abd_is_from_pages(sabd)) {
			abd->abd_flags |= ABD_FLAG_FROM_PAGES |
			    ABD_FLAG_LINEAR_PAGE;
		}

		ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
	} else if (abd_is_gang(sabd)) {
		size_t left = size;
		if (abd == NULL) {
			abd = abd_alloc_gang();
		} else {
			abd->abd_flags |= ABD_FLAG_GANG;
			list_create(&ABD_GANG(abd).abd_gang_chain,
			    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
		}

		abd->abd_flags &= ~ABD_FLAG_OWNER;
		for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
		    cabd != NULL && left > 0;
		    cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
			int csize = MIN(left, cabd->abd_size - off);

			abd_t *nabd = abd_get_offset_size(cabd, off, csize);
			abd_gang_add(abd, nabd, B_TRUE);
			left -= csize;
			off = 0;
		}
		ASSERT0(left);
	} else {
		abd = abd_get_offset_scatter(abd, sabd, off, size);
	}

	ASSERT3P(abd, !=, NULL);
	abd->abd_size = size;
#ifdef ZFS_DEBUG
	abd->abd_parent = sabd;
	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
	return (abd);
}

/*
 * Like abd_get_offset_size(), but memory for the abd_t is provided by the
 * caller. Using this routine can improve performance by avoiding the cost
 * of allocating memory for the abd_t struct, and updating the abd stats.
 * Usually, the provided abd is returned, but in some circumstances (FreeBSD,
 * if sabd is scatter and size is more than 2 pages) a new abd_t may need to
 * be allocated. Therefore callers should be careful to use the returned
 * abd_t*.
 */
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
	abd_t *result;
	abd_init_struct(abd);
	result = abd_get_offset_impl(abd, sabd, off, size);
	if (result != abd)
		abd_fini_struct(abd);
	return (result);
}
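
/*
 * Illustrative sketch (not compiled): a stack-provided abd_t avoids the
 * allocation. Only the returned pointer may be used or freed, since a
 * different abd_t may have been allocated internally.
 *
 *	abd_t tmp_abd;
 *	abd_t *oabd = abd_get_offset_struct(&tmp_abd, sabd, 512, 1024);
 *	...				// use oabd, not &tmp_abd
 *	abd_free(oabd);
 */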

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
	VERIFY3U(size, >, 0);
	return (abd_get_offset_impl(NULL, sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);
	return (abd_get_offset_impl(NULL, sabd, off, size));
}

/*
 * Return a scatter ABD of the given size containing only zeros.
 */
abd_t *
abd_get_zeros(size_t size)
{
	ASSERT3P(abd_zero_scatter, !=, NULL);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	return (abd_get_offset_size(abd_zero_scatter, 0, size));
}

/*
 * Create a linear ABD for an existing buf.
 */
static abd_t *
abd_get_from_buf_impl(abd_t *abd, void *buf, size_t size)
{
	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags |= ABD_FLAG_LINEAR;
	abd->abd_size = size;

	ABD_LINEAR_BUF(abd) = buf;

	return (abd);
}

abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct(0);
	return (abd_get_from_buf_impl(abd, buf, size));
}

abd_t *
abd_get_from_buf_struct(abd_t *abd, void *buf, size_t size)
{
	abd_init_struct(abd);
	return (abd_get_from_buf_impl(abd, buf, size));
}

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (ABD_LINEAR_BUF(abd));
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);

	/*
	 * abd_free() needs to handle LINEAR_PAGE ABDs specially.
	 * Since that flag does not survive the
	 * abd_release_ownership_of_buf() -> abd_get_from_buf() ->
	 * abd_take_ownership_of_buf() sequence, we don't allow releasing
	 * these "linear but not zio_[data_]buf_alloc()'ed" ABDs.
	 */
	ASSERT(!abd_is_linear_page(abd));

	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	abd_update_linear_stats(abd, ABDSTAT_DECR);
}

/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);
}
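
/*
 * Illustrative sketch (not compiled): wrapping an existing raw buffer and
 * then handing its lifetime over to the ABD. Assumes buf was allocated
 * with zio_data_buf_alloc(size), per the ownership rules above.
 *
 *	abd_t *abd = abd_get_from_buf(buf, size);
 *	abd_take_ownership_of_buf(abd, B_FALSE);
 *	...
 *	abd_free(abd);		// now also frees buf via zio_data_buf_free()
 */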

/*
 * Initializes an abd_iter based on whether the abd is a gang ABD
 * or just a single ABD.
 */
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
	abd_t *cabd = NULL;

	if (abd_is_gang(abd)) {
		cabd = abd_gang_get_offset(abd, &off);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, off);
		}
	} else {
		abd_iter_init(aiter, abd);
		abd_iter_advance(aiter, off);
	}
	return (cabd);
}

/*
 * Advances an abd_iter. We have to be careful with gang ABDs as
 * advancing could mean that we are at the end of a particular ABD and
 * must grab the next ABD from the gang ABD's list.
 */
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
    size_t len)
{
	abd_iter_advance(aiter, len);
	if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
		ASSERT3P(cabd, !=, NULL);
		cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, 0);
		}
	}
	return (cabd);
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	struct abd_iter aiter;
	int ret = 0;

	if (size == 0)
		return (0);

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);

	while (size > 0) {
		IMPLY(abd_is_gang(abd), c_abd != NULL);

		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
	}

	return (ret);
}
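
/*
 * Illustrative sketch (not compiled): a custom callback for
 * abd_iterate_func() that counts nonzero bytes. The callback name and the
 * use of private as a counter are hypothetical; a nonzero return would
 * stop the iteration early.
 *
 *	static int
 *	count_nonzero_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *count = private;
 *		for (size_t i = 0; i < size; i++)
 *			if (((char *)buf)[i] != 0)
 *				(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t nonzero = 0;
 *	(void) abd_iterate_func(abd, 0, abd->abd_size,
 *	    count_nonzero_cb, &nonzero);
 */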

#if defined(__linux__) && defined(_KERNEL)
int
abd_iterate_page_func(abd_t *abd, size_t off, size_t size,
    abd_iter_page_func_t *func, void *private)
{
	struct abd_iter aiter;
	int ret = 0;

	if (size == 0)
		return (0);

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);

	while (size > 0) {
		IMPLY(abd_is_gang(abd), c_abd != NULL);

		abd_iter_page(&aiter);

		size_t len = MIN(aiter.iter_page_dsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_page, aiter.iter_page_doff,
		    len, private);

		aiter.iter_page = NULL;
		aiter.iter_page_doff = 0;
		aiter.iter_page_dsize = 0;

		if (ret != 0)
			break;

		size -= len;
		c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
	}

	return (ret);
}
#endif

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) private;
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;
	abd_t *c_dabd, *c_sabd;

	if (size == 0)
		return (0);

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
	c_sabd = abd_init_abd_iter(sabd, &saiter, soff);

	while (size > 0) {
		IMPLY(abd_is_gang(dabd), c_dabd != NULL);
		IMPLY(abd_is_gang(sabd), c_sabd != NULL);

		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		c_dabd =
		    abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
		c_sabd =
		    abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
	}

	return (ret);
}

static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) private;
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	(void) private;
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}

/*
 * Check if ABD content is all-zeroes.
 */
static int
abd_cmp_zero_off_cb(void *data, size_t len, void *private)
{
	(void) private;

	/* This function can only check whole uint64s. Enforce that. */
	ASSERT0(P2PHASE(len, 8));

	uint64_t *end = (uint64_t *)((char *)data + len);
	for (uint64_t *word = (uint64_t *)data; word < end; word++)
		if (*word != 0)
			return (1);

	return (0);
}

int
abd_cmp_zero_off(abd_t *abd, size_t off, size_t size)
{
	return (abd_iterate_func(abd, off, size, abd_cmp_zero_off_cb, NULL));
}

/*
 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
 *
 * @cabds          parity ABDs, must have equal size
 * @dabd           data ABD. Can be NULL (in this case @dsize = 0)
 * @func_raidz_gen should be implemented so that its behaviour
 *                 is the same when taking linear and scatter buffers
 */
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, size_t off,
    size_t csize, size_t dsize, const unsigned parity,
    void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
	int i;
	size_t len, dlen;
	struct abd_iter caiters[3];
	struct abd_iter daiter;
	void *caddrs[3], *daddr;
	unsigned long flags __maybe_unused = 0;
	abd_t *c_cabds[3];
	abd_t *c_dabd = NULL;

	ASSERT3U(parity, <=, 3);
	for (i = 0; i < parity; i++) {
		abd_verify(cabds[i]);
		ASSERT3U(off + csize, <=, cabds[i]->abd_size);
		c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], off);
	}

	if (dsize > 0) {
		ASSERT(dabd);
		abd_verify(dabd);
		ASSERT3U(off + dsize, <=, dabd->abd_size);
		c_dabd = abd_init_abd_iter(dabd, &daiter, off);
	}

	abd_enter_critical(flags);
	while (csize > 0) {
		len = csize;
		for (i = 0; i < parity; i++) {
			IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
			abd_iter_map(&caiters[i]);
			caddrs[i] = caiters[i].iter_mapaddr;
			len = MIN(caiters[i].iter_mapsize, len);
		}

		if (dsize > 0) {
			IMPLY(abd_is_gang(dabd), c_dabd != NULL);
			abd_iter_map(&daiter);
			daddr = daiter.iter_mapaddr;
			len = MIN(daiter.iter_mapsize, len);
			dlen = len;
		} else {
			daddr = NULL;
			dlen = 0;
		}

		/* must be progressive */
		ASSERT3U(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_gen(caddrs, daddr, len, dlen);

		if (dsize > 0) {
			abd_iter_unmap(&daiter);
			c_dabd =
			    abd_advance_abd_iter(dabd, c_dabd, &daiter,
			    dlen);
			dsize -= dlen;
		}

		for (i = parity - 1; i >= 0; i--) {
			abd_iter_unmap(&caiters[i]);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &caiters[i], len);
		}

		csize -= len;
	}
	abd_exit_critical(flags);
}

/*
 * Iterate over code ABDs and data reconstruction target ABDs and call
 * @func_raidz_rec. Function maps at most 6 pages atomically.
 *
 * @cabds          parity ABDs, must have equal size
 * @tabds          rec target ABDs, at most 3
 * @tsize          size of data target columns
 * @func_raidz_rec expects syndrome data in target columns. Function
 *                 reconstructs data and overwrites target columns.
 */
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
    size_t tsize, const unsigned parity,
    void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
    const unsigned *mul),
    const unsigned *mul)
{
	int i;
	size_t len;
	struct abd_iter citers[3];
	struct abd_iter xiters[3];
	void *caddrs[3], *xaddrs[3];
	unsigned long flags __maybe_unused = 0;
	abd_t *c_cabds[3];
	abd_t *c_tabds[3];

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		abd_verify(cabds[i]);
		abd_verify(tabds[i]);
		ASSERT3U(tsize, <=, cabds[i]->abd_size);
		ASSERT3U(tsize, <=, tabds[i]->abd_size);
		c_cabds[i] =
		    abd_init_abd_iter(cabds[i], &citers[i], 0);
		c_tabds[i] =
		    abd_init_abd_iter(tabds[i], &xiters[i], 0);
	}

	abd_enter_critical(flags);
	while (tsize > 0) {
		len = tsize;
		for (i = 0; i < parity; i++) {
			IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
			IMPLY(abd_is_gang(tabds[i]), c_tabds[i] != NULL);
			abd_iter_map(&citers[i]);
			abd_iter_map(&xiters[i]);
			caddrs[i] = citers[i].iter_mapaddr;
			xaddrs[i] = xiters[i].iter_mapaddr;
			len = MIN(citers[i].iter_mapsize, len);
			len = MIN(xiters[i].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_rec(xaddrs, len, caddrs, mul);

		for (i = parity - 1; i >= 0; i--) {
			abd_iter_unmap(&xiters[i]);
			abd_iter_unmap(&citers[i]);
			c_tabds[i] =
			    abd_advance_abd_iter(tabds[i], c_tabds[i],
			    &xiters[i], len);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &citers[i], len);
		}

		tsize -= len;
		ASSERT3S(tsize, >=, 0);
	}
	abd_exit_critical(flags);
}

EXPORT_SYMBOL(abd_free);