xref: /freebsd/sys/cddl/boot/zfs/zfsimpl.h (revision 0a36787e4c1fa0cf77dcf83be0867178476e372b)
1 /*-
2  * Copyright (c) 2002 McAfee, Inc.
3  * All rights reserved.
4  *
5  * This software was developed for the FreeBSD Project by Marshall
6  * Kirk McKusick and McAfee Research, the Security Research Division of
7  * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
8  * part of the DARPA CHATS research program.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 /*
32  * CDDL HEADER START
33  *
34  * The contents of this file are subject to the terms of the
35  * Common Development and Distribution License (the "License").
36  * You may not use this file except in compliance with the License.
37  *
38  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
39  * or http://www.opensolaris.org/os/licensing.
40  * See the License for the specific language governing permissions
41  * and limitations under the License.
42  *
43  * When distributing Covered Code, include this CDDL HEADER in each
44  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
45  * If applicable, add the following below this CDDL HEADER, with the
46  * fields enclosed by brackets "[]" replaced with your own identifying
47  * information: Portions Copyright [yyyy] [name of copyright owner]
48  *
49  * CDDL HEADER END
50  */
51 /*
52  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
53  * Use is subject to license terms.
54  */
55 /*
56  * Copyright 2013 by Saso Kiselkov. All rights reserved.
57  */
58 /*
59  * Copyright (c) 2020 by Delphix. All rights reserved.
60  */
61 
62 #include <sys/queue.h>
63 #include <sys/list.h>
64 #include <bootstrap.h>
65 
66 #ifndef _ZFSIMPL_H_
67 #define	_ZFSIMPL_H_
68 
69 #define	MAXNAMELEN	256
70 
71 #define	_NOTE(s)
72 
73 /*
74  * AVL comparator helpers
75  */
76 #define	AVL_ISIGN(a)	(((a) > 0) - ((a) < 0))
77 #define	AVL_CMP(a, b)	(((a) > (b)) - ((a) < (b)))
78 #define	AVL_PCMP(a, b)	\
79 	(((uintptr_t)(a) > (uintptr_t)(b)) - ((uintptr_t)(a) < (uintptr_t)(b)))
80 
81 typedef enum { B_FALSE, B_TRUE } boolean_t;
82 
83 /* CRC64 table */
84 #define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
85 
86 /*
87  * Macros for various sorts of alignment and rounding when the alignment
88  * is known to be a power of 2.
89  */
90 #define	P2ALIGN(x, align)		((x) & -(align))
91 #define	P2PHASE(x, align)		((x) & ((align) - 1))
92 #define	P2NPHASE(x, align)		(-(x) & ((align) - 1))
93 #define	P2ROUNDUP(x, align)		(-(-(x) & -(align)))
94 #define	P2END(x, align)			(-(~(x) & -(align)))
95 #define	P2PHASEUP(x, align, phase)	((phase) - (((phase) - (x)) & -(align)))
96 #define	P2BOUNDARY(off, len, align)	(((off) ^ ((off) + (len) - 1)) > (align) - 1)
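
/*
 * Minimal usage sketch for the macros above; the helper name is
 * hypothetical.  With align = 512: P2ALIGN(1000, 512) == 512,
 * P2PHASE(1000, 512) == 488, P2NPHASE(1000, 512) == 24 and
 * P2ROUNDUP(1000, 512) == 1024.
 */
static inline uint64_t
example_p2roundup(uint64_t x, uint64_t align)
{
	/* align must be a power of 2 */
	return (P2ROUNDUP(x, align));
}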
97 
98 /*
99  * General-purpose 32-bit and 64-bit bitfield encodings.
100  */
101 #define	BF32_DECODE(x, low, len)	P2PHASE((x) >> (low), 1U << (len))
102 #define	BF64_DECODE(x, low, len)	P2PHASE((x) >> (low), 1ULL << (len))
103 #define	BF32_ENCODE(x, low, len)	(P2PHASE((x), 1U << (len)) << (low))
104 #define	BF64_ENCODE(x, low, len)	(P2PHASE((x), 1ULL << (len)) << (low))
105 
106 #define	BF32_GET(x, low, len)		BF32_DECODE(x, low, len)
107 #define	BF64_GET(x, low, len)		BF64_DECODE(x, low, len)
108 
109 #define	BF32_SET(x, low, len, val)	\
110 	((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
111 #define	BF64_SET(x, low, len, val)	\
112 	((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))
113 
114 #define	BF32_GET_SB(x, low, len, shift, bias)	\
115 	((BF32_GET(x, low, len) + (bias)) << (shift))
116 #define	BF64_GET_SB(x, low, len, shift, bias)	\
117 	((BF64_GET(x, low, len) + (bias)) << (shift))
118 
119 #define	BF32_SET_SB(x, low, len, shift, bias, val)	\
120 	BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
121 #define	BF64_SET_SB(x, low, len, shift, bias, val)	\
122 	BF64_SET(x, low, len, ((val) >> (shift)) - (bias))
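
/*
 * Minimal usage sketch for the bitfield macros; the helper name and the
 * field position (bits 40..47) are hypothetical.
 */
static inline uint64_t
example_bf64_roundtrip(uint64_t word, uint64_t val)
{
	BF64_SET(word, 40, 8, val);	/* store the low 8 bits of val */
	return (BF64_GET(word, 40, 8));	/* reads back val & 0xff */
}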
123 
124 /*
125  * Macros to reverse byte order
126  */
127 #define	BSWAP_8(x)	((x) & 0xff)
128 #define	BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
129 #define	BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
130 #define	BSWAP_64(x)	((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
131 
132 #define	SPA_MINBLOCKSHIFT	9
133 #define	SPA_OLD_MAXBLOCKSHIFT	17
134 #define	SPA_MAXBLOCKSHIFT	24
135 #define	SPA_MINBLOCKSIZE	(1ULL << SPA_MINBLOCKSHIFT)
136 #define	SPA_OLD_MAXBLOCKSIZE	(1ULL << SPA_OLD_MAXBLOCKSHIFT)
137 #define	SPA_MAXBLOCKSIZE	(1ULL << SPA_MAXBLOCKSHIFT)
138 
139 /*
140  * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
141  * The ASIZE encoding should be at least 64 times larger (6 more bits)
142  * to support up to 4-way RAID-Z mirror mode with worst-case gang block
143  * overhead, three DVAs per bp, plus one more bit in case we do anything
144  * else that expands the ASIZE.
145  */
146 #define	SPA_LSIZEBITS		16	/* LSIZE up to 32M (2^16 * 512)	*/
147 #define	SPA_PSIZEBITS		16	/* PSIZE up to 32M (2^16 * 512)	*/
148 #define	SPA_ASIZEBITS		24	/* ASIZE up to 64 times larger	*/
149 
150 /*
151  * All SPA data is represented by 128-bit data virtual addresses (DVAs).
152  * The members of the dva_t should be considered opaque outside the SPA.
153  */
154 typedef struct dva {
155 	uint64_t	dva_word[2];
156 } dva_t;
157 
158 /*
159  * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
160  */
161 typedef struct zio_cksum {
162 	uint64_t	zc_word[4];
163 } zio_cksum_t;
164 
165 /*
166  * Some checksums/hashes need a 256-bit initialization salt. This salt is kept
167  * secret and is suitable for use in MAC algorithms as the key.
168  */
169 typedef struct zio_cksum_salt {
170 	uint8_t		zcs_bytes[32];
171 } zio_cksum_salt_t;
172 
173 /*
174  * Each block is described by its DVAs, time of birth, checksum, etc.
175  * The word-by-word, bit-by-bit layout of the blkptr is as follows:
176  *
177  *	64	56	48	40	32	24	16	8	0
178  *	+-------+-------+-------+-------+-------+-------+-------+-------+
179  * 0	|		vdev1		| GRID  |	  ASIZE		|
180  *	+-------+-------+-------+-------+-------+-------+-------+-------+
181  * 1	|G|			 offset1				|
182  *	+-------+-------+-------+-------+-------+-------+-------+-------+
183  * 2	|		vdev2		| GRID  |	  ASIZE		|
184  *	+-------+-------+-------+-------+-------+-------+-------+-------+
185  * 3	|G|			 offset2				|
186  *	+-------+-------+-------+-------+-------+-------+-------+-------+
187  * 4	|		vdev3		| GRID  |	  ASIZE		|
188  *	+-------+-------+-------+-------+-------+-------+-------+-------+
189  * 5	|G|			 offset3				|
190  *	+-------+-------+-------+-------+-------+-------+-------+-------+
191  * 6	|BDX|lvl| type	| cksum |E| comp|    PSIZE	|     LSIZE	|
192  *	+-------+-------+-------+-------+-------+-------+-------+-------+
193  * 7	|			padding					|
194  *	+-------+-------+-------+-------+-------+-------+-------+-------+
195  * 8	|			padding					|
196  *	+-------+-------+-------+-------+-------+-------+-------+-------+
197  * 9	|			physical birth txg			|
198  *	+-------+-------+-------+-------+-------+-------+-------+-------+
199  * a	|			logical birth txg			|
200  *	+-------+-------+-------+-------+-------+-------+-------+-------+
201  * b	|			fill count				|
202  *	+-------+-------+-------+-------+-------+-------+-------+-------+
203  * c	|			checksum[0]				|
204  *	+-------+-------+-------+-------+-------+-------+-------+-------+
205  * d	|			checksum[1]				|
206  *	+-------+-------+-------+-------+-------+-------+-------+-------+
207  * e	|			checksum[2]				|
208  *	+-------+-------+-------+-------+-------+-------+-------+-------+
209  * f	|			checksum[3]				|
210  *	+-------+-------+-------+-------+-------+-------+-------+-------+
211  *
212  * Legend:
213  *
214  * vdev		virtual device ID
215  * offset	offset into virtual device
216  * LSIZE	logical size
217  * PSIZE	physical size (after compression)
218  * ASIZE	allocated size (including RAID-Z parity and gang block headers)
219  * GRID		RAID-Z layout information (reserved for future use)
220  * cksum	checksum function
221  * comp		compression function
222  * G		gang block indicator
223  * B		byteorder (endianness)
224  * D		dedup
225  * X		encryption (on version 30, which is not supported)
226  * E		blkptr_t contains embedded data (see below)
227  * lvl		level of indirection
228  * type		DMU object type
229  * phys birth	txg of block allocation; zero if same as logical birth txg
230  * log. birth	transaction group in which the block was logically born
231  * fill count	number of non-zero blocks under this bp
232  * checksum[4]	256-bit checksum of the data this bp describes
233  */
234 
235 /*
236  * "Embedded" blkptr_t's don't actually point to a block, instead they
237  * have a data payload embedded in the blkptr_t itself.  See the comment
238  * in blkptr.c for more details.
239  *
240  * The blkptr_t is laid out as follows:
241  *
242  *	64	56	48	40	32	24	16	8	0
243  *	+-------+-------+-------+-------+-------+-------+-------+-------+
244  * 0	|      payload                                                  |
245  * 1	|      payload                                                  |
246  * 2	|      payload                                                  |
247  * 3	|      payload                                                  |
248  * 4	|      payload                                                  |
249  * 5	|      payload                                                  |
250  *	+-------+-------+-------+-------+-------+-------+-------+-------+
251  * 6	|BDX|lvl| type	| etype |E| comp| PSIZE|              LSIZE	|
252  *	+-------+-------+-------+-------+-------+-------+-------+-------+
253  * 7	|      payload                                                  |
254  * 8	|      payload                                                  |
255  * 9	|      payload                                                  |
256  *	+-------+-------+-------+-------+-------+-------+-------+-------+
257  * a	|			logical birth txg			|
258  *	+-------+-------+-------+-------+-------+-------+-------+-------+
259  * b	|      payload                                                  |
260  * c	|      payload                                                  |
261  * d	|      payload                                                  |
262  * e	|      payload                                                  |
263  * f	|      payload                                                  |
264  *	+-------+-------+-------+-------+-------+-------+-------+-------+
265  *
266  * Legend:
267  *
268  * payload		contains the embedded data
269  * B (byteorder)	byteorder (endianness)
270  * D (dedup)		padding (set to zero)
271  * X			encryption (set to zero; see above)
272  * E (embedded)		set to one
273  * lvl			indirection level
274  * type			DMU object type
275  * etype		how to interpret embedded data (BP_EMBEDDED_TYPE_*)
276  * comp			compression function of payload
277  * PSIZE		size of payload after compression, in bytes
278  * LSIZE		logical size of payload, in bytes
279  *			note that 25 bits is enough to store the largest
280  *			"normal" BP's LSIZE (2^16 * 2^9) in bytes
281  * log. birth		transaction group in which the block was logically born
282  *
283  * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
284  * bp's they are stored in units of SPA_MINBLOCKSHIFT.
285  * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
286  * The B, D, X, lvl, type, and comp fields are stored the same as with normal
287  * BP's so the BP_SET_* macros can be used with them.  etype, PSIZE, LSIZE must
288  * be set with the BPE_SET_* macros.  BP_SET_EMBEDDED() should be called before
289  * other macros, as they assert that they are only used on BP's of the correct
290  * "embedded-ness".
291  */
292 
293 #define	BPE_GET_ETYPE(bp)	\
294 	(ASSERT(BP_IS_EMBEDDED(bp)), \
295 	BF64_GET((bp)->blk_prop, 40, 8))
296 #define	BPE_SET_ETYPE(bp, t)	do { \
297 	ASSERT(BP_IS_EMBEDDED(bp)); \
298 	BF64_SET((bp)->blk_prop, 40, 8, t); \
299 _NOTE(CONSTCOND) } while (0)
300 
301 #define	BPE_GET_LSIZE(bp)	\
302 	(ASSERT(BP_IS_EMBEDDED(bp)), \
303 	BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
304 #define	BPE_SET_LSIZE(bp, x)	do { \
305 	ASSERT(BP_IS_EMBEDDED(bp)); \
306 	BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
307 _NOTE(CONSTCOND) } while (0)
308 
309 #define	BPE_GET_PSIZE(bp)	\
310 	(ASSERT(BP_IS_EMBEDDED(bp)), \
311 	BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
312 #define	BPE_SET_PSIZE(bp, x)	do { \
313 	ASSERT(BP_IS_EMBEDDED(bp)); \
314 	BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
315 _NOTE(CONSTCOND) } while (0)
316 
317 typedef enum bp_embedded_type {
318 	BP_EMBEDDED_TYPE_DATA,
319 	BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
320 	NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
321 } bp_embedded_type_t;
322 
323 #define	BPE_NUM_WORDS 14
324 #define	BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
325 #define	BPE_IS_PAYLOADWORD(bp, wp) \
326 	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
327 
328 #define	SPA_BLKPTRSHIFT	7		/* blkptr_t is 128 bytes	*/
329 #define	SPA_DVAS_PER_BP	3		/* Number of DVAs in a bp	*/
330 
331 typedef struct blkptr {
332 	dva_t		blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
333 	uint64_t	blk_prop;	/* size, compression, type, etc	    */
334 	uint64_t	blk_pad[2];	/* Extra space for the future	    */
335 	uint64_t	blk_phys_birth;	/* txg when block was allocated	    */
336 	uint64_t	blk_birth;	/* transaction group at birth	    */
337 	uint64_t	blk_fill;	/* fill count			    */
338 	zio_cksum_t	blk_cksum;	/* 256-bit checksum		    */
339 } blkptr_t;
340 
341 /*
342  * Macros to get and set fields in a bp or DVA.
343  */
344 #define	DVA_GET_ASIZE(dva)	\
345 	BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
346 #define	DVA_SET_ASIZE(dva, x)	\
347 	BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
348 	SPA_MINBLOCKSHIFT, 0, x)
349 
350 #define	DVA_GET_GRID(dva)	BF64_GET((dva)->dva_word[0], 24, 8)
351 #define	DVA_SET_GRID(dva, x)	BF64_SET((dva)->dva_word[0], 24, 8, x)
352 
353 #define	DVA_GET_VDEV(dva)	BF64_GET((dva)->dva_word[0], 32, 32)
354 #define	DVA_SET_VDEV(dva, x)	BF64_SET((dva)->dva_word[0], 32, 32, x)
355 
356 #define	DVA_GET_OFFSET(dva)	\
357 	BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
358 #define	DVA_SET_OFFSET(dva, x)	\
359 	BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
360 
361 #define	DVA_GET_GANG(dva)	BF64_GET((dva)->dva_word[1], 63, 1)
362 #define	DVA_SET_GANG(dva, x)	BF64_SET((dva)->dva_word[1], 63, 1, x)
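
/*
 * Minimal sketch of unpacking a single DVA with the accessors above; the
 * helper name is hypothetical.  offset and asize come back in bytes
 * because the *_SB accessors shift by SPA_MINBLOCKSHIFT.
 */
static inline void
example_dva_decode(const dva_t *dva, uint64_t *vdev, uint64_t *offset,
    uint64_t *asize, boolean_t *gang)
{
	*vdev = DVA_GET_VDEV(dva);	/* leaf/top-level vdev ID */
	*offset = DVA_GET_OFFSET(dva);	/* byte offset within the vdev */
	*asize = DVA_GET_ASIZE(dva);	/* allocated bytes, incl. parity */
	*gang = DVA_GET_GANG(dva) ? B_TRUE : B_FALSE;
}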
363 
364 #define	BP_GET_LSIZE(bp)	\
365 	(BP_IS_EMBEDDED(bp) ?	\
366 	(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
367 	BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
368 #define	BP_SET_LSIZE(bp, x)	do { \
369 	ASSERT(!BP_IS_EMBEDDED(bp)); \
370 	BF64_SET_SB((bp)->blk_prop, \
371 	    0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
372 _NOTE(CONSTCOND) } while (0)
373 
374 #define	BP_GET_PSIZE(bp)	\
375 	BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
376 #define	BP_SET_PSIZE(bp, x)	\
377 	BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
378 
379 #define	BP_GET_COMPRESS(bp)	BF64_GET((bp)->blk_prop, 32, 7)
380 #define	BP_SET_COMPRESS(bp, x)	BF64_SET((bp)->blk_prop, 32, 7, x)
381 
382 #define	BP_GET_CHECKSUM(bp)	BF64_GET((bp)->blk_prop, 40, 8)
383 #define	BP_SET_CHECKSUM(bp, x)	BF64_SET((bp)->blk_prop, 40, 8, x)
384 
385 #define	BP_GET_TYPE(bp)		BF64_GET((bp)->blk_prop, 48, 8)
386 #define	BP_SET_TYPE(bp, x)	BF64_SET((bp)->blk_prop, 48, 8, x)
387 
388 #define	BP_GET_LEVEL(bp)	BF64_GET((bp)->blk_prop, 56, 5)
389 #define	BP_SET_LEVEL(bp, x)	BF64_SET((bp)->blk_prop, 56, 5, x)
390 
391 #define	BP_IS_EMBEDDED(bp)	BF64_GET((bp)->blk_prop, 39, 1)
392 
393 #define	BP_GET_DEDUP(bp)	BF64_GET((bp)->blk_prop, 62, 1)
394 #define	BP_SET_DEDUP(bp, x)	BF64_SET((bp)->blk_prop, 62, 1, x)
395 
396 #define	BP_GET_BYTEORDER(bp)	BF64_GET((bp)->blk_prop, 63, 1)
397 #define	BP_SET_BYTEORDER(bp, x)	BF64_SET((bp)->blk_prop, 63, 1, x)
398 
399 #define	BP_PHYSICAL_BIRTH(bp)		\
400 	((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
401 
402 #define	BP_GET_ASIZE(bp)	\
403 	(DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
404 		DVA_GET_ASIZE(&(bp)->blk_dva[2]))
405 
406 #define	BP_GET_UCSIZE(bp) \
407 	((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
408 	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
409 
410 #define	BP_GET_NDVAS(bp)	\
411 	(!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
412 	!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
413 	!!DVA_GET_ASIZE(&(bp)->blk_dva[2]))
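
/*
 * Minimal sketch of reading the commonly consulted properties of a
 * regular (non-embedded) block pointer; the helper name is hypothetical.
 */
static inline void
example_bp_props(const blkptr_t *bp, uint64_t *psize, uint64_t *cksum,
    uint64_t *comp, uint64_t *level, uint64_t *type)
{
	*psize = BP_GET_PSIZE(bp);	/* physical size, in bytes */
	*cksum = BP_GET_CHECKSUM(bp);	/* enum zio_checksum */
	*comp = BP_GET_COMPRESS(bp);	/* enum zio_compress */
	*level = BP_GET_LEVEL(bp);	/* level of indirection */
	*type = BP_GET_TYPE(bp);	/* dmu_object_type_t */
}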
414 
415 #define	DVA_EQUAL(dva1, dva2)	\
416 	((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
417 	(dva1)->dva_word[0] == (dva2)->dva_word[0])
418 
419 #define	ZIO_CHECKSUM_EQUAL(zc1, zc2) \
420 	(0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
421 	((zc1).zc_word[1] - (zc2).zc_word[1]) | \
422 	((zc1).zc_word[2] - (zc2).zc_word[2]) | \
423 	((zc1).zc_word[3] - (zc2).zc_word[3])))
424 
425 
426 #define	DVA_IS_VALID(dva)	(DVA_GET_ASIZE(dva) != 0)
427 
428 #define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
429 {						\
430 	(zcp)->zc_word[0] = w0;			\
431 	(zcp)->zc_word[1] = w1;			\
432 	(zcp)->zc_word[2] = w2;			\
433 	(zcp)->zc_word[3] = w3;			\
434 }
435 
436 #define	BP_IDENTITY(bp)		(&(bp)->blk_dva[0])
437 #define	BP_IS_GANG(bp)		DVA_GET_GANG(BP_IDENTITY(bp))
438 #define	DVA_IS_EMPTY(dva)	((dva)->dva_word[0] == 0ULL &&  \
439 	(dva)->dva_word[1] == 0ULL)
440 #define	BP_IS_HOLE(bp)		DVA_IS_EMPTY(BP_IDENTITY(bp))
441 #define	BP_IS_OLDER(bp, txg)	(!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))
442 
443 #define	BP_ZERO(bp)				\
444 {						\
445 	(bp)->blk_dva[0].dva_word[0] = 0;	\
446 	(bp)->blk_dva[0].dva_word[1] = 0;	\
447 	(bp)->blk_dva[1].dva_word[0] = 0;	\
448 	(bp)->blk_dva[1].dva_word[1] = 0;	\
449 	(bp)->blk_dva[2].dva_word[0] = 0;	\
450 	(bp)->blk_dva[2].dva_word[1] = 0;	\
451 	(bp)->blk_prop = 0;			\
452 	(bp)->blk_pad[0] = 0;			\
453 	(bp)->blk_pad[1] = 0;			\
454 	(bp)->blk_phys_birth = 0;		\
455 	(bp)->blk_birth = 0;			\
456 	(bp)->blk_fill = 0;			\
457 	ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0);	\
458 }
459 
460 #if BYTE_ORDER == _BIG_ENDIAN
461 #define	ZFS_HOST_BYTEORDER	(0ULL)
462 #else
463 #define	ZFS_HOST_BYTEORDER	(1ULL)
464 #endif
465 
466 #define	BP_SHOULD_BYTESWAP(bp)	(BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
471 
472 /*
473  * Embedded checksum
474  */
475 #define	ZEC_MAGIC	0x210da7ab10c7a11ULL
476 
477 typedef struct zio_eck {
478 	uint64_t	zec_magic;	/* for validation, endianness	*/
479 	zio_cksum_t	zec_cksum;	/* 256-bit checksum		*/
480 } zio_eck_t;
481 
482 /*
483  * Gang block headers are self-checksumming and contain an array
484  * of block pointers.
485  */
486 #define	SPA_GANGBLOCKSIZE	SPA_MINBLOCKSIZE
487 #define	SPA_GBH_NBLKPTRS	((SPA_GANGBLOCKSIZE - \
488 	sizeof (zio_eck_t)) / sizeof (blkptr_t))
489 #define	SPA_GBH_FILLER		((SPA_GANGBLOCKSIZE - \
490 	sizeof (zio_eck_t) - \
491 	(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
492 	sizeof (uint64_t))
493 
494 typedef struct zio_gbh {
495 	blkptr_t		zg_blkptr[SPA_GBH_NBLKPTRS];
496 	uint64_t		zg_filler[SPA_GBH_FILLER];
497 	zio_eck_t		zg_tail;
498 } zio_gbh_phys_t;
499 
500 #define	VDEV_RAIDZ_MAXPARITY	3
501 
502 #define	VDEV_PAD_SIZE		(8 << 10)
503 /* 2 padding areas (vl_pad1 and vl_be) to skip */
504 #define	VDEV_SKIP_SIZE		(VDEV_PAD_SIZE * 2)
505 #define	VDEV_PHYS_SIZE		(112 << 10)
506 #define	VDEV_UBERBLOCK_RING	(128 << 10)
507 
508 /*
509  * MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
510  * ring when MMP is enabled.
511  */
512 #define	MMP_BLOCKS_PER_LABEL	1
513 
514 /* The largest uberblock we support is 8k. */
515 #define	MAX_UBERBLOCK_SHIFT	(13)
516 #define	VDEV_UBERBLOCK_SHIFT(vd)	\
517 	MIN(MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT), MAX_UBERBLOCK_SHIFT)
518 #define	VDEV_UBERBLOCK_COUNT(vd)	\
519 	(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
520 #define	VDEV_UBERBLOCK_OFFSET(vd, n)	\
521 	offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
522 #define	VDEV_UBERBLOCK_SIZE(vd)		(1ULL << VDEV_UBERBLOCK_SHIFT(vd))
523 
524 typedef struct vdev_phys {
525 	char		vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
526 	zio_eck_t	vp_zbt;
527 } vdev_phys_t;
528 
529 typedef enum vbe_vers {
530 	/* The bootenv file is stored as ASCII text in the envblock */
531 	VB_RAW = 0,
532 
533 	/*
534 	 * The bootenv file is converted to an nvlist and then packed into the
535 	 * envblock.
536 	 */
537 	VB_NVLIST = 1
538 } vbe_vers_t;
539 
540 typedef struct vdev_boot_envblock {
541 	uint64_t	vbe_version;
542 	char		vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) -
543 			sizeof (zio_eck_t)];
544 	zio_eck_t	vbe_zbt;
545 } vdev_boot_envblock_t;
546 
547 CTASSERT(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE);
548 
549 typedef struct vdev_label {
550 	char		vl_pad1[VDEV_PAD_SIZE];			/*  8K  */
551 	vdev_boot_envblock_t	vl_be;				/*  8K  */
552 	vdev_phys_t	vl_vdev_phys;				/* 112K	*/
553 	char		vl_uberblock[VDEV_UBERBLOCK_RING];	/* 128K	*/
554 } vdev_label_t;							/* 256K total */
555 
556 /*
557  * vdev_dirty() flags
558  */
559 #define	VDD_METASLAB	0x01
560 #define	VDD_DTL		0x02
561 
562 /*
563  * Size and offset of embedded boot loader region on each label.
564  * The total size of the first two labels plus the boot area is 4MB.
565  */
566 #define	VDEV_BOOT_OFFSET	(2 * sizeof (vdev_label_t))
567 #define	VDEV_BOOT_SIZE		(7ULL << 19)			/* 3.5M	*/
568 
569 /*
570  * Size of label regions at the start and end of each leaf device.
571  */
572 #define	VDEV_LABEL_START_SIZE	(2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
573 #define	VDEV_LABEL_END_SIZE	(2 * sizeof (vdev_label_t))
574 #define	VDEV_LABELS		4
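
/*
 * Minimal sketch of where label "l" (0 .. VDEV_LABELS - 1) lives on a
 * leaf device of psize bytes: labels 0 and 1 sit at the front, labels 2
 * and 3 at the back.  psize is assumed to already be P2ALIGN()ed to
 * sizeof (vdev_label_t); the helper name is hypothetical.
 */
static inline uint64_t
example_vdev_label_offset(uint64_t psize, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? 0 :
	    psize - VDEV_LABELS * sizeof (vdev_label_t)));
}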
575 
576 enum zio_checksum {
577 	ZIO_CHECKSUM_INHERIT = 0,
578 	ZIO_CHECKSUM_ON,
579 	ZIO_CHECKSUM_OFF,
580 	ZIO_CHECKSUM_LABEL,
581 	ZIO_CHECKSUM_GANG_HEADER,
582 	ZIO_CHECKSUM_ZILOG,
583 	ZIO_CHECKSUM_FLETCHER_2,
584 	ZIO_CHECKSUM_FLETCHER_4,
585 	ZIO_CHECKSUM_SHA256,
586 	ZIO_CHECKSUM_ZILOG2,
587 	ZIO_CHECKSUM_NOPARITY,
588 	ZIO_CHECKSUM_SHA512,
589 	ZIO_CHECKSUM_SKEIN,
590 	ZIO_CHECKSUM_EDONR,
591 	ZIO_CHECKSUM_FUNCTIONS
592 };
593 
594 #define	ZIO_CHECKSUM_ON_VALUE	ZIO_CHECKSUM_FLETCHER_4
595 #define	ZIO_CHECKSUM_DEFAULT	ZIO_CHECKSUM_ON
596 
597 enum zio_compress {
598 	ZIO_COMPRESS_INHERIT = 0,
599 	ZIO_COMPRESS_ON,
600 	ZIO_COMPRESS_OFF,
601 	ZIO_COMPRESS_LZJB,
602 	ZIO_COMPRESS_EMPTY,
603 	ZIO_COMPRESS_GZIP_1,
604 	ZIO_COMPRESS_GZIP_2,
605 	ZIO_COMPRESS_GZIP_3,
606 	ZIO_COMPRESS_GZIP_4,
607 	ZIO_COMPRESS_GZIP_5,
608 	ZIO_COMPRESS_GZIP_6,
609 	ZIO_COMPRESS_GZIP_7,
610 	ZIO_COMPRESS_GZIP_8,
611 	ZIO_COMPRESS_GZIP_9,
612 	ZIO_COMPRESS_ZLE,
613 	ZIO_COMPRESS_LZ4,
614 	ZIO_COMPRESS_ZSTD,
615 	ZIO_COMPRESS_FUNCTIONS
616 };
617 
618 enum zio_zstd_levels {
619 	ZIO_ZSTD_LEVEL_INHERIT = 0,
620 	ZIO_ZSTD_LEVEL_1,
621 #define	ZIO_ZSTD_LEVEL_MIN	ZIO_ZSTD_LEVEL_1
622 	ZIO_ZSTD_LEVEL_2,
623 	ZIO_ZSTD_LEVEL_3,
624 #define	ZIO_ZSTD_LEVEL_DEFAULT	ZIO_ZSTD_LEVEL_3
625 	ZIO_ZSTD_LEVEL_4,
626 	ZIO_ZSTD_LEVEL_5,
627 	ZIO_ZSTD_LEVEL_6,
628 	ZIO_ZSTD_LEVEL_7,
629 	ZIO_ZSTD_LEVEL_8,
630 	ZIO_ZSTD_LEVEL_9,
631 	ZIO_ZSTD_LEVEL_10,
632 	ZIO_ZSTD_LEVEL_11,
633 	ZIO_ZSTD_LEVEL_12,
634 	ZIO_ZSTD_LEVEL_13,
635 	ZIO_ZSTD_LEVEL_14,
636 	ZIO_ZSTD_LEVEL_15,
637 	ZIO_ZSTD_LEVEL_16,
638 	ZIO_ZSTD_LEVEL_17,
639 	ZIO_ZSTD_LEVEL_18,
640 	ZIO_ZSTD_LEVEL_19,
641 #define	ZIO_ZSTD_LEVEL_MAX	ZIO_ZSTD_LEVEL_19
642 	ZIO_ZSTD_LEVEL_RESERVE = 101, /* Leave room for new positive levels */
643 	ZIO_ZSTD_LEVEL_FAST, /* Fast levels are negative */
644 	ZIO_ZSTD_LEVEL_FAST_1,
645 #define	ZIO_ZSTD_LEVEL_FAST_DEFAULT	ZIO_ZSTD_LEVEL_FAST_1
646 	ZIO_ZSTD_LEVEL_FAST_2,
647 	ZIO_ZSTD_LEVEL_FAST_3,
648 	ZIO_ZSTD_LEVEL_FAST_4,
649 	ZIO_ZSTD_LEVEL_FAST_5,
650 	ZIO_ZSTD_LEVEL_FAST_6,
651 	ZIO_ZSTD_LEVEL_FAST_7,
652 	ZIO_ZSTD_LEVEL_FAST_8,
653 	ZIO_ZSTD_LEVEL_FAST_9,
654 	ZIO_ZSTD_LEVEL_FAST_10,
655 	ZIO_ZSTD_LEVEL_FAST_20,
656 	ZIO_ZSTD_LEVEL_FAST_30,
657 	ZIO_ZSTD_LEVEL_FAST_40,
658 	ZIO_ZSTD_LEVEL_FAST_50,
659 	ZIO_ZSTD_LEVEL_FAST_60,
660 	ZIO_ZSTD_LEVEL_FAST_70,
661 	ZIO_ZSTD_LEVEL_FAST_80,
662 	ZIO_ZSTD_LEVEL_FAST_90,
663 	ZIO_ZSTD_LEVEL_FAST_100,
664 	ZIO_ZSTD_LEVEL_FAST_500,
665 	ZIO_ZSTD_LEVEL_FAST_1000,
666 #define	ZIO_ZSTD_LEVEL_FAST_MAX	ZIO_ZSTD_LEVEL_FAST_1000
667 	ZIO_ZSTD_LEVEL_AUTO = 251, /* Reserved for future use */
668 	ZIO_ZSTD_LEVEL_LEVELS
669 };
670 
671 #define	ZIO_COMPRESS_ON_VALUE	ZIO_COMPRESS_LZJB
672 #define	ZIO_COMPRESS_DEFAULT	ZIO_COMPRESS_OFF
673 
674 /* nvlist pack encoding */
675 #define	NV_ENCODE_NATIVE	0
676 #define	NV_ENCODE_XDR		1
677 
678 typedef enum {
679 	DATA_TYPE_UNKNOWN = 0,
680 	DATA_TYPE_BOOLEAN,
681 	DATA_TYPE_BYTE,
682 	DATA_TYPE_INT16,
683 	DATA_TYPE_UINT16,
684 	DATA_TYPE_INT32,
685 	DATA_TYPE_UINT32,
686 	DATA_TYPE_INT64,
687 	DATA_TYPE_UINT64,
688 	DATA_TYPE_STRING,
689 	DATA_TYPE_BYTE_ARRAY,
690 	DATA_TYPE_INT16_ARRAY,
691 	DATA_TYPE_UINT16_ARRAY,
692 	DATA_TYPE_INT32_ARRAY,
693 	DATA_TYPE_UINT32_ARRAY,
694 	DATA_TYPE_INT64_ARRAY,
695 	DATA_TYPE_UINT64_ARRAY,
696 	DATA_TYPE_STRING_ARRAY,
697 	DATA_TYPE_HRTIME,
698 	DATA_TYPE_NVLIST,
699 	DATA_TYPE_NVLIST_ARRAY,
700 	DATA_TYPE_BOOLEAN_VALUE,
701 	DATA_TYPE_INT8,
702 	DATA_TYPE_UINT8,
703 	DATA_TYPE_BOOLEAN_ARRAY,
704 	DATA_TYPE_INT8_ARRAY,
705 	DATA_TYPE_UINT8_ARRAY
706 } data_type_t;
707 
708 /*
709  * On-disk version number.
710  */
711 #define	SPA_VERSION_1			1ULL
712 #define	SPA_VERSION_2			2ULL
713 #define	SPA_VERSION_3			3ULL
714 #define	SPA_VERSION_4			4ULL
715 #define	SPA_VERSION_5			5ULL
716 #define	SPA_VERSION_6			6ULL
717 #define	SPA_VERSION_7			7ULL
718 #define	SPA_VERSION_8			8ULL
719 #define	SPA_VERSION_9			9ULL
720 #define	SPA_VERSION_10			10ULL
721 #define	SPA_VERSION_11			11ULL
722 #define	SPA_VERSION_12			12ULL
723 #define	SPA_VERSION_13			13ULL
724 #define	SPA_VERSION_14			14ULL
725 #define	SPA_VERSION_15			15ULL
726 #define	SPA_VERSION_16			16ULL
727 #define	SPA_VERSION_17			17ULL
728 #define	SPA_VERSION_18			18ULL
729 #define	SPA_VERSION_19			19ULL
730 #define	SPA_VERSION_20			20ULL
731 #define	SPA_VERSION_21			21ULL
732 #define	SPA_VERSION_22			22ULL
733 #define	SPA_VERSION_23			23ULL
734 #define	SPA_VERSION_24			24ULL
735 #define	SPA_VERSION_25			25ULL
736 #define	SPA_VERSION_26			26ULL
737 #define	SPA_VERSION_27			27ULL
738 #define	SPA_VERSION_28			28ULL
739 #define	SPA_VERSION_5000		5000ULL
740 
741 /*
742  * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
743  * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
744  * and do the appropriate changes.  Also bump the version number in
745  * usr/src/grub/capability.
746  */
747 #define	SPA_VERSION			SPA_VERSION_5000
748 #define	SPA_VERSION_STRING		"5000"
749 
750 /*
751  * Symbolic names for the changes that caused a SPA_VERSION switch.
752  * Used in the code when checking for presence or absence of a feature.
753  * Feel free to define multiple symbolic names for each version if there
754  * were multiple changes to on-disk structures during that version.
755  *
756  * NOTE: When checking the current SPA_VERSION in your code, be sure
757  *       to use spa_version() since it reports the version of the
758  *       last synced uberblock.  Checking the in-flight version can
759  *       be dangerous in some cases.
760  */
761 #define	SPA_VERSION_INITIAL		SPA_VERSION_1
762 #define	SPA_VERSION_DITTO_BLOCKS	SPA_VERSION_2
763 #define	SPA_VERSION_SPARES		SPA_VERSION_3
764 #define	SPA_VERSION_RAID6		SPA_VERSION_3
765 #define	SPA_VERSION_BPLIST_ACCOUNT	SPA_VERSION_3
766 #define	SPA_VERSION_RAIDZ_DEFLATE	SPA_VERSION_3
767 #define	SPA_VERSION_DNODE_BYTES		SPA_VERSION_3
768 #define	SPA_VERSION_ZPOOL_HISTORY	SPA_VERSION_4
769 #define	SPA_VERSION_GZIP_COMPRESSION	SPA_VERSION_5
770 #define	SPA_VERSION_BOOTFS		SPA_VERSION_6
771 #define	SPA_VERSION_SLOGS		SPA_VERSION_7
772 #define	SPA_VERSION_DELEGATED_PERMS	SPA_VERSION_8
773 #define	SPA_VERSION_FUID		SPA_VERSION_9
774 #define	SPA_VERSION_REFRESERVATION	SPA_VERSION_9
775 #define	SPA_VERSION_REFQUOTA		SPA_VERSION_9
776 #define	SPA_VERSION_UNIQUE_ACCURATE	SPA_VERSION_9
777 #define	SPA_VERSION_L2CACHE		SPA_VERSION_10
778 #define	SPA_VERSION_NEXT_CLONES		SPA_VERSION_11
779 #define	SPA_VERSION_ORIGIN		SPA_VERSION_11
780 #define	SPA_VERSION_DSL_SCRUB		SPA_VERSION_11
781 #define	SPA_VERSION_SNAP_PROPS		SPA_VERSION_12
782 #define	SPA_VERSION_USED_BREAKDOWN	SPA_VERSION_13
783 #define	SPA_VERSION_PASSTHROUGH_X	SPA_VERSION_14
784 #define	SPA_VERSION_USERSPACE		SPA_VERSION_15
785 #define	SPA_VERSION_STMF_PROP		SPA_VERSION_16
786 #define	SPA_VERSION_RAIDZ3		SPA_VERSION_17
787 #define	SPA_VERSION_USERREFS		SPA_VERSION_18
788 #define	SPA_VERSION_HOLES		SPA_VERSION_19
789 #define	SPA_VERSION_ZLE_COMPRESSION	SPA_VERSION_20
790 #define	SPA_VERSION_DEDUP		SPA_VERSION_21
791 #define	SPA_VERSION_RECVD_PROPS		SPA_VERSION_22
792 #define	SPA_VERSION_SLIM_ZIL		SPA_VERSION_23
793 #define	SPA_VERSION_SA			SPA_VERSION_24
794 #define	SPA_VERSION_SCAN		SPA_VERSION_25
795 #define	SPA_VERSION_DIR_CLONES		SPA_VERSION_26
796 #define	SPA_VERSION_DEADLISTS		SPA_VERSION_26
797 #define	SPA_VERSION_FAST_SNAP		SPA_VERSION_27
798 #define	SPA_VERSION_MULTI_REPLACE	SPA_VERSION_28
799 #define	SPA_VERSION_BEFORE_FEATURES	SPA_VERSION_28
800 #define	SPA_VERSION_FEATURES		SPA_VERSION_5000
801 
802 #define	SPA_VERSION_IS_SUPPORTED(v) \
803 	(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
804 	((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
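
/*
 * Minimal sketch: both a legacy version-28 pool and a feature-flags pool
 * (version 5000) pass this check, while versions 29..4999 and anything
 * newer than SPA_VERSION do not.  The helper name is hypothetical.
 */
static inline boolean_t
example_spa_version_ok(uint64_t v)
{
	return (SPA_VERSION_IS_SUPPORTED(v) ? B_TRUE : B_FALSE);
}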
805 
806 /*
807  * The following are configuration names used in the nvlist describing a pool's
808  * configuration.
809  */
810 #define	ZPOOL_CONFIG_VERSION		"version"
811 #define	ZPOOL_CONFIG_POOL_NAME		"name"
812 #define	ZPOOL_CONFIG_POOL_STATE		"state"
813 #define	ZPOOL_CONFIG_POOL_TXG		"txg"
814 #define	ZPOOL_CONFIG_POOL_GUID		"pool_guid"
815 #define	ZPOOL_CONFIG_CREATE_TXG		"create_txg"
816 #define	ZPOOL_CONFIG_TOP_GUID		"top_guid"
817 #define	ZPOOL_CONFIG_VDEV_TREE		"vdev_tree"
818 #define	ZPOOL_CONFIG_TYPE		"type"
819 #define	ZPOOL_CONFIG_CHILDREN		"children"
820 #define	ZPOOL_CONFIG_ID			"id"
821 #define	ZPOOL_CONFIG_GUID		"guid"
822 #define	ZPOOL_CONFIG_INDIRECT_OBJECT	"com.delphix:indirect_object"
823 #define	ZPOOL_CONFIG_INDIRECT_BIRTHS	"com.delphix:indirect_births"
824 #define	ZPOOL_CONFIG_PREV_INDIRECT_VDEV	"com.delphix:prev_indirect_vdev"
825 #define	ZPOOL_CONFIG_PATH		"path"
826 #define	ZPOOL_CONFIG_DEVID		"devid"
827 #define	ZPOOL_CONFIG_METASLAB_ARRAY	"metaslab_array"
828 #define	ZPOOL_CONFIG_METASLAB_SHIFT	"metaslab_shift"
829 #define	ZPOOL_CONFIG_ASHIFT		"ashift"
830 #define	ZPOOL_CONFIG_ASIZE		"asize"
831 #define	ZPOOL_CONFIG_DTL		"DTL"
832 #define	ZPOOL_CONFIG_STATS		"stats"
833 #define	ZPOOL_CONFIG_WHOLE_DISK		"whole_disk"
834 #define	ZPOOL_CONFIG_ERRCOUNT		"error_count"
835 #define	ZPOOL_CONFIG_NOT_PRESENT	"not_present"
836 #define	ZPOOL_CONFIG_SPARES		"spares"
837 #define	ZPOOL_CONFIG_IS_SPARE		"is_spare"
838 #define	ZPOOL_CONFIG_NPARITY		"nparity"
839 #define	ZPOOL_CONFIG_HOSTID		"hostid"
840 #define	ZPOOL_CONFIG_HOSTNAME		"hostname"
841 #define	ZPOOL_CONFIG_IS_LOG		"is_log"
842 #define	ZPOOL_CONFIG_TIMESTAMP		"timestamp" /* not stored on disk */
843 #define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"
844 #define	ZPOOL_CONFIG_VDEV_CHILDREN	"vdev_children"
845 
846 /*
847  * The persistent vdev state is stored as separate values rather than a single
848  * 'vdev_state' entry.  This is because a device can be in multiple states, such
849  * as offline and degraded.
850  */
851 #define	ZPOOL_CONFIG_OFFLINE            "offline"
852 #define	ZPOOL_CONFIG_FAULTED            "faulted"
853 #define	ZPOOL_CONFIG_DEGRADED           "degraded"
854 #define	ZPOOL_CONFIG_REMOVED            "removed"
855 #define	ZPOOL_CONFIG_FRU		"fru"
856 #define	ZPOOL_CONFIG_AUX_STATE		"aux_state"
857 
858 #define	VDEV_TYPE_ROOT			"root"
859 #define	VDEV_TYPE_MIRROR		"mirror"
860 #define	VDEV_TYPE_REPLACING		"replacing"
861 #define	VDEV_TYPE_RAIDZ			"raidz"
862 #define	VDEV_TYPE_DISK			"disk"
863 #define	VDEV_TYPE_FILE			"file"
864 #define	VDEV_TYPE_MISSING		"missing"
865 #define	VDEV_TYPE_HOLE			"hole"
866 #define	VDEV_TYPE_SPARE			"spare"
867 #define	VDEV_TYPE_LOG			"log"
868 #define	VDEV_TYPE_L2CACHE		"l2cache"
869 #define	VDEV_TYPE_INDIRECT		"indirect"
870 
871 /*
872  * This is needed in userland to report the minimum necessary device size.
873  */
874 #define	SPA_MINDEVSIZE		(64ULL << 20)
875 
876 /*
877  * The location of the pool configuration repository, shared between kernel and
878  * userland.
879  */
880 #define	ZPOOL_CACHE		"/boot/zfs/zpool.cache"
881 
882 /*
883  * vdev states are ordered from least to most healthy.
884  * A vdev that's CANT_OPEN or below is considered unusable.
885  */
886 typedef enum vdev_state {
887 	VDEV_STATE_UNKNOWN = 0,	/* Uninitialized vdev			*/
888 	VDEV_STATE_CLOSED,	/* Not currently open			*/
889 	VDEV_STATE_OFFLINE,	/* Not allowed to open			*/
890 	VDEV_STATE_REMOVED,	/* Explicitly removed from system	*/
891 	VDEV_STATE_CANT_OPEN,	/* Tried to open, but failed		*/
892 	VDEV_STATE_FAULTED,	/* External request to fault device	*/
893 	VDEV_STATE_DEGRADED,	/* Replicated vdev with unhealthy kids	*/
894 	VDEV_STATE_HEALTHY	/* Presumed good			*/
895 } vdev_state_t;
896 
897 /*
898  * vdev aux states.  When a vdev is in the CANT_OPEN state, the aux field
899  * of the vdev stats structure uses these constants to distinguish why.
900  */
901 typedef enum vdev_aux {
902 	VDEV_AUX_NONE,		/* no error				*/
903 	VDEV_AUX_OPEN_FAILED,	/* ldi_open_*() or vn_open() failed	*/
904 	VDEV_AUX_CORRUPT_DATA,	/* bad label or disk contents		*/
905 	VDEV_AUX_NO_REPLICAS,	/* insufficient number of replicas	*/
906 	VDEV_AUX_BAD_GUID_SUM,	/* vdev guid sum doesn't match		*/
907 	VDEV_AUX_TOO_SMALL,	/* vdev size is too small		*/
908 	VDEV_AUX_BAD_LABEL,	/* the label is OK but invalid		*/
909 	VDEV_AUX_VERSION_NEWER,	/* on-disk version is too new		*/
910 	VDEV_AUX_VERSION_OLDER,	/* on-disk version is too old		*/
911 	VDEV_AUX_SPARED		/* hot spare used in another pool	*/
912 } vdev_aux_t;
913 
914 /*
915  * pool state.  The following states are written to disk as part of the normal
916  * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE.  The remaining states are
917  * software abstractions used at various levels to communicate pool state.
918  */
919 typedef enum pool_state {
920 	POOL_STATE_ACTIVE = 0,		/* In active use		*/
921 	POOL_STATE_EXPORTED,		/* Explicitly exported		*/
922 	POOL_STATE_DESTROYED,		/* Explicitly destroyed		*/
923 	POOL_STATE_SPARE,		/* Reserved for hot spare use	*/
924 	POOL_STATE_UNINITIALIZED,	/* Internal spa_t state		*/
925 	POOL_STATE_UNAVAIL,		/* Internal libzfs state	*/
926 	POOL_STATE_POTENTIALLY_ACTIVE	/* Internal libzfs state	*/
927 } pool_state_t;
928 
929 /*
930  * The uberblock version is incremented whenever an incompatible on-disk
931  * format change is made to the SPA, DMU, or ZAP.
932  *
933  * Note: the first two fields should never be moved.  When a storage pool
934  * is opened, the uberblock must be read off the disk before the version
935  * can be checked.  If the ub_version field is moved, we may not detect
936  * version mismatch.  If the ub_magic field is moved, applications that
937  * expect the magic number in the first word won't work.
938  */
939 #define	UBERBLOCK_MAGIC		0x00bab10c		/* oo-ba-bloc!	*/
940 #define	UBERBLOCK_SHIFT		10			/* up to 1K	*/
941 
942 #define	MMP_MAGIC		0xa11cea11		/* all-see-all  */
943 
944 #define	MMP_INTERVAL_VALID_BIT	0x01
945 #define	MMP_SEQ_VALID_BIT	0x02
946 #define	MMP_FAIL_INT_VALID_BIT	0x04
947 
948 #define	MMP_VALID(ubp)		(ubp->ub_magic == UBERBLOCK_MAGIC && \
949 				    ubp->ub_mmp_magic == MMP_MAGIC)
950 #define	MMP_INTERVAL_VALID(ubp)	(MMP_VALID(ubp) && (ubp->ub_mmp_config & \
951 				    MMP_INTERVAL_VALID_BIT))
952 #define	MMP_SEQ_VALID(ubp)	(MMP_VALID(ubp) && (ubp->ub_mmp_config & \
953 				    MMP_SEQ_VALID_BIT))
954 #define	MMP_FAIL_INT_VALID(ubp)	(MMP_VALID(ubp) && (ubp->ub_mmp_config & \
955 				    MMP_FAIL_INT_VALID_BIT))
956 
957 #define	MMP_INTERVAL(ubp)	((ubp->ub_mmp_config & 0x00000000FFFFFF00) \
958 				    >> 8)
959 #define	MMP_SEQ(ubp)		((ubp->ub_mmp_config & 0x0000FFFF00000000) \
960 				    >> 32)
961 #define	MMP_FAIL_INT(ubp)	((ubp->ub_mmp_config & 0xFFFF000000000000) \
962 				    >> 48)
963 
964 typedef struct uberblock {
965 	uint64_t	ub_magic;	/* UBERBLOCK_MAGIC		*/
966 	uint64_t	ub_version;	/* SPA_VERSION			*/
967 	uint64_t	ub_txg;		/* txg of last sync		*/
968 	uint64_t	ub_guid_sum;	/* sum of all vdev guids	*/
969 	uint64_t	ub_timestamp;	/* UTC time of last sync	*/
970 	blkptr_t	ub_rootbp;	/* MOS objset_phys_t		*/
971 	/* highest SPA_VERSION supported by software that wrote this txg */
972 	uint64_t	ub_software_version;
973 	/* Maybe missing in uberblocks we read, but always written */
974 	uint64_t	ub_mmp_magic;
975 	/*
976 	 * If ub_mmp_delay == 0 and ub_mmp_magic is valid, MMP is off.
977 	 * Otherwise, nanosec since last MMP write.
978 	 */
979 	uint64_t	ub_mmp_delay;
980 
981 	/*
982 	 * The ub_mmp_config contains the multihost write interval, multihost
983 	 * fail intervals, sequence number for sub-second granularity, and
984 	 * valid bit mask.  This layout is as follows:
985 	 *
986 	 *   64      56      48      40      32      24      16      8       0
987 	 *   +-------+-------+-------+-------+-------+-------+-------+-------+
988 	 * 0 | Fail Intervals|      Seq      |   Write Interval (ms) | VALID |
989 	 *   +-------+-------+-------+-------+-------+-------+-------+-------+
990 	 *
991 	 * This allows a write_interval of (2^24/1000)s, over 4.5 hours
992 	 *
993 	 * VALID Bits:
994 	 * - 0x01 - Write Interval (ms)
995 	 * - 0x02 - Sequence number exists
996 	 * - 0x04 - Fail Intervals
997 	 * - 0xf8 - Reserved
998 	 */
999 	uint64_t	ub_mmp_config;
1000 
1001 	/*
1002 	 * ub_checkpoint_txg indicates two things about the current uberblock:
1003 	 *
1004 	 * 1] If it is not zero then this uberblock is a checkpoint. If it is
1005 	 *    zero, then this uberblock is not a checkpoint.
1006 	 *
1007 	 * 2] On checkpointed uberblocks, the value of ub_checkpoint_txg is
1008 	 *    the ub_txg that the uberblock had at the time we moved it to
1009 	 *    the MOS config.
1010 	 *
1011 	 * The field is set when we checkpoint the uberblock and continues to
1012 	 * hold that value even after we've rewound (unlike the ub_txg that
1013 	 * is reset to a higher value).
1014 	 *
1015 	 * Besides checks used to determine whether we are reopening the
1016 	 * pool from a checkpointed uberblock [see spa_ld_select_uberblock()],
1017 	 * the value of the field is used to determine which ZIL blocks have
1018 	 * been allocated according to the ms_sm when we are rewinding to a
1019 	 * checkpoint. Specifically, if blk_birth > ub_checkpoint_txg, then
1020 	 * the ZIL block is not allocated [see uses of spa_min_claim_txg()].
1021 	 */
1022 	uint64_t	ub_checkpoint_txg;
1023 } uberblock_t;
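
/*
 * Minimal sketch of pulling the multihost write interval out of
 * ub_mmp_config using the MMP_* macros above; returns 0 when MMP is not
 * active or the interval is not recorded.  The helper name is
 * hypothetical.
 */
static inline uint64_t
example_mmp_write_interval_ms(const uberblock_t *ub)
{
	if (!MMP_VALID(ub) || !MMP_INTERVAL_VALID(ub))
		return (0);
	return (MMP_INTERVAL(ub));	/* milliseconds between MMP writes */
}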
1024 
1025 /*
1026  * Flags.
1027  */
1028 #define	DNODE_MUST_BE_ALLOCATED	1
1029 #define	DNODE_MUST_BE_FREE	2
1030 
1031 /*
1032  * Fixed constants.
1033  */
1034 #define	DNODE_SHIFT		9	/* 512 bytes */
1035 #define	DN_MIN_INDBLKSHIFT	12	/* 4k */
1036 #define	DN_MAX_INDBLKSHIFT	17	/* 128k */
1037 #define	DNODE_BLOCK_SHIFT	14	/* 16k */
1038 #define	DNODE_CORE_SIZE		64	/* 64 bytes for dnode sans blkptrs */
1039 #define	DN_MAX_OBJECT_SHIFT	48	/* 256 trillion (zfs_fid_t limit) */
1040 #define	DN_MAX_OFFSET_SHIFT	64	/* 2^64 bytes in a dnode */
1041 
1042 /*
1043  * Derived constants.
1044  */
1045 #define	DNODE_MIN_SIZE		(1 << DNODE_SHIFT)
1046 #define	DNODE_MAX_SIZE		(1 << DNODE_BLOCK_SHIFT)
1047 #define	DNODE_BLOCK_SIZE	(1 << DNODE_BLOCK_SHIFT)
1048 #define	DNODE_MIN_SLOTS		(DNODE_MIN_SIZE >> DNODE_SHIFT)
1049 #define	DNODE_MAX_SLOTS		(DNODE_MAX_SIZE >> DNODE_SHIFT)
1050 #define	DN_BONUS_SIZE(dnsize)	((dnsize) - DNODE_CORE_SIZE - \
1051 	(1 << SPA_BLKPTRSHIFT))
1052 #define	DN_SLOTS_TO_BONUSLEN(slots)	DN_BONUS_SIZE((slots) << DNODE_SHIFT)
1053 #define	DN_OLD_MAX_BONUSLEN		(DN_BONUS_SIZE(DNODE_MIN_SIZE))
1054 #define	DN_MAX_NBLKPTR		((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> \
1055 	SPA_BLKPTRSHIFT)
1056 #define	DN_MAX_OBJECT		(1ULL << DN_MAX_OBJECT_SHIFT)
1057 #define	DN_ZERO_BONUSLEN	(DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
1058 
1059 #define	DNODES_PER_BLOCK_SHIFT	(DNODE_BLOCK_SHIFT - DNODE_SHIFT)
1060 #define	DNODES_PER_BLOCK	(1ULL << DNODES_PER_BLOCK_SHIFT)
1061 #define	DNODES_PER_LEVEL_SHIFT	(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
1062 
1063 /* The +2 here is a cheesy way to round up */
1064 #define	DN_MAX_LEVELS	(2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
1065 	(DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
1066 
1067 #define	DN_BONUS(dnp)	((void*)((dnp)->dn_bonus + \
1068 	(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
1069 
1070 #define	DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
1071 	(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
1072 
1073 #define	EPB(blkshift, typeshift)	(1 << (blkshift - typeshift))
1074 
1075 /* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
1076 #define	DNODE_FLAG_USED_BYTES		(1<<0)
1077 #define	DNODE_FLAG_USERUSED_ACCOUNTED	(1<<1)
1078 
1079 /* Does dnode have a SA spill blkptr in bonus? */
1080 #define	DNODE_FLAG_SPILL_BLKPTR	(1<<2)
1081 
1082 typedef struct dnode_phys {
1083 	uint8_t dn_type;		/* dmu_object_type_t */
1084 	uint8_t dn_indblkshift;		/* ln2(indirect block size) */
1085 	uint8_t dn_nlevels;		/* 1=dn_blkptr->data blocks */
1086 	uint8_t dn_nblkptr;		/* length of dn_blkptr */
1087 	uint8_t dn_bonustype;		/* type of data in bonus buffer */
1088 	uint8_t	dn_checksum;		/* ZIO_CHECKSUM type */
1089 	uint8_t	dn_compress;		/* ZIO_COMPRESS type */
1090 	uint8_t dn_flags;		/* DNODE_FLAG_* */
1091 	uint16_t dn_datablkszsec;	/* data block size in 512b sectors */
1092 	uint16_t dn_bonuslen;		/* length of dn_bonus */
1093 	uint8_t dn_extra_slots;		/* # of subsequent slots consumed */
1094 	uint8_t dn_pad2[3];
1095 
1096 	/* accounting is protected by dn_dirty_mtx */
1097 	uint64_t dn_maxblkid;		/* largest allocated block ID */
1098 	uint64_t dn_used;		/* bytes (or sectors) of disk space */
1099 
1100 	uint64_t dn_pad3[4];
1101 
1102 	/*
1103 	 * The tail region is 448 bytes for a 512 byte dnode, and
1104 	 * correspondingly larger for larger dnode sizes. The spill
1105 	 * block pointer, when present, is always at the end of the tail
1106 	 * region. There are three ways this space may be used, using
1107 	 * a 512 byte dnode for this diagram:
1108 	 *
1109 	 * 0       64      128     192     256     320     384     448 (offset)
1110 	 * +---------------+---------------+---------------+-------+
1111 	 * | dn_blkptr[0]  | dn_blkptr[1]  | dn_blkptr[2]  | /     |
1112 	 * +---------------+---------------+---------------+-------+
1113 	 * | dn_blkptr[0]  | dn_bonus[0..319]                      |
1114 	 * +---------------+-----------------------+---------------+
1115 	 * | dn_blkptr[0]  | dn_bonus[0..191]      | dn_spill      |
1116 	 * +---------------+-----------------------+---------------+
1117 	 */
1118 	union {
1119 		blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
1120 		struct {
1121 			blkptr_t __dn_ignore1;
1122 			uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
1123 		};
1124 		struct {
1125 			blkptr_t __dn_ignore2;
1126 			uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
1127 			    sizeof (blkptr_t)];
1128 			blkptr_t dn_spill;
1129 		};
1130 	};
1131 } dnode_phys_t;
1132 
1133 #define	DN_SPILL_BLKPTR(dnp)	(blkptr_t *)((char *)(dnp) + \
1134 	(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT))
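
/*
 * Minimal sketch of locating the bonus buffer and, when present, the
 * spill block pointer inside an on-disk dnode; the helper names are
 * hypothetical.
 */
static inline void *
example_dnode_bonus(dnode_phys_t *dnp)
{
	return (DN_BONUS(dnp));	/* starts right after the last blkptr */
}

static inline blkptr_t *
example_dnode_spill(dnode_phys_t *dnp)
{
	if (!(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return (NULL);
	return (DN_SPILL_BLKPTR(dnp));	/* occupies the last 128 bytes */
}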
1135 
1136 typedef enum dmu_object_byteswap {
1137 	DMU_BSWAP_UINT8,
1138 	DMU_BSWAP_UINT16,
1139 	DMU_BSWAP_UINT32,
1140 	DMU_BSWAP_UINT64,
1141 	DMU_BSWAP_ZAP,
1142 	DMU_BSWAP_DNODE,
1143 	DMU_BSWAP_OBJSET,
1144 	DMU_BSWAP_ZNODE,
1145 	DMU_BSWAP_OLDACL,
1146 	DMU_BSWAP_ACL,
1147 	/*
1148 	 * Allocating a new byteswap type number makes the on-disk format
1149 	 * incompatible with any other format that uses the same number.
1150 	 *
1151 	 * Data can usually be structured to work with one of the
1152 	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
1153 	 */
1154 	DMU_BSWAP_NUMFUNCS
1155 } dmu_object_byteswap_t;
1156 
1157 #define	DMU_OT_NEWTYPE 0x80
1158 #define	DMU_OT_METADATA 0x40
1159 #define	DMU_OT_BYTESWAP_MASK 0x3f
1160 
1161 /*
1162  * Defines a uint8_t object type. Object types specify if the data
1163  * in the object is metadata (boolean) and how to byteswap the data
1164  * (dmu_object_byteswap_t).
1165  */
1166 #define	DMU_OT(byteswap, metadata) \
1167 	(DMU_OT_NEWTYPE | \
1168 	((metadata) ? DMU_OT_METADATA : 0) | \
1169 	((byteswap) & DMU_OT_BYTESWAP_MASK))
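
/*
 * Minimal sketch of taking a DMU_OT()-encoded type apart again; these
 * hypothetical helpers only handle the new (DMU_OT_NEWTYPE) encoding,
 * not the legacy DMU_OT_* values below.
 */
static inline boolean_t
example_ot_is_metadata(uint8_t ot)
{
	return ((ot & DMU_OT_NEWTYPE) && (ot & DMU_OT_METADATA) ?
	    B_TRUE : B_FALSE);
}

static inline uint8_t
example_ot_byteswap(uint8_t ot)
{
	return (ot & DMU_OT_BYTESWAP_MASK);	/* dmu_object_byteswap_t */
}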
1170 
1171 typedef enum dmu_object_type {
1172 	DMU_OT_NONE,
1173 	/* general: */
1174 	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
1175 	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
1176 	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
1177 	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
1178 	DMU_OT_BPLIST,			/* UINT64 */
1179 	DMU_OT_BPLIST_HDR,		/* UINT64 */
1180 	/* spa: */
1181 	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
1182 	DMU_OT_SPACE_MAP,		/* UINT64 */
1183 	/* zil: */
1184 	DMU_OT_INTENT_LOG,		/* UINT64 */
1185 	/* dmu: */
1186 	DMU_OT_DNODE,			/* DNODE */
1187 	DMU_OT_OBJSET,			/* OBJSET */
1188 	/* dsl: */
1189 	DMU_OT_DSL_DIR,			/* UINT64 */
1190 	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
1191 	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
1192 	DMU_OT_DSL_PROPS,		/* ZAP */
1193 	DMU_OT_DSL_DATASET,		/* UINT64 */
1194 	/* zpl: */
1195 	DMU_OT_ZNODE,			/* ZNODE */
1196 	DMU_OT_OLDACL,			/* Old ACL */
1197 	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
1198 	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
1199 	DMU_OT_MASTER_NODE,		/* ZAP */
1200 	DMU_OT_UNLINKED_SET,		/* ZAP */
1201 	/* zvol: */
1202 	DMU_OT_ZVOL,			/* UINT8 */
1203 	DMU_OT_ZVOL_PROP,		/* ZAP */
1204 	/* other; for testing only! */
1205 	DMU_OT_PLAIN_OTHER,		/* UINT8 */
1206 	DMU_OT_UINT64_OTHER,		/* UINT64 */
1207 	DMU_OT_ZAP_OTHER,		/* ZAP */
1208 	/* new object types: */
1209 	DMU_OT_ERROR_LOG,		/* ZAP */
1210 	DMU_OT_SPA_HISTORY,		/* UINT8 */
1211 	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
1212 	DMU_OT_POOL_PROPS,		/* ZAP */
1213 	DMU_OT_DSL_PERMS,		/* ZAP */
1214 	DMU_OT_ACL,			/* ACL */
1215 	DMU_OT_SYSACL,			/* SYSACL */
1216 	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
1217 	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
1218 	DMU_OT_NEXT_CLONES,		/* ZAP */
1219 	DMU_OT_SCAN_QUEUE,		/* ZAP */
1220 	DMU_OT_USERGROUP_USED,		/* ZAP */
1221 	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
1222 	DMU_OT_USERREFS,		/* ZAP */
1223 	DMU_OT_DDT_ZAP,			/* ZAP */
1224 	DMU_OT_DDT_STATS,		/* ZAP */
1225 	DMU_OT_SA,			/* System attr */
1226 	DMU_OT_SA_MASTER_NODE,		/* ZAP */
1227 	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
1228 	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
1229 	DMU_OT_SCAN_XLATE,		/* ZAP */
1230 	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
1231 	DMU_OT_NUMTYPES,
1232 
1233 	/*
1234 	 * Names for valid types declared with DMU_OT().
1235 	 */
1236 	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
1237 	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
1238 	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
1239 	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
1240 	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
1241 	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
1242 	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
1243 	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
1244 	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
1245 	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE)
1246 } dmu_object_type_t;
1247 
1248 typedef enum dmu_objset_type {
1249 	DMU_OST_NONE,
1250 	DMU_OST_META,
1251 	DMU_OST_ZFS,
1252 	DMU_OST_ZVOL,
1253 	DMU_OST_OTHER,			/* For testing only! */
1254 	DMU_OST_ANY,			/* Be careful! */
1255 	DMU_OST_NUMTYPES
1256 } dmu_objset_type_t;
1257 
1258 #define	ZAP_MAXVALUELEN	(1024 * 8)
1259 
1260 /*
1261  * Header for all bonus and spill buffers.
1262  * The header has a fixed portion with a variable number
1263  * of "lengths" depending on the number of variable-sized
1264  * attributes, which are determined by the "layout number".
1265  */
1266 
1267 #define	SA_MAGIC	0x2F505A  /* ZFS SA */
1268 typedef struct sa_hdr_phys {
1269 	uint32_t sa_magic;
1270 	uint16_t sa_layout_info;  /* Encoded with hdrsize and layout number */
1271 	uint16_t sa_lengths[1];	/* optional sizes for variable length attrs */
1272 	/* ... Data follows the lengths.  */
1273 } sa_hdr_phys_t;
1274 
1275 /*
1276  * sa_hdr_phys -> sa_layout_info
1277  *
1278  * 16      10       0
1279  * +--------+-------+
1280  * | hdrsz  |layout |
1281  * +--------+-------+
1282  *
1283  * Bits 0-9 are the layout number.
1284  * Bits 10-15 are the size of the header.
1285  * The hdrsize is the number * 8.
1286  *
1287  * For example:
1288  * hdrsz of 1 ==> 8 byte header
1289  *          2 ==> 16 byte header
1290  *
1291  */
1292 
1293 #define	SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
1294 #define	SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0)
1295 #define	SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
1296 { \
1297 	BF32_SET_SB(x, 10, 6, 3, 0, size); \
1298 	BF32_SET(x, 0, 10, num); \
1299 }
1300 
1301 #define	SA_MODE_OFFSET		0
1302 #define	SA_SIZE_OFFSET		8
1303 #define	SA_GEN_OFFSET		16
1304 #define	SA_UID_OFFSET		24
1305 #define	SA_GID_OFFSET		32
1306 #define	SA_PARENT_OFFSET	40
1307 #define	SA_SYMLINK_OFFSET	160
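
/*
 * Minimal sketch of reading the mode attribute from a bonus/spill buffer
 * that uses the standard ZPL attribute layout (where the SA_*_OFFSET
 * values above apply); the helper name is hypothetical.
 */
static inline uint64_t
example_sa_get_mode(const sa_hdr_phys_t *hdr)
{
	/*
	 * The attribute data starts right after the header; SA_HDR_SIZE()
	 * already converts the stored length to bytes.
	 */
	return (*(const uint64_t *)((const char *)hdr + SA_HDR_SIZE(hdr) +
	    SA_MODE_OFFSET));
}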
1308 
1309 #define	ZIO_OBJSET_MAC_LEN		32
1310 
1311 /*
1312  * Intent log header - this on disk structure holds fields to manage
1313  * the log.  All fields are 64 bit to easily handle cross architectures.
1314  */
1315 typedef struct zil_header {
1316 	uint64_t zh_claim_txg;	/* txg in which log blocks were claimed */
1317 	uint64_t zh_replay_seq;	/* highest replayed sequence number */
1318 	blkptr_t zh_log;	/* log chain */
1319 	uint64_t zh_claim_seq;	/* highest claimed sequence number */
1320 	uint64_t zh_pad[5];
1321 } zil_header_t;
1322 
1323 #define	OBJSET_PHYS_SIZE_V2 2048
1324 #define	OBJSET_PHYS_SIZE_V3 4096
1325 
1326 typedef struct objset_phys {
1327 	dnode_phys_t os_meta_dnode;
1328 	zil_header_t os_zil_header;
1329 	uint64_t os_type;
1330 	uint64_t os_flags;
1331 	uint8_t os_portable_mac[ZIO_OBJSET_MAC_LEN];
1332 	uint8_t os_local_mac[ZIO_OBJSET_MAC_LEN];
1333 	char os_pad0[OBJSET_PHYS_SIZE_V2 - sizeof (dnode_phys_t)*3 -
1334 		sizeof (zil_header_t) - sizeof (uint64_t)*2 -
1335 		2*ZIO_OBJSET_MAC_LEN];
1336 	dnode_phys_t os_userused_dnode;
1337 	dnode_phys_t os_groupused_dnode;
1338 	dnode_phys_t os_projectused_dnode;
1339 	char os_pad1[OBJSET_PHYS_SIZE_V3 - OBJSET_PHYS_SIZE_V2 -
1340 	    sizeof (dnode_phys_t)];
1341 } objset_phys_t;
1342 
1343 typedef struct dsl_dir_phys {
1344 	uint64_t dd_creation_time; /* not actually used */
1345 	uint64_t dd_head_dataset_obj;
1346 	uint64_t dd_parent_obj;
1347 	uint64_t dd_clone_parent_obj;
1348 	uint64_t dd_child_dir_zapobj;
1349 	/*
1350 	 * how much space our children are accounting for; for leaf
1351 	 * datasets, == physical space used by fs + snaps
1352 	 */
1353 	uint64_t dd_used_bytes;
1354 	uint64_t dd_compressed_bytes;
1355 	uint64_t dd_uncompressed_bytes;
1356 	/* Administrative quota setting */
1357 	uint64_t dd_quota;
1358 	/* Administrative reservation setting */
1359 	uint64_t dd_reserved;
1360 	uint64_t dd_props_zapobj;
1361 	uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
1362 } dsl_dir_phys_t;
1363 
1364 typedef struct dsl_dataset_phys {
1365 	uint64_t ds_dir_obj;
1366 	uint64_t ds_prev_snap_obj;
1367 	uint64_t ds_prev_snap_txg;
1368 	uint64_t ds_next_snap_obj;
1369 	uint64_t ds_snapnames_zapobj;	/* zap obj of snaps; ==0 for snaps */
1370 	uint64_t ds_num_children;	/* clone/snap children; ==0 for head */
1371 	uint64_t ds_creation_time;	/* seconds since 1970 */
1372 	uint64_t ds_creation_txg;
1373 	uint64_t ds_deadlist_obj;
1374 	uint64_t ds_used_bytes;
1375 	uint64_t ds_compressed_bytes;
1376 	uint64_t ds_uncompressed_bytes;
1377 	uint64_t ds_unique_bytes;	/* only relevant to snapshots */
1378 	/*
1379 	 * The ds_fsid_guid is a 56-bit ID that can change to avoid
1380 	 * collisions.  The ds_guid is a 64-bit ID that will never
1381 	 * change, so there is a small probability that it will collide.
1382 	 */
1383 	uint64_t ds_fsid_guid;
1384 	uint64_t ds_guid;
1385 	uint64_t ds_flags;
1386 	blkptr_t ds_bp;
1387 	uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
1388 } dsl_dataset_phys_t;
1389 
1390 /*
1391  * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
1392  */
1393 #define	DMU_POOL_DIRECTORY_OBJECT	1
1394 #define	DMU_POOL_CONFIG			"config"
1395 #define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
1396 #define	DMU_POOL_ROOT_DATASET		"root_dataset"
1397 #define	DMU_POOL_SYNC_BPLIST		"sync_bplist"
1398 #define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
1399 #define	DMU_POOL_ERRLOG_LAST		"errlog_last"
1400 #define	DMU_POOL_SPARES			"spares"
1401 #define	DMU_POOL_DEFLATE		"deflate"
1402 #define	DMU_POOL_HISTORY		"history"
1403 #define	DMU_POOL_PROPS			"pool_props"
1404 #define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
1405 #define	DMU_POOL_REMOVING		"com.delphix:removing"
1406 #define	DMU_POOL_OBSOLETE_BPOBJ		"com.delphix:obsolete_bpobj"
1407 #define	DMU_POOL_CONDENSING_INDIRECT	"com.delphix:condensing_indirect"
1408 #define	DMU_POOL_ZPOOL_CHECKPOINT       "com.delphix:zpool_checkpoint"
1409 
1410 #define	ZAP_MAGIC 0x2F52AB2ABULL
1411 
1412 #define	FZAP_BLOCK_SHIFT(zap)	((zap)->zap_block_shift)
1413 
1414 #define	ZAP_MAXCD		(uint32_t)(-1)
1415 #define	ZAP_HASHBITS		28
1416 #define	MZAP_ENT_LEN		64
1417 #define	MZAP_NAME_LEN		(MZAP_ENT_LEN - 8 - 4 - 2)
1418 #define	MZAP_MAX_BLKSZ		SPA_OLD_MAXBLOCKSIZE
1419 
1420 typedef struct mzap_ent_phys {
1421 	uint64_t mze_value;
1422 	uint32_t mze_cd;
1423 	uint16_t mze_pad;	/* in case we want to chain them someday */
1424 	char mze_name[MZAP_NAME_LEN];
1425 } mzap_ent_phys_t;
1426 
1427 typedef struct mzap_phys {
1428 	uint64_t mz_block_type;	/* ZBT_MICRO */
1429 	uint64_t mz_salt;
1430 	uint64_t mz_normflags;
1431 	uint64_t mz_pad[5];
1432 	mzap_ent_phys_t mz_chunk[1];
1433 	/* actually variable size depending on block size */
1434 } mzap_phys_t;
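
/*
 * Minimal sketch of scanning a microzap block of "size" bytes.  The
 * 64-byte mzap_phys_t header consumes the first chunk, so the block
 * holds size / MZAP_ENT_LEN - 1 entries; unused entries have an empty
 * mze_name.  The helper name is hypothetical.
 */
static inline int
example_mzap_count_used(const mzap_phys_t *mz, uint64_t size)
{
	int i, n = 0;
	int chunks = size / MZAP_ENT_LEN - 1;

	for (i = 0; i < chunks; i++) {
		if (mz->mz_chunk[i].mze_name[0] != '\0')
			n++;
	}
	return (n);
}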
1435 
1436 /*
1437  * The (fat) zap is stored in one object. It is an array of
1438  * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
1439  *
1440  * ptrtbl fits in first block:
1441  * 	[zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
1442  *
1443  * ptrtbl too big for first block:
1444  * 	[zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
1445  *
1446  */
1447 
1448 #define	ZBT_LEAF		((1ULL << 63) + 0)
1449 #define	ZBT_HEADER		((1ULL << 63) + 1)
1450 #define	ZBT_MICRO		((1ULL << 63) + 3)
1451 /* any other values are ptrtbl blocks */
1452 
1453 /*
1454  * the embedded pointer table takes up half a block:
1455  * block size / entry size (2^3) / 2
1456  */
1457 #define	ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)
1458 
1459 /*
1460  * The embedded pointer table starts half-way through the block.  Since
1461  * the pointer table itself is half the block, it starts at (64-bit)
1462  * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
1463  */
1464 #define	ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
1465 	((uint64_t *)(zap)->zap_phys) \
1466 	[(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
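
/*
 * For example (illustrative): with a 16K (2^14 byte) ZAP block,
 * ZAP_EMBEDDED_PTRTBL_SHIFT() is 14 - 3 - 1 = 10, so the embedded table
 * holds 1024 leaf pointers and entry "idx" is 64-bit word 1024 + idx of
 * the block.
 */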
1467 
1468 /*
1469  * TAKE NOTE:
1470  * If zap_phys_t is modified, zap_byteswap() must be modified.
1471  */
1472 typedef struct zap_phys {
1473 	uint64_t zap_block_type;	/* ZBT_HEADER */
1474 	uint64_t zap_magic;		/* ZAP_MAGIC */
1475 
1476 	struct zap_table_phys {
1477 		uint64_t zt_blk;	/* starting block number */
1478 		uint64_t zt_numblks;	/* number of blocks */
1479 		uint64_t zt_shift;	/* bits to index it */
1480 		uint64_t zt_nextblk;	/* next (larger) copy start block */
1481 		uint64_t zt_blks_copied; /* number of source blocks copied */
1482 	} zap_ptrtbl;
1483 
1484 	uint64_t zap_freeblk;		/* the next free block */
1485 	uint64_t zap_num_leafs;		/* number of leafs */
1486 	uint64_t zap_num_entries;	/* number of entries */
1487 	uint64_t zap_salt;		/* salt to stir into hash function */
1488 	uint64_t zap_normflags;		/* flags for u8_textprep_str() */
1489 	uint64_t zap_flags;		/* zap_flags_t */
1490 	/*
1491 	 * This structure is followed by padding, and then the embedded
1492 	 * pointer table.  The embedded pointer table takes up the second
1493 	 * half of the block.  It is accessed using the
1494 	 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
1495 	 */
1496 } zap_phys_t;
1497 
1498 typedef struct zap_table_phys zap_table_phys_t;
1499 
1500 struct spa;
1501 typedef struct fat_zap {
1502 	int zap_block_shift;			/* block size shift */
1503 	zap_phys_t *zap_phys;
1504 	const struct spa *zap_spa;
1505 	const dnode_phys_t *zap_dnode;
1506 } fat_zap_t;
1507 
1508 #define	ZAP_LEAF_MAGIC 0x2AB1EAF
1509 
1510 /* chunk size = 24 bytes */
1511 #define	ZAP_LEAF_CHUNKSIZE 24
1512 
1513 /*
1514  * The amount of space available for chunks is:
1515  * block size (1<<l->l_bs) - hash entry size (2) * number of hash
1516  * entries - header space (2*chunksize)
1517  */
1518 #define	ZAP_LEAF_NUMCHUNKS(l) \
1519 	(((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
1520 	ZAP_LEAF_CHUNKSIZE - 2)
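
/*
 * For example (illustrative): for a 16K (2^14 byte) leaf block the hash
 * table has 1 << (14 - 5) = 512 entries taking 1024 bytes, so
 * ZAP_LEAF_NUMCHUNKS() is (16384 - 1024) / 24 - 2 = 638 chunks.
 */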
1521 
1522 /*
1523  * The amount of space within the chunk available for the array is:
1524  * chunk size - space for type (1) - space for next pointer (2)
1525  */
1526 #define	ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
1527 
1528 #define	ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
1529 	(((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)
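
/*
 * For example (illustrative): ZAP_LEAF_ARRAY_BYTES is 21, so a 12-byte
 * name needs ZAP_LEAF_ARRAY_NCHUNKS(12) = 1 array chunk, while a
 * MAXNAMELEN (256) byte name needs 13.
 */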
1530 
1531 /*
1532  * Low water mark:  when there are only this many chunks free, start
1533  * growing the ptrtbl.  Ideally, this should be larger than a
1534  * "reasonably-sized" entry.  20 chunks is more than enough for the
1535  * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
1536  * while still being only around 3% for 16k blocks.
1537  */
1538 #define	ZAP_LEAF_LOW_WATER (20)
1539 
1540 /*
1541  * The leaf hash table has block size / 2^5 (32) entries,
1542  * which should be more than enough for the maximum number of entries,
1543  * which is less than block size / CHUNKSIZE (24) / minimum number of
1544  * chunks per entry (3).
1545  */
1546 #define	ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
1547 #define	ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
1548 
1549 /*
1550  * The chunks start immediately after the hash table.  The end of the
1551  * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
1552  * chunk_t.
1553  */
1554 #define	ZAP_LEAF_CHUNK(l, idx) \
1555 	((zap_leaf_chunk_t *) \
1556 	((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
1557 #define	ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)
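
/*
 * Illustrative sketch: a lookup for a 64-bit hash "h" starts from a leaf
 * hash table bucket (the bucket index is computed from the hash by the
 * ZAP implementation, not by this header) and follows the le_next chain
 * until the end-of-chain marker (0xffff, CHAIN_END in the implementation):
 *
 *	uint16_t chunk = l->l_phys->l_hash[bucket];
 *	while (chunk != 0xffff) {
 *		struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, chunk);
 *		if (le->le_hash == h)
 *			... candidate found, compare the stored name ...
 *		chunk = le->le_next;
 *	}
 */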
1558 
1559 typedef enum zap_chunk_type {
1560 	ZAP_CHUNK_FREE = 253,
1561 	ZAP_CHUNK_ENTRY = 252,
1562 	ZAP_CHUNK_ARRAY = 251,
1563 	ZAP_CHUNK_TYPE_MAX = 250
1564 } zap_chunk_type_t;
1565 
1566 /*
1567  * TAKE NOTE:
1568  * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
1569  */
1570 typedef struct zap_leaf_phys {
1571 	struct zap_leaf_header {
1572 		uint64_t lh_block_type;		/* ZBT_LEAF */
1573 		uint64_t lh_pad1;
1574 		uint64_t lh_prefix;		/* hash prefix of this leaf */
1575 		uint32_t lh_magic;		/* ZAP_LEAF_MAGIC */
1576 		uint16_t lh_nfree;		/* number of free chunks */
1577 		uint16_t lh_nentries;		/* number of entries */
1578 		uint16_t lh_prefix_len;		/* num bits used to id this */
1579 
1580 /* above is accessible to zap, below is zap_leaf private */
1581 
1582 		uint16_t lh_freelist;		/* chunk head of free list */
1583 		uint8_t lh_pad2[12];
1584 	} l_hdr; /* 2 24-byte chunks */
1585 
1586 	/*
1587 	 * The header is followed by a hash table with
1588 	 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries.  The hash table is
1589 	 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
1590 	 * zap_leaf_chunk structures.  These structures are accessed
1591 	 * with the ZAP_LEAF_CHUNK() macro.
1592 	 */
1593 
1594 	uint16_t l_hash[1];
1595 } zap_leaf_phys_t;
1596 
1597 typedef union zap_leaf_chunk {
1598 	struct zap_leaf_entry {
1599 		uint8_t le_type; 		/* always ZAP_CHUNK_ENTRY */
1600 		uint8_t le_value_intlen;	/* size of ints */
1601 		uint16_t le_next;		/* next entry in hash chain */
1602 		uint16_t le_name_chunk;		/* first chunk of the name */
1603 		uint16_t le_name_numints;	/* bytes in name, incl null */
1604 		uint16_t le_value_chunk;	/* first chunk of the value */
1605 		uint16_t le_value_numints;	/* value length in ints */
1606 		uint32_t le_cd;			/* collision differentiator */
1607 		uint64_t le_hash;		/* hash value of the name */
1608 	} l_entry;
1609 	struct zap_leaf_array {
1610 		uint8_t la_type;		/* always ZAP_CHUNK_ARRAY */
1611 		uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
1612 		uint16_t la_next;		/* next blk or CHAIN_END */
1613 	} l_array;
1614 	struct zap_leaf_free {
1615 		uint8_t lf_type;		/* always ZAP_CHUNK_FREE */
1616 		uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
1617 		uint16_t lf_next;	/* next in free list, or CHAIN_END */
1618 	} l_free;
1619 } zap_leaf_chunk_t;
1620 
1621 typedef struct zap_leaf {
1622 	int l_bs;			/* block size shift */
1623 	zap_leaf_phys_t *l_phys;
1624 } zap_leaf_t;
1625 
1626 /*
1627  * Define special zfs pflags
1628  */
1629 #define	ZFS_XATTR	0x1		/* is an extended attribute */
1630 #define	ZFS_INHERIT_ACE	0x2		/* ace has inheritable ACEs */
1631 #define	ZFS_ACL_TRIVIAL 0x4		/* file's ACL is trivial */
1632 
1633 #define	MASTER_NODE_OBJ	1
1634 
1635 /*
1636  * special attributes for master node.
1637  */
1638 
1639 #define	ZFS_FSID		"FSID"
1640 #define	ZFS_UNLINKED_SET	"DELETE_QUEUE"
1641 #define	ZFS_ROOT_OBJ		"ROOT"
1642 #define	ZPL_VERSION_OBJ		"VERSION"
1643 #define	ZFS_PROP_BLOCKPERPAGE	"BLOCKPERPAGE"
1644 #define	ZFS_PROP_NOGROWBLOCKS	"NOGROWBLOCKS"
1645 
1646 #define	ZFS_FLAG_BLOCKPERPAGE	0x1
1647 #define	ZFS_FLAG_NOGROWBLOCKS	0x2
1648 
1649 /*
1650  * ZPL version - rev'd whenever an incompatible on-disk format change
1651  * occurs.  Independent of SPA/DMU/ZAP versioning.
1652  */
1653 
1654 #define	ZPL_VERSION		1ULL
1655 
1656 /*
1657  * The directory entry has the type (currently unused on Solaris) in the
1658  * top 4 bits, and the object number in the low 48 bits.  The "middle"
1659  * 12 bits are unused.
1660  */
1661 #define	ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
1662 #define	ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
1663 #define	ZFS_DIRENT_MAKE(type, obj) (((uint64_t)(type) << 60) | (obj))
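
/*
 * For example (illustrative), given the 64-bit ZAP value "de" of a
 * directory entry:
 *
 *	uint64_t objnum = ZFS_DIRENT_OBJ(de);
 *	uint64_t dtype = ZFS_DIRENT_TYPE(de);
 */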
1664 
1665 typedef struct ace {
1666 	uid_t		a_who;		/* uid or gid */
1667 	uint32_t	a_access_mask;	/* read,write,... */
1668 	uint16_t	a_flags;	/* see below */
1669 	uint16_t	a_type;		/* allow or deny */
1670 } ace_t;
1671 
1672 #define ACE_SLOT_CNT	6
1673 
1674 typedef struct zfs_znode_acl {
1675 	uint64_t	z_acl_extern_obj;	  /* ext acl pieces */
1676 	uint32_t	z_acl_count;		  /* Number of ACEs */
1677 	uint16_t	z_acl_version;		  /* acl version */
1678 	uint16_t	z_acl_pad;		  /* pad */
1679 	ace_t		z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
1680 } zfs_znode_acl_t;
1681 
1682 /*
1683  * This is the persistent portion of the znode.  It is stored
1684  * in the "bonus buffer" of the file.  Short symbolic links
1685  * are also stored in the bonus buffer.
1686  */
1687 typedef struct znode_phys {
1688 	uint64_t zp_atime[2];		/*  0 - last file access time */
1689 	uint64_t zp_mtime[2];		/* 16 - last file modification time */
1690 	uint64_t zp_ctime[2];		/* 32 - last file change time */
1691 	uint64_t zp_crtime[2];		/* 48 - creation time */
1692 	uint64_t zp_gen;		/* 64 - generation (txg of creation) */
1693 	uint64_t zp_mode;		/* 72 - file mode bits */
1694 	uint64_t zp_size;		/* 80 - size of file */
1695 	uint64_t zp_parent;		/* 88 - directory parent (`..') */
1696 	uint64_t zp_links;		/* 96 - number of links to file */
1697 	uint64_t zp_xattr;		/* 104 - DMU object for xattrs */
1698 	uint64_t zp_rdev;		/* 112 - dev_t for VBLK & VCHR files */
1699 	uint64_t zp_flags;		/* 120 - persistent flags */
1700 	uint64_t zp_uid;		/* 128 - file owner */
1701 	uint64_t zp_gid;		/* 136 - owning group */
1702 	uint64_t zp_pad[4];		/* 144 - future */
1703 	zfs_znode_acl_t zp_acl;		/* 176 - 263 ACL */
1704 	/*
1705 	 * Data may pad out any remaining bytes in the znode buffer, e.g.:
1706 	 *
1707 	 * |<---------------------- dnode_phys (512) ------------------------>|
1708 	 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
1709 	 *			|<---- znode (264) ---->|<---- data (56) ---->|
1710 	 *
1711 	 * At present, we only use this space to store symbolic links.
1712 	 */
1713 } znode_phys_t;
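
/*
 * Illustrative sketch (assuming the pre-SA layout, where the znode sits
 * at the start of the dnode's bonus buffer): the target of a short
 * symbolic link is the data immediately following the znode, so it can
 * be copied straight out of the bonus buffer:
 *
 *	const znode_phys_t *zp = (const znode_phys_t *)bonus;
 *	memcpy(path, (const char *)(zp + 1), zp->zp_size);
 *
 * where "bonus" points at the dnode's bonus buffer and "path" has room
 * for zp_size bytes.
 */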
1714 
1715 /*
1716  * In-core vdev representation.
1717  */
1718 struct vdev;
1719 struct spa;
1720 typedef int vdev_phys_read_t(struct vdev *, void *, off_t, void *, size_t);
1721 typedef int vdev_phys_write_t(struct vdev *, off_t, void *, size_t);
1722 typedef int vdev_read_t(struct vdev *, const blkptr_t *, void *, off_t, size_t);
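
/*
 * Illustrative sketch (names hypothetical): a disk-backed leaf vdev
 * registers a vdev_phys_read_t along the lines of
 *
 *	static int
 *	example_phys_read(vdev_t *vdev, void *priv, off_t offset,
 *	    void *buf, size_t bytes)
 *	{
 *		... read "bytes" bytes at byte offset "offset" from the
 *		device identified by "priv" (the vdev's v_priv cookie)
 *		into "buf", returning 0 on success or a nonzero error ...
 *	}
 *
 * vdev_read_t additionally receives the block pointer, which is used,
 * among other things, for checksum verification of the data read.
 */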
1723 
1724 typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;
1725 
1726 typedef struct vdev_indirect_mapping_entry_phys {
1727 	/*
1728 	 * Decode with DVA_MAPPING_* macros.
1729 	 * Contains:
1730 	 *   the source offset (low 63 bits)
1731 	 *   the one-bit "mark", used for garbage collection (by zdb)
1732 	 */
1733 	uint64_t vimep_src;
1734 
1735 	/*
1736 	 * Note: the DVA's asize is 24 bits, and can thus store ranges
1737 	 * up to 8GB.
1738 	 */
1739 	dva_t	vimep_dst;
1740 } vdev_indirect_mapping_entry_phys_t;
1741 
1742 #define	DVA_MAPPING_GET_SRC_OFFSET(vimep)	\
1743 	BF64_GET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0)
1744 #define	DVA_MAPPING_SET_SRC_OFFSET(vimep, x)	\
1745 	BF64_SET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0, x)
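
/*
 * For example (illustrative): vimep_src stores the offset in units of
 * 512-byte (1 << SPA_MINBLOCKSHIFT) sectors, and the accessor scales it
 * back to bytes:
 *
 *	const vdev_indirect_mapping_entry_phys_t *vimep = ...;
 *	uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
 */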
1746 
1747 typedef struct vdev_indirect_mapping_entry {
1748 	vdev_indirect_mapping_entry_phys_t	vime_mapping;
1749 	uint32_t				vime_obsolete_count;
1750 	list_node_t				vime_node;
1751 } vdev_indirect_mapping_entry_t;
1752 
1753 /*
1754  * This is stored in the bonus buffer of the mapping object, see comment of
1755  * vdev_indirect_config for more details.
1756  */
1757 typedef struct vdev_indirect_mapping_phys {
1758 	uint64_t	vimp_max_offset;
1759 	uint64_t	vimp_bytes_mapped;
1760 	uint64_t	vimp_num_entries; /* number of v_i_m_entry_phys_t's */
1761 
1762 	/*
1763 	 * For each entry in the mapping object, this object contains an
1764 	 * entry representing the number of bytes of that mapping entry
1765 	 * that were no longer in use by the pool at the time this indirect
1766 	 * vdev was last condensed.
1767 	 */
1768 	uint64_t	vimp_counts_object;
1769 } vdev_indirect_mapping_phys_t;
1770 
1771 #define	VDEV_INDIRECT_MAPPING_SIZE_V0	(3 * sizeof (uint64_t))
1772 
1773 typedef struct vdev_indirect_mapping {
1774 	uint64_t	vim_object;
1775 	boolean_t	vim_havecounts;
1776 
1777 	/* vim_entries segment offset currently in memory. */
1778 	uint64_t	vim_entry_offset;
1779 	/* vim_entries segment size. */
1780 	size_t		vim_num_entries;
1781 
1782 	/* Needed by dnode_read() */
1783 	const void	*vim_spa;
1784 	dnode_phys_t	*vim_dn;
1785 
1786 	/*
1787 	 * An ordered array of mapping entries, sorted by source offset.
1788 	 * Note that vim_entries is needed during a removal (and contains
1789 	 * mappings that have been synced to disk so far) to handle frees
1790 	 * from the removing device.
1791 	 */
1792 	vdev_indirect_mapping_entry_phys_t *vim_entries;
1793 	objset_phys_t	*vim_objset;
1794 	vdev_indirect_mapping_phys_t	*vim_phys;
1795 } vdev_indirect_mapping_t;
1796 
1797 /*
1798  * On-disk indirect vdev state.
1799  *
1800  * An indirect vdev is described exclusively in the MOS config of a pool.
1801  * The config for an indirect vdev includes several fields, which are
1802  * accessed in memory by a vdev_indirect_config_t.
1803  */
1804 typedef struct vdev_indirect_config {
1805 	/*
1806 	 * Object (in MOS) which contains the indirect mapping. This object
1807 	 * contains an array of vdev_indirect_mapping_entry_phys_t ordered by
1808 	 * vimep_src. The bonus buffer for this object is a
1809 	 * vdev_indirect_mapping_phys_t. This object is allocated when a vdev
1810 	 * removal is initiated.
1811 	 *
1812 	 * Note that this object can be empty if none of the data on the vdev
1813 	 * has been copied yet.
1814 	 */
1815 	uint64_t	vic_mapping_object;
1816 
1817 	/*
1818 	 * Object (in MOS) which contains the birth times for the mapping
1819 	 * entries. This object contains an array of
1820 	 * vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
1821 	 * buffer for this object is a vdev_indirect_birth_phys_t. This object
1822 	 * is allocated when a vdev removal is initiated.
1823 	 *
1824 	 * Note that this object can be empty if none of the data on the vdev
1825 	 * has been copied yet.
1826 	 */
1827 	uint64_t	vic_births_object;
1828 
1829 	/*
1830 	 * This is the vdev ID which was removed previous to this vdev, or
1831 	 * UINT64_MAX if there are no previously removed vdevs.
1832 	 */
1833 	uint64_t	vic_prev_indirect_vdev;
1834 } vdev_indirect_config_t;
1835 
1836 typedef struct vdev {
1837 	STAILQ_ENTRY(vdev) v_childlink;	/* link in parent's child list */
1838 	STAILQ_ENTRY(vdev) v_alllink;	/* link in global vdev list */
1839 	vdev_list_t	v_children;	/* children of this vdev */
1840 	const char	*v_name;	/* vdev name */
1841 	uint64_t	v_guid;		/* vdev guid */
1842 	uint64_t	v_id;		/* index in parent */
1843 	uint64_t	v_psize;	/* physical device capacity */
1844 	int		v_ashift;	/* offset to block shift */
1845 	int		v_nparity;	/* # parity for raidz */
1846 	struct vdev	*v_top;		/* parent vdev */
1847 	size_t		v_nchildren;	/* # children */
1848 	vdev_state_t	v_state;	/* current state */
1849 	vdev_phys_read_t *v_phys_read;	/* read from raw leaf vdev */
1850 	vdev_phys_write_t *v_phys_write; /* write to raw leaf vdev */
1851 	vdev_read_t	*v_read;	/* read from vdev */
1852 	void		*v_priv;	/* data for read/write function */
1853 	boolean_t	v_islog;
1854 	struct spa	*v_spa;		/* link to spa */
1855 	/*
1856 	 * Values stored in the config for an indirect or removing vdev.
1857 	 */
1858 	vdev_indirect_config_t vdev_indirect_config;
1859 	vdev_indirect_mapping_t *v_mapping;
1860 } vdev_t;
1861 
1862 /*
1863  * In-core pool representation.
1864  */
1865 typedef STAILQ_HEAD(spa_list, spa) spa_list_t;
1866 
1867 typedef struct spa {
1868 	STAILQ_ENTRY(spa) spa_link;	/* link in global pool list */
1869 	char		*spa_name;	/* pool name */
1870 	uint64_t	spa_guid;	/* pool guid */
1871 	uint64_t	spa_txg;	/* most recent transaction */
1872 	struct uberblock *spa_uberblock;	/* best uberblock so far */
1873 	vdev_t		*spa_root_vdev;	/* toplevel vdev container */
1874 	objset_phys_t	*spa_mos;	/* MOS for this pool */
1875 	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
1876 	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
1877 	boolean_t	spa_with_log;	/* this pool has a log device */
1878 
1879 	struct uberblock spa_uberblock_master;	/* best uberblock so far */
1880 	objset_phys_t	spa_mos_master;		/* MOS for this pool */
1881 	struct uberblock spa_uberblock_checkpoint; /* checkpoint uberblock */
1882 	objset_phys_t	spa_mos_checkpoint;	/* Checkpoint MOS */
1883 	void		*spa_bootenv;		/* bootenv from pool label */
1884 } spa_t;
1885 
1886 /* IO related arguments. */
1887 typedef struct zio {
1888 	spa_t		*io_spa;
1889 	blkptr_t	*io_bp;
1890 	void		*io_data;
1891 	uint64_t	io_size;
1892 	uint64_t	io_offset;
1893 
1894 	/* Stuff for the vdev stack */
1895 	vdev_t		*io_vd;
1896 	void		*io_vsd;
1897 
1898 	int		io_error;
1899 } zio_t;
1900 
1901 static void decode_embedded_bp_compressed(const blkptr_t *, void *);
1902 
1903 #endif /* _ZFSIMPL_H_ */
1904