xref: /illumos-gate/usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h (revision dc5e7685b131559c0b7c622baee25a9a0ae50ada)
1 /*-
2  * Copyright (c) 2002 McAfee, Inc.
3  * All rights reserved.
4  *
5  * This software was developed for the FreeBSD Project by Marshall
6  * Kirk McKusick and McAfee Research, the Security Research Division of
7  * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
8  * part of the DARPA CHATS research program.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 /*
32  * CDDL HEADER START
33  *
34  * The contents of this file are subject to the terms of the
35  * Common Development and Distribution License (the "License").
36  * You may not use this file except in compliance with the License.
37  *
38  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
39  * or http://www.opensolaris.org/os/licensing.
40  * See the License for the specific language governing permissions
41  * and limitations under the License.
42  *
43  * When distributing Covered Code, include this CDDL HEADER in each
44  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
45  * If applicable, add the following below this CDDL HEADER, with the
46  * fields enclosed by brackets "[]" replaced with your own identifying
47  * information: Portions Copyright [yyyy] [name of copyright owner]
48  *
49  * CDDL HEADER END
50  */
51 /*
52  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
53  * Use is subject to license terms.
54  */
55 /*
56  * Copyright 2013 by Saso Kiselkov. All rights reserved.
57  */
58 /*
59  * Copyright (c) 2013 by Delphix. All rights reserved.
60  */
61 
62 #define	MAXNAMELEN	256
63 
64 #define _NOTE(s)
65 
66 /* CRC64 table */
67 #define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
68 
69 /*
70  * Macros for various sorts of alignment and rounding when the alignment
71  * is known to be a power of 2.
72  */
73 #define	P2ALIGN(x, align)		((x) & -(align))
74 #define	P2PHASE(x, align)		((x) & ((align) - 1))
75 #define	P2NPHASE(x, align)		(-(x) & ((align) - 1))
76 #define	P2ROUNDUP(x, align)		(-(-(x) & -(align)))
77 #define	P2END(x, align)			(-(~(x) & -(align)))
78 #define	P2PHASEUP(x, align, phase)	((phase) - (((phase) - (x)) & -(align)))
79 #define	P2BOUNDARY(off, len, align)	(((off) ^ ((off) + (len) - 1)) > (align) - 1)
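/*
 * A minimal usage sketch of the P2 macros (illustrative only, assuming a
 * hosted environment with <assert.h>; the alignment must be a power of 2):
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
p2_example(void)
{
	uint64_t off = 1000;

	assert(P2ALIGN(off, 512) == 512);	/* round down to a boundary */
	assert(P2ROUNDUP(off, 512) == 1024);	/* round up to a boundary */
	assert(P2PHASE(off, 512) == 488);	/* offset within the block */
	assert(P2NPHASE(off, 512) == 24);	/* bytes to the next boundary */
}
#endif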
80 
81 /*
82  * General-purpose 32-bit and 64-bit bitfield encodings.
83  */
84 #define	BF32_DECODE(x, low, len)	P2PHASE((x) >> (low), 1U << (len))
85 #define	BF64_DECODE(x, low, len)	P2PHASE((x) >> (low), 1ULL << (len))
86 #define	BF32_ENCODE(x, low, len)	(P2PHASE((x), 1U << (len)) << (low))
87 #define	BF64_ENCODE(x, low, len)	(P2PHASE((x), 1ULL << (len)) << (low))
88 
89 #define	BF32_GET(x, low, len)		BF32_DECODE(x, low, len)
90 #define	BF64_GET(x, low, len)		BF64_DECODE(x, low, len)
91 
92 #define	BF32_SET(x, low, len, val)	\
93 	((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
94 #define	BF64_SET(x, low, len, val)	\
95 	((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))
96 
97 #define	BF32_GET_SB(x, low, len, shift, bias)	\
98 	((BF32_GET(x, low, len) + (bias)) << (shift))
99 #define	BF64_GET_SB(x, low, len, shift, bias)	\
100 	((BF64_GET(x, low, len) + (bias)) << (shift))
101 
102 #define	BF32_SET_SB(x, low, len, shift, bias, val)	\
103 	BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
104 #define	BF64_SET_SB(x, low, len, shift, bias, val)	\
105 	BF64_SET(x, low, len, ((val) >> (shift)) - (bias))
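/*
 * Illustrative round trip through the bitfield macros (the shift/bias
 * variants are what the DVA and blkptr macros below build on; the
 * field positions and values here are arbitrary):
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
bf64_example(void)
{
	uint64_t word = 0;

	/* Store 5 in bits 40..47, then read it back. */
	BF64_SET(word, 40, 8, 5);
	assert(BF64_GET(word, 40, 8) == 5);

	/*
	 * The *_SB ("shift/bias") variants store (val >> shift) - bias,
	 * e.g. a size in 512-byte sectors (shift 9) biased by 1 so that
	 * a stored 0 means one sector.
	 */
	BF64_SET_SB(word, 0, 16, 9, 1, 4096);
	assert(BF64_GET_SB(word, 0, 16, 9, 1) == 4096);
}
#endif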
106 
107 /*
108  * Macros to reverse byte order
109  */
110 #define	BSWAP_8(x)	((x) & 0xff)
111 #define	BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
112 #define	BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
113 #define	BSWAP_64(x)	((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
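/*
 * Byte-order examples (illustrative; the argument must be at least as
 * wide as the swap, e.g. a 64-bit operand for BSWAP_64):
 */
#if 0
#include <assert.h>

static void
bswap_example(void)
{
	assert(BSWAP_16(0x1234U) == 0x3412U);
	assert(BSWAP_32(0x11223344UL) == 0x44332211UL);
	assert(BSWAP_64(0x1122334455667788ULL) == 0x8877665544332211ULL);
}
#endif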
114 
115 #define	SPA_MINBLOCKSHIFT	9
116 #define	SPA_OLDMAXBLOCKSHIFT	17
117 #define	SPA_MAXBLOCKSHIFT	24
118 #define	SPA_MINBLOCKSIZE	(1ULL << SPA_MINBLOCKSHIFT)
119 #define	SPA_OLDMAXBLOCKSIZE	(1ULL << SPA_OLDMAXBLOCKSHIFT)
120 #define	SPA_MAXBLOCKSIZE	(1ULL << SPA_MAXBLOCKSHIFT)
121 
122 /*
123  * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
124  * The ASIZE encoding should be at least 64 times larger (6 more bits)
125  * to support up to 4-way RAID-Z mirror mode with worst-case gang block
126  * overhead, three DVAs per bp, plus one more bit in case we do anything
127  * else that expands the ASIZE.
128  */
129 #define	SPA_LSIZEBITS		16	/* LSIZE up to 32M (2^16 * 512)	*/
130 #define	SPA_PSIZEBITS		16	/* PSIZE up to 32M (2^16 * 512)	*/
131 #define	SPA_ASIZEBITS		24	/* ASIZE up to 64 times larger	*/
132 
133 /*
134  * All SPA data is represented by 128-bit data virtual addresses (DVAs).
135  * The members of the dva_t should be considered opaque outside the SPA.
136  */
137 typedef struct dva {
138 	uint64_t	dva_word[2];
139 } dva_t;
140 
141 /*
142  * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
143  */
144 typedef struct zio_cksum {
145 	uint64_t	zc_word[4];
146 } zio_cksum_t;
147 
148 /*
149  * Some checksums/hashes need a 256-bit initialization salt. This salt is kept
150  * secret and is suitable for use in MAC algorithms as the key.
151  */
152 typedef struct zio_cksum_salt {
153 	uint8_t		zcs_bytes[32];
154 } zio_cksum_salt_t;
155 
156 /*
157  * Each block is described by its DVAs, time of birth, checksum, etc.
158  * The word-by-word, bit-by-bit layout of the blkptr is as follows:
159  *
160  *	64	56	48	40	32	24	16	8	0
161  *	+-------+-------+-------+-------+-------+-------+-------+-------+
162  * 0	|		vdev1		| GRID  |	  ASIZE		|
163  *	+-------+-------+-------+-------+-------+-------+-------+-------+
164  * 1	|G|			 offset1				|
165  *	+-------+-------+-------+-------+-------+-------+-------+-------+
166  * 2	|		vdev2		| GRID  |	  ASIZE		|
167  *	+-------+-------+-------+-------+-------+-------+-------+-------+
168  * 3	|G|			 offset2				|
169  *	+-------+-------+-------+-------+-------+-------+-------+-------+
170  * 4	|		vdev3		| GRID  |	  ASIZE		|
171  *	+-------+-------+-------+-------+-------+-------+-------+-------+
172  * 5	|G|			 offset3				|
173  *	+-------+-------+-------+-------+-------+-------+-------+-------+
174  * 6	|BDX|lvl| type	| cksum |E| comp|    PSIZE	|     LSIZE	|
175  *	+-------+-------+-------+-------+-------+-------+-------+-------+
176  * 7	|			padding					|
177  *	+-------+-------+-------+-------+-------+-------+-------+-------+
178  * 8	|			padding					|
179  *	+-------+-------+-------+-------+-------+-------+-------+-------+
180  * 9	|			physical birth txg			|
181  *	+-------+-------+-------+-------+-------+-------+-------+-------+
182  * a	|			logical birth txg			|
183  *	+-------+-------+-------+-------+-------+-------+-------+-------+
184  * b	|			fill count				|
185  *	+-------+-------+-------+-------+-------+-------+-------+-------+
186  * c	|			checksum[0]				|
187  *	+-------+-------+-------+-------+-------+-------+-------+-------+
188  * d	|			checksum[1]				|
189  *	+-------+-------+-------+-------+-------+-------+-------+-------+
190  * e	|			checksum[2]				|
191  *	+-------+-------+-------+-------+-------+-------+-------+-------+
192  * f	|			checksum[3]				|
193  *	+-------+-------+-------+-------+-------+-------+-------+-------+
194  *
195  * Legend:
196  *
197  * vdev		virtual device ID
198  * offset	offset into virtual device
199  * LSIZE	logical size
200  * PSIZE	physical size (after compression)
201  * ASIZE	allocated size (including RAID-Z parity and gang block headers)
202  * GRID		RAID-Z layout information (reserved for future use)
203  * cksum	checksum function
204  * comp		compression function
205  * G		gang block indicator
206  * B		byteorder (endianness)
207  * D		dedup
208  * X		encryption (on version 30, which is not supported)
209  * E		blkptr_t contains embedded data (see below)
210  * lvl		level of indirection
211  * type		DMU object type
212  * phys birth	txg of block allocation; zero if same as logical birth txg
213  * log. birth	transaction group in which the block was logically born
214  * fill count	number of non-zero blocks under this bp
215  * checksum[4]	256-bit checksum of the data this bp describes
216  */
217 
218 /*
219  * "Embedded" blkptr_t's don't actually point to a block, instead they
220  * have a data payload embedded in the blkptr_t itself.  See the comment
221  * in blkptr.c for more details.
222  *
223  * The blkptr_t is laid out as follows:
224  *
225  *	64	56	48	40	32	24	16	8	0
226  *	+-------+-------+-------+-------+-------+-------+-------+-------+
227  * 0	|      payload                                                  |
228  * 1	|      payload                                                  |
229  * 2	|      payload                                                  |
230  * 3	|      payload                                                  |
231  * 4	|      payload                                                  |
232  * 5	|      payload                                                  |
233  *	+-------+-------+-------+-------+-------+-------+-------+-------+
234  * 6	|BDX|lvl| type	| etype |E| comp| PSIZE|              LSIZE	|
235  *	+-------+-------+-------+-------+-------+-------+-------+-------+
236  * 7	|      payload                                                  |
237  * 8	|      payload                                                  |
238  * 9	|      payload                                                  |
239  *	+-------+-------+-------+-------+-------+-------+-------+-------+
240  * a	|			logical birth txg			|
241  *	+-------+-------+-------+-------+-------+-------+-------+-------+
242  * b	|      payload                                                  |
243  * c	|      payload                                                  |
244  * d	|      payload                                                  |
245  * e	|      payload                                                  |
246  * f	|      payload                                                  |
247  *	+-------+-------+-------+-------+-------+-------+-------+-------+
248  *
249  * Legend:
250  *
251  * payload		contains the embedded data
252  * B (byteorder)	byteorder (endianness)
253  * D (dedup)		padding (set to zero)
254  * X			encryption (set to zero; see above)
255  * E (embedded)		set to one
256  * lvl			indirection level
257  * type			DMU object type
258  * etype		how to interpret embedded data (BP_EMBEDDED_TYPE_*)
259  * comp			compression function of payload
260  * PSIZE		size of payload after compression, in bytes
261  * LSIZE		logical size of payload, in bytes
262  *			note that 25 bits is enough to store the largest
263  *			"normal" BP's LSIZE (2^16 * 2^9) in bytes
264  * log. birth		transaction group in which the block was logically born
265  *
266  * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
267  * bp's they are stored in units of SPA_MINBLOCKSHIFT.
268  * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
269  * The B, D, X, lvl, type, and comp fields are stored the same as with normal
270  * BP's so the BP_SET_* macros can be used with them.  etype, PSIZE, LSIZE must
271  * be set with the BPE_SET_* macros.  BP_SET_EMBEDDED() should be called before
272  * other macros, as they assert that they are only used on BP's of the correct
273  * "embedded-ness".
274  */
275 
276 #define	BPE_GET_ETYPE(bp)	\
277 	(ASSERT(BP_IS_EMBEDDED(bp)), \
278 	BF64_GET((bp)->blk_prop, 40, 8))
279 #define	BPE_SET_ETYPE(bp, t)	do { \
280 	ASSERT(BP_IS_EMBEDDED(bp)); \
281 	BF64_SET((bp)->blk_prop, 40, 8, t); \
282 _NOTE(CONSTCOND) } while (0)
283 
284 #define	BPE_GET_LSIZE(bp)	\
285 	(ASSERT(BP_IS_EMBEDDED(bp)), \
286 	BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
287 #define	BPE_SET_LSIZE(bp, x)	do { \
288 	ASSERT(BP_IS_EMBEDDED(bp)); \
289 	BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
290 _NOTE(CONSTCOND) } while (0)
291 
292 #define	BPE_GET_PSIZE(bp)	\
293 	(ASSERT(BP_IS_EMBEDDED(bp)), \
294 	BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
295 #define	BPE_SET_PSIZE(bp, x)	do { \
296 	ASSERT(BP_IS_EMBEDDED(bp)); \
297 	BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
298 _NOTE(CONSTCOND) } while (0)
299 
300 typedef enum bp_embedded_type {
301 	BP_EMBEDDED_TYPE_DATA,
302 	BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
303 	NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
304 } bp_embedded_type_t;
305 
306 #define	BPE_NUM_WORDS 14
307 #define	BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
308 #define	BPE_IS_PAYLOADWORD(bp, wp) \
309 	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
310 
311 #define	SPA_BLKPTRSHIFT	7		/* blkptr_t is 128 bytes	*/
312 #define	SPA_DVAS_PER_BP	3		/* Number of DVAs in a bp	*/
313 
314 typedef struct blkptr {
315 	dva_t		blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
316 	uint64_t	blk_prop;	/* size, compression, type, etc	    */
317 	uint64_t	blk_pad[2];	/* Extra space for the future	    */
318 	uint64_t	blk_phys_birth;	/* txg when block was allocated	    */
319 	uint64_t	blk_birth;	/* transaction group at birth	    */
320 	uint64_t	blk_fill;	/* fill count			    */
321 	zio_cksum_t	blk_cksum;	/* 256-bit checksum		    */
322 } blkptr_t;
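/*
 * Illustrative sketch of pulling the compressed payload out of an
 * embedded bp, following the convention used by
 * decode_embedded_bp_compressed() (declared at the end of this file):
 * payload bytes are packed little-endian within each payload word, and
 * blk_prop and blk_birth are skipped.  The resulting PSIZE bytes still
 * have to be decompressed to LSIZE with the bp's comp function.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
bpe_copy_payload(const blkptr_t *bp, void *buf)
{
	const uint64_t *bp64 = (const uint64_t *)bp;
	uint8_t *dst = buf;
	uint64_t w = 0;
	size_t psize = BPE_GET_PSIZE(bp);
	size_t i;

	for (i = 0; i < psize; i++) {
		if (i % sizeof (w) == 0) {
			/* Start of a word: skip the non-payload words. */
			while (!BPE_IS_PAYLOADWORD(bp, bp64))
				bp64++;
			w = *bp64++;
		}
		dst[i] = (uint8_t)(w & 0xff);
		w >>= 8;
	}
}
#endif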
323 
324 /*
325  * Macros to get and set fields in a bp or DVA.
326  */
327 #define	DVA_GET_ASIZE(dva)	\
328 	BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
329 #define	DVA_SET_ASIZE(dva, x)	\
330 	BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
331 	SPA_MINBLOCKSHIFT, 0, x)
332 
333 #define	DVA_GET_GRID(dva)	BF64_GET((dva)->dva_word[0], 24, 8)
334 #define	DVA_SET_GRID(dva, x)	BF64_SET((dva)->dva_word[0], 24, 8, x)
335 
336 #define	DVA_GET_VDEV(dva)	BF64_GET((dva)->dva_word[0], 32, 32)
337 #define	DVA_SET_VDEV(dva, x)	BF64_SET((dva)->dva_word[0], 32, 32, x)
338 
339 #define	DVA_GET_OFFSET(dva)	\
340 	BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
341 #define	DVA_SET_OFFSET(dva, x)	\
342 	BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
343 
344 #define	DVA_GET_GANG(dva)	BF64_GET((dva)->dva_word[1], 63, 1)
345 #define	DVA_SET_GANG(dva, x)	BF64_SET((dva)->dva_word[1], 63, 1, x)
346 
347 #define	BP_GET_LSIZE(bp)	\
348 	(BP_IS_EMBEDDED(bp) ?	\
349 	(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
350 	BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
351 #define	BP_SET_LSIZE(bp, x)	do { \
352 	ASSERT(!BP_IS_EMBEDDED(bp)); \
353 	BF64_SET_SB((bp)->blk_prop, \
354 	    0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
355 _NOTE(CONSTCOND) } while (0)
356 
357 #define	BP_GET_PSIZE(bp)	\
358 	BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
359 #define	BP_SET_PSIZE(bp, x)	\
360 	BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
361 
362 #define	BP_GET_COMPRESS(bp)	BF64_GET((bp)->blk_prop, 32, 7)
363 #define	BP_SET_COMPRESS(bp, x)	BF64_SET((bp)->blk_prop, 32, 7, x)
364 
365 #define	BP_GET_CHECKSUM(bp)	BF64_GET((bp)->blk_prop, 40, 8)
366 #define	BP_SET_CHECKSUM(bp, x)	BF64_SET((bp)->blk_prop, 40, 8, x)
367 
368 #define	BP_GET_TYPE(bp)		BF64_GET((bp)->blk_prop, 48, 8)
369 #define	BP_SET_TYPE(bp, x)	BF64_SET((bp)->blk_prop, 48, 8, x)
370 
371 #define	BP_GET_LEVEL(bp)	BF64_GET((bp)->blk_prop, 56, 5)
372 #define	BP_SET_LEVEL(bp, x)	BF64_SET((bp)->blk_prop, 56, 5, x)
373 
374 #define	BP_IS_EMBEDDED(bp)	BF64_GET((bp)->blk_prop, 39, 1)
375 
376 #define	BP_GET_DEDUP(bp)	BF64_GET((bp)->blk_prop, 62, 1)
377 #define	BP_SET_DEDUP(bp, x)	BF64_SET((bp)->blk_prop, 62, 1, x)
378 
379 #define	BP_GET_BYTEORDER(bp)	BF64_GET((bp)->blk_prop, 63, 1)
380 #define	BP_SET_BYTEORDER(bp, x)	BF64_SET((bp)->blk_prop, 63, 1, x)
381 
382 #define	BP_PHYSICAL_BIRTH(bp)		\
383 	((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
384 
385 #define	BP_GET_ASIZE(bp)	\
386 	(DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
387 		DVA_GET_ASIZE(&(bp)->blk_dva[2]))
388 
389 #define	BP_GET_UCSIZE(bp) \
390 	((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
391 	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
392 
393 #define	BP_GET_NDVAS(bp)	\
394 	(!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
395 	!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
396 	!!DVA_GET_ASIZE(&(bp)->blk_dva[2]))
397 
398 #define	DVA_EQUAL(dva1, dva2)	\
399 	((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
400 	(dva1)->dva_word[0] == (dva2)->dva_word[0])
401 
402 #define	ZIO_CHECKSUM_EQUAL(zc1, zc2) \
403 	(0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
404 	((zc1).zc_word[1] - (zc2).zc_word[1]) | \
405 	((zc1).zc_word[2] - (zc2).zc_word[2]) | \
406 	((zc1).zc_word[3] - (zc2).zc_word[3])))
407 
408 
409 #define	DVA_IS_VALID(dva)	(DVA_GET_ASIZE(dva) != 0)
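/*
 * Illustrative sketch: walk the (up to three) DVAs of a non-embedded bp
 * and hand each vdev/offset/size triple to a caller-supplied function.
 * DVA offsets are relative to the start of the vdev's data area, which
 * on a leaf device begins after the front labels and boot region.
 */
#if 0
#include <stdint.h>

static void
bp_foreach_dva(const blkptr_t *bp,
    void (*func)(uint64_t vdev, uint64_t offset, uint64_t asize))
{
	int d;

	for (d = 0; d < SPA_DVAS_PER_BP; d++) {
		const dva_t *dva = &bp->blk_dva[d];

		if (!DVA_IS_VALID(dva))
			continue;
		func(DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
		    DVA_GET_ASIZE(dva));
	}
}
#endif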
410 
411 #define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
412 {						\
413 	(zcp)->zc_word[0] = w0;			\
414 	(zcp)->zc_word[1] = w1;			\
415 	(zcp)->zc_word[2] = w2;			\
416 	(zcp)->zc_word[3] = w3;			\
417 }
418 
419 #define	BP_IDENTITY(bp)		(&(bp)->blk_dva[0])
420 #define	BP_IS_GANG(bp)		DVA_GET_GANG(BP_IDENTITY(bp))
421 #define	DVA_IS_EMPTY(dva)	((dva)->dva_word[0] == 0ULL &&  \
422 	(dva)->dva_word[1] == 0ULL)
423 #define	BP_IS_HOLE(bp)		DVA_IS_EMPTY(BP_IDENTITY(bp))
424 #define	BP_IS_OLDER(bp, txg)	(!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))
425 
426 #define	BP_ZERO(bp)				\
427 {						\
428 	(bp)->blk_dva[0].dva_word[0] = 0;	\
429 	(bp)->blk_dva[0].dva_word[1] = 0;	\
430 	(bp)->blk_dva[1].dva_word[0] = 0;	\
431 	(bp)->blk_dva[1].dva_word[1] = 0;	\
432 	(bp)->blk_dva[2].dva_word[0] = 0;	\
433 	(bp)->blk_dva[2].dva_word[1] = 0;	\
434 	(bp)->blk_prop = 0;			\
435 	(bp)->blk_pad[0] = 0;			\
436 	(bp)->blk_pad[1] = 0;			\
437 	(bp)->blk_phys_birth = 0;		\
438 	(bp)->blk_birth = 0;			\
439 	(bp)->blk_fill = 0;			\
440 	ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0);	\
441 }
442 
448 /*
449  * Embedded checksum
450  */
451 #define	ZEC_MAGIC	0x210da7ab10c7a11ULL
452 
453 typedef struct zio_eck {
454 	uint64_t	zec_magic;	/* for validation, endianness	*/
455 	zio_cksum_t	zec_cksum;	/* 256-bit checksum		*/
456 } zio_eck_t;
457 
458 /*
459  * Gang block headers are self-checksumming and contain an array
460  * of block pointers.
461  */
462 #define	SPA_GANGBLOCKSIZE	SPA_MINBLOCKSIZE
463 #define	SPA_GBH_NBLKPTRS	((SPA_GANGBLOCKSIZE - \
464 	sizeof (zio_eck_t)) / sizeof (blkptr_t))
465 #define	SPA_GBH_FILLER		((SPA_GANGBLOCKSIZE - \
466 	sizeof (zio_eck_t) - \
467 	(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
468 	sizeof (uint64_t))
469 
470 typedef struct zio_gbh {
471 	blkptr_t		zg_blkptr[SPA_GBH_NBLKPTRS];
472 	uint64_t		zg_filler[SPA_GBH_FILLER];
473 	zio_eck_t		zg_tail;
474 } zio_gbh_phys_t;
475 
476 #define	VDEV_RAIDZ_MAXPARITY	3
477 
478 #define	VDEV_PAD_SIZE		(8 << 10)
479 /* 2 padding areas (vl_pad1 and vl_pad2) to skip */
480 #define	VDEV_SKIP_SIZE		(VDEV_PAD_SIZE * 2)
481 #define	VDEV_PHYS_SIZE		(112 << 10)
482 #define	VDEV_UBERBLOCK_RING	(128 << 10)
483 
484 #define	VDEV_UBERBLOCK_SHIFT(vd)	\
485 	MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT)
486 #define	VDEV_UBERBLOCK_COUNT(vd)	\
487 	(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
488 #define	VDEV_UBERBLOCK_OFFSET(vd, n)	\
489 	offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
490 #define	VDEV_UBERBLOCK_SIZE(vd)		(1ULL << VDEV_UBERBLOCK_SHIFT(vd))
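/*
 * Worked example: for a top-level vdev with v_ashift 12 (4K sectors)
 * and UBERBLOCK_SHIFT of 10 (defined further down):
 *
 *	VDEV_UBERBLOCK_SHIFT(vd)     = MAX(12, 10)  = 12
 *	VDEV_UBERBLOCK_SIZE(vd)      = 1 << 12      = 4K
 *	VDEV_UBERBLOCK_COUNT(vd)     = 128K >> 12   = 32 slots in the ring
 *	VDEV_UBERBLOCK_OFFSET(vd, 3) = offsetof(vdev_label_t, vl_uberblock)
 *	                               + (3 << 12)  = 128K + 12K
 */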
491 
492 typedef struct vdev_phys {
493 	char		vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
494 	zio_eck_t	vp_zbt;
495 } vdev_phys_t;
496 
497 typedef struct vdev_label {
498 	char		vl_pad1[VDEV_PAD_SIZE];			/*  8K  */
499 	char		vl_pad2[VDEV_PAD_SIZE];			/*  8K  */
500 	vdev_phys_t	vl_vdev_phys;				/* 112K	*/
501 	char		vl_uberblock[VDEV_UBERBLOCK_RING];	/* 128K	*/
502 } vdev_label_t;							/* 256K total */
503 
504 /*
505  * vdev_dirty() flags
506  */
507 #define	VDD_METASLAB	0x01
508 #define	VDD_DTL		0x02
509 
510 /*
511  * Size and offset of embedded boot loader region on each label.
512  * The total size of the first two labels plus the boot area is 4MB.
513  */
514 #define	VDEV_BOOT_OFFSET	(2 * sizeof (vdev_label_t))
515 #define	VDEV_BOOT_SIZE		(7ULL << 19)			/* 3.5M	*/
516 
517 /*
518  * Size of label regions at the start and end of each leaf device.
519  */
520 #define	VDEV_LABEL_START_SIZE	(2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
521 #define	VDEV_LABEL_END_SIZE	(2 * sizeof (vdev_label_t))
522 #define	VDEV_LABELS		4
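/*
 * Illustrative sketch of where the four labels live: two at the front
 * of the device and two at the end.  For a device of psize bytes
 * (rounded down to a multiple of sizeof (vdev_label_t)), the byte
 * offset of label l is:
 */
#if 0
#include <stdint.h>

static uint64_t
label_offset(uint64_t psize, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? 0 :
	    psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
#endif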
523 
524 enum zio_checksum {
525 	ZIO_CHECKSUM_INHERIT = 0,
526 	ZIO_CHECKSUM_ON,
527 	ZIO_CHECKSUM_OFF,
528 	ZIO_CHECKSUM_LABEL,
529 	ZIO_CHECKSUM_GANG_HEADER,
530 	ZIO_CHECKSUM_ZILOG,
531 	ZIO_CHECKSUM_FLETCHER_2,
532 	ZIO_CHECKSUM_FLETCHER_4,
533 	ZIO_CHECKSUM_SHA256,
534 	ZIO_CHECKSUM_ZILOG2,
535 	ZIO_CHECKSUM_NOPARITY,
536 	ZIO_CHECKSUM_SHA512,
537 	ZIO_CHECKSUM_SKEIN,
538 	ZIO_CHECKSUM_EDONR,
539 	ZIO_CHECKSUM_FUNCTIONS
540 };
541 
542 #define	ZIO_CHECKSUM_ON_VALUE	ZIO_CHECKSUM_FLETCHER_4
543 #define	ZIO_CHECKSUM_DEFAULT	ZIO_CHECKSUM_ON
544 
545 enum zio_compress {
546 	ZIO_COMPRESS_INHERIT = 0,
547 	ZIO_COMPRESS_ON,
548 	ZIO_COMPRESS_OFF,
549 	ZIO_COMPRESS_LZJB,
550 	ZIO_COMPRESS_EMPTY,
551 	ZIO_COMPRESS_GZIP_1,
552 	ZIO_COMPRESS_GZIP_2,
553 	ZIO_COMPRESS_GZIP_3,
554 	ZIO_COMPRESS_GZIP_4,
555 	ZIO_COMPRESS_GZIP_5,
556 	ZIO_COMPRESS_GZIP_6,
557 	ZIO_COMPRESS_GZIP_7,
558 	ZIO_COMPRESS_GZIP_8,
559 	ZIO_COMPRESS_GZIP_9,
560 	ZIO_COMPRESS_ZLE,
561 	ZIO_COMPRESS_LZ4,
562 	ZIO_COMPRESS_FUNCTIONS
563 };
564 
565 #define	ZIO_COMPRESS_ON_VALUE	ZIO_COMPRESS_LZJB
566 #define	ZIO_COMPRESS_DEFAULT	ZIO_COMPRESS_OFF
567 
568 /* nvlist pack encoding */
569 #define	NV_ENCODE_NATIVE	0
570 #define	NV_ENCODE_XDR		1
571 
572 typedef enum {
573 	DATA_TYPE_UNKNOWN = 0,
574 	DATA_TYPE_BOOLEAN,
575 	DATA_TYPE_BYTE,
576 	DATA_TYPE_INT16,
577 	DATA_TYPE_UINT16,
578 	DATA_TYPE_INT32,
579 	DATA_TYPE_UINT32,
580 	DATA_TYPE_INT64,
581 	DATA_TYPE_UINT64,
582 	DATA_TYPE_STRING,
583 	DATA_TYPE_BYTE_ARRAY,
584 	DATA_TYPE_INT16_ARRAY,
585 	DATA_TYPE_UINT16_ARRAY,
586 	DATA_TYPE_INT32_ARRAY,
587 	DATA_TYPE_UINT32_ARRAY,
588 	DATA_TYPE_INT64_ARRAY,
589 	DATA_TYPE_UINT64_ARRAY,
590 	DATA_TYPE_STRING_ARRAY,
591 	DATA_TYPE_HRTIME,
592 	DATA_TYPE_NVLIST,
593 	DATA_TYPE_NVLIST_ARRAY,
594 	DATA_TYPE_BOOLEAN_VALUE,
595 	DATA_TYPE_INT8,
596 	DATA_TYPE_UINT8,
597 	DATA_TYPE_BOOLEAN_ARRAY,
598 	DATA_TYPE_INT8_ARRAY,
599 	DATA_TYPE_UINT8_ARRAY
600 } data_type_t;
601 
602 /*
603  * On-disk version number.
604  */
605 #define	SPA_VERSION_1			1ULL
606 #define	SPA_VERSION_2			2ULL
607 #define	SPA_VERSION_3			3ULL
608 #define	SPA_VERSION_4			4ULL
609 #define	SPA_VERSION_5			5ULL
610 #define	SPA_VERSION_6			6ULL
611 #define	SPA_VERSION_7			7ULL
612 #define	SPA_VERSION_8			8ULL
613 #define	SPA_VERSION_9			9ULL
614 #define	SPA_VERSION_10			10ULL
615 #define	SPA_VERSION_11			11ULL
616 #define	SPA_VERSION_12			12ULL
617 #define	SPA_VERSION_13			13ULL
618 #define	SPA_VERSION_14			14ULL
619 #define	SPA_VERSION_15			15ULL
620 #define	SPA_VERSION_16			16ULL
621 #define	SPA_VERSION_17			17ULL
622 #define	SPA_VERSION_18			18ULL
623 #define	SPA_VERSION_19			19ULL
624 #define	SPA_VERSION_20			20ULL
625 #define	SPA_VERSION_21			21ULL
626 #define	SPA_VERSION_22			22ULL
627 #define	SPA_VERSION_23			23ULL
628 #define	SPA_VERSION_24			24ULL
629 #define	SPA_VERSION_25			25ULL
630 #define	SPA_VERSION_26			26ULL
631 #define	SPA_VERSION_27			27ULL
632 #define	SPA_VERSION_28			28ULL
633 #define	SPA_VERSION_5000		5000ULL
634 
635 /*
636  * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
637  * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
638  * and do the appropriate changes.  Also bump the version number in
639  * usr/src/grub/capability.
640  */
641 #define	SPA_VERSION			SPA_VERSION_5000
642 #define	SPA_VERSION_STRING		"5000"
643 
644 /*
645  * Symbolic names for the changes that caused a SPA_VERSION switch.
646  * Used in the code when checking for presence or absence of a feature.
647  * Feel free to define multiple symbolic names for each version if there
648  * were multiple changes to on-disk structures during that version.
649  *
650  * NOTE: When checking the current SPA_VERSION in your code, be sure
651  *       to use spa_version() since it reports the version of the
652  *       last synced uberblock.  Checking the in-flight version can
653  *       be dangerous in some cases.
654  */
655 #define	SPA_VERSION_INITIAL		SPA_VERSION_1
656 #define	SPA_VERSION_DITTO_BLOCKS	SPA_VERSION_2
657 #define	SPA_VERSION_SPARES		SPA_VERSION_3
658 #define	SPA_VERSION_RAID6		SPA_VERSION_3
659 #define	SPA_VERSION_BPLIST_ACCOUNT	SPA_VERSION_3
660 #define	SPA_VERSION_RAIDZ_DEFLATE	SPA_VERSION_3
661 #define	SPA_VERSION_DNODE_BYTES		SPA_VERSION_3
662 #define	SPA_VERSION_ZPOOL_HISTORY	SPA_VERSION_4
663 #define	SPA_VERSION_GZIP_COMPRESSION	SPA_VERSION_5
664 #define	SPA_VERSION_BOOTFS		SPA_VERSION_6
665 #define	SPA_VERSION_SLOGS		SPA_VERSION_7
666 #define	SPA_VERSION_DELEGATED_PERMS	SPA_VERSION_8
667 #define	SPA_VERSION_FUID		SPA_VERSION_9
668 #define	SPA_VERSION_REFRESERVATION	SPA_VERSION_9
669 #define	SPA_VERSION_REFQUOTA		SPA_VERSION_9
670 #define	SPA_VERSION_UNIQUE_ACCURATE	SPA_VERSION_9
671 #define	SPA_VERSION_L2CACHE		SPA_VERSION_10
672 #define	SPA_VERSION_NEXT_CLONES		SPA_VERSION_11
673 #define	SPA_VERSION_ORIGIN		SPA_VERSION_11
674 #define	SPA_VERSION_DSL_SCRUB		SPA_VERSION_11
675 #define	SPA_VERSION_SNAP_PROPS		SPA_VERSION_12
676 #define	SPA_VERSION_USED_BREAKDOWN	SPA_VERSION_13
677 #define	SPA_VERSION_PASSTHROUGH_X	SPA_VERSION_14
678 #define SPA_VERSION_USERSPACE		SPA_VERSION_15
679 #define	SPA_VERSION_STMF_PROP		SPA_VERSION_16
680 #define	SPA_VERSION_RAIDZ3		SPA_VERSION_17
681 #define	SPA_VERSION_USERREFS		SPA_VERSION_18
682 #define	SPA_VERSION_HOLES		SPA_VERSION_19
683 #define	SPA_VERSION_ZLE_COMPRESSION	SPA_VERSION_20
684 #define	SPA_VERSION_DEDUP		SPA_VERSION_21
685 #define	SPA_VERSION_RECVD_PROPS		SPA_VERSION_22
686 #define	SPA_VERSION_SLIM_ZIL		SPA_VERSION_23
687 #define	SPA_VERSION_SA			SPA_VERSION_24
688 #define	SPA_VERSION_SCAN		SPA_VERSION_25
689 #define	SPA_VERSION_DIR_CLONES		SPA_VERSION_26
690 #define	SPA_VERSION_DEADLISTS		SPA_VERSION_26
691 #define	SPA_VERSION_FAST_SNAP		SPA_VERSION_27
692 #define	SPA_VERSION_MULTI_REPLACE	SPA_VERSION_28
693 #define	SPA_VERSION_BEFORE_FEATURES	SPA_VERSION_28
694 #define	SPA_VERSION_FEATURES		SPA_VERSION_5000
695 
696 #define	SPA_VERSION_IS_SUPPORTED(v) \
697 	(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
698 	((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
699 
700 /*
701  * The following are configuration names used in the nvlist describing a pool's
702  * configuration.
703  */
704 #define	ZPOOL_CONFIG_VERSION		"version"
705 #define	ZPOOL_CONFIG_POOL_NAME		"name"
706 #define	ZPOOL_CONFIG_POOL_STATE		"state"
707 #define	ZPOOL_CONFIG_POOL_TXG		"txg"
708 #define	ZPOOL_CONFIG_POOL_GUID		"pool_guid"
709 #define	ZPOOL_CONFIG_CREATE_TXG		"create_txg"
710 #define	ZPOOL_CONFIG_TOP_GUID		"top_guid"
711 #define	ZPOOL_CONFIG_VDEV_TREE		"vdev_tree"
712 #define	ZPOOL_CONFIG_TYPE		"type"
713 #define	ZPOOL_CONFIG_CHILDREN		"children"
714 #define	ZPOOL_CONFIG_ID			"id"
715 #define	ZPOOL_CONFIG_GUID		"guid"
716 #define	ZPOOL_CONFIG_PATH		"path"
717 #define	ZPOOL_CONFIG_DEVID		"devid"
718 #define	ZPOOL_CONFIG_PHYS_PATH		"phys_path"
719 #define	ZPOOL_CONFIG_METASLAB_ARRAY	"metaslab_array"
720 #define	ZPOOL_CONFIG_METASLAB_SHIFT	"metaslab_shift"
721 #define	ZPOOL_CONFIG_ASHIFT		"ashift"
722 #define	ZPOOL_CONFIG_ASIZE		"asize"
723 #define	ZPOOL_CONFIG_DTL		"DTL"
724 #define	ZPOOL_CONFIG_STATS		"stats"
725 #define	ZPOOL_CONFIG_WHOLE_DISK		"whole_disk"
726 #define	ZPOOL_CONFIG_ERRCOUNT		"error_count"
727 #define	ZPOOL_CONFIG_NOT_PRESENT	"not_present"
728 #define	ZPOOL_CONFIG_SPARES		"spares"
729 #define	ZPOOL_CONFIG_IS_SPARE		"is_spare"
730 #define	ZPOOL_CONFIG_NPARITY		"nparity"
731 #define	ZPOOL_CONFIG_HOSTID		"hostid"
732 #define	ZPOOL_CONFIG_HOSTNAME		"hostname"
733 #define	ZPOOL_CONFIG_IS_LOG		"is_log"
734 #define	ZPOOL_CONFIG_TIMESTAMP		"timestamp" /* not stored on disk */
735 #define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"
736 
737 /*
738  * The persistent vdev state is stored as separate values rather than a single
739  * 'vdev_state' entry.  This is because a device can be in multiple states, such
740  * as offline and degraded.
741  */
742 #define	ZPOOL_CONFIG_OFFLINE            "offline"
743 #define	ZPOOL_CONFIG_FAULTED            "faulted"
744 #define	ZPOOL_CONFIG_DEGRADED           "degraded"
745 #define	ZPOOL_CONFIG_REMOVED            "removed"
746 #define	ZPOOL_CONFIG_FRU		"fru"
747 #define	ZPOOL_CONFIG_AUX_STATE		"aux_state"
748 
749 #define	VDEV_TYPE_ROOT			"root"
750 #define	VDEV_TYPE_MIRROR		"mirror"
751 #define	VDEV_TYPE_REPLACING		"replacing"
752 #define	VDEV_TYPE_RAIDZ			"raidz"
753 #define	VDEV_TYPE_DISK			"disk"
754 #define	VDEV_TYPE_FILE			"file"
755 #define	VDEV_TYPE_MISSING		"missing"
756 #define	VDEV_TYPE_HOLE			"hole"
757 #define	VDEV_TYPE_SPARE			"spare"
758 #define	VDEV_TYPE_LOG			"log"
759 #define	VDEV_TYPE_L2CACHE		"l2cache"
760 
761 /*
762  * This is needed in userland to report the minimum necessary device size.
763  */
764 #define	SPA_MINDEVSIZE		(64ULL << 20)
765 
766 /*
767  * The location of the pool configuration repository, shared between kernel and
768  * userland.
769  */
770 #define	ZPOOL_CACHE		"/boot/zfs/zpool.cache"
771 
772 /*
773  * vdev states are ordered from least to most healthy.
774  * A vdev that's CANT_OPEN or below is considered unusable.
775  */
776 typedef enum vdev_state {
777 	VDEV_STATE_UNKNOWN = 0,	/* Uninitialized vdev			*/
778 	VDEV_STATE_CLOSED,	/* Not currently open			*/
779 	VDEV_STATE_OFFLINE,	/* Not allowed to open			*/
780 	VDEV_STATE_REMOVED,	/* Explicitly removed from system	*/
781 	VDEV_STATE_CANT_OPEN,	/* Tried to open, but failed		*/
782 	VDEV_STATE_FAULTED,	/* External request to fault device	*/
783 	VDEV_STATE_DEGRADED,	/* Replicated vdev with unhealthy kids	*/
784 	VDEV_STATE_HEALTHY	/* Presumed good			*/
785 } vdev_state_t;
786 
787 /*
788  * vdev aux states.  When a vdev is in the CANT_OPEN state, the aux field
789  * of the vdev stats structure uses these constants to distinguish why.
790  */
791 typedef enum vdev_aux {
792 	VDEV_AUX_NONE,		/* no error				*/
793 	VDEV_AUX_OPEN_FAILED,	/* ldi_open_*() or vn_open() failed	*/
794 	VDEV_AUX_CORRUPT_DATA,	/* bad label or disk contents		*/
795 	VDEV_AUX_NO_REPLICAS,	/* insufficient number of replicas	*/
796 	VDEV_AUX_BAD_GUID_SUM,	/* vdev guid sum doesn't match		*/
797 	VDEV_AUX_TOO_SMALL,	/* vdev size is too small		*/
798 	VDEV_AUX_BAD_LABEL,	/* the label is OK but invalid		*/
799 	VDEV_AUX_VERSION_NEWER,	/* on-disk version is too new		*/
800 	VDEV_AUX_VERSION_OLDER,	/* on-disk version is too old		*/
801 	VDEV_AUX_SPARED		/* hot spare used in another pool	*/
802 } vdev_aux_t;
803 
804 /*
805  * pool state.  The following states are written to disk as part of the normal
806  * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE.  The remaining states are
807  * software abstractions used at various levels to communicate pool state.
808  */
809 typedef enum pool_state {
810 	POOL_STATE_ACTIVE = 0,		/* In active use		*/
811 	POOL_STATE_EXPORTED,		/* Explicitly exported		*/
812 	POOL_STATE_DESTROYED,		/* Explicitly destroyed		*/
813 	POOL_STATE_SPARE,		/* Reserved for hot spare use	*/
814 	POOL_STATE_UNINITIALIZED,	/* Internal spa_t state		*/
815 	POOL_STATE_UNAVAIL,		/* Internal libzfs state	*/
816 	POOL_STATE_POTENTIALLY_ACTIVE	/* Internal libzfs state	*/
817 } pool_state_t;
818 
819 /*
820  * The uberblock version is incremented whenever an incompatible on-disk
821  * format change is made to the SPA, DMU, or ZAP.
822  *
823  * Note: the first two fields should never be moved.  When a storage pool
824  * is opened, the uberblock must be read off the disk before the version
825  * can be checked.  If the ub_version field is moved, we may not detect
826  * version mismatch.  If the ub_magic field is moved, applications that
827  * expect the magic number in the first word won't work.
828  */
829 #define	UBERBLOCK_MAGIC		0x00bab10c		/* oo-ba-bloc!	*/
830 #define	UBERBLOCK_SHIFT		10			/* up to 1K	*/
831 
832 struct uberblock {
833 	uint64_t	ub_magic;	/* UBERBLOCK_MAGIC		*/
834 	uint64_t	ub_version;	/* SPA_VERSION			*/
835 	uint64_t	ub_txg;		/* txg of last sync		*/
836 	uint64_t	ub_guid_sum;	/* sum of all vdev guids	*/
837 	uint64_t	ub_timestamp;	/* UTC time of last sync	*/
838 	blkptr_t	ub_rootbp;	/* MOS objset_phys_t		*/
839 };
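/*
 * Illustrative sketch of an "is this uberblock better" test of the
 * kind used when scanning the uberblock rings at import: highest txg
 * wins, ties are broken by timestamp, and invalid or unsupported
 * uberblocks are skipped (byte-swapped labels are ignored here for
 * brevity).
 */
#if 0
static int
uberblock_better(const struct uberblock *nu, const struct uberblock *cur)
{
	if (nu->ub_magic != UBERBLOCK_MAGIC)
		return (0);
	if (!SPA_VERSION_IS_SUPPORTED(nu->ub_version))
		return (0);
	if (nu->ub_txg != cur->ub_txg)
		return (nu->ub_txg > cur->ub_txg);
	return (nu->ub_timestamp > cur->ub_timestamp);
}
#endif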
840 
841 /*
842  * Flags.
843  */
844 #define	DNODE_MUST_BE_ALLOCATED	1
845 #define	DNODE_MUST_BE_FREE	2
846 
847 /*
848  * Fixed constants.
849  */
850 #define	DNODE_SHIFT		9	/* 512 bytes */
851 #define	DN_MIN_INDBLKSHIFT	12	/* 4k */
852 #define	DN_MAX_INDBLKSHIFT	14	/* 16k */
853 #define	DNODE_BLOCK_SHIFT	14	/* 16k */
854 #define	DNODE_CORE_SIZE		64	/* 64 bytes for dnode sans blkptrs */
855 #define	DN_MAX_OBJECT_SHIFT	48	/* 256 trillion (zfs_fid_t limit) */
856 #define	DN_MAX_OFFSET_SHIFT	64	/* 2^64 bytes in a dnode */
857 
858 /*
859  * Derived constants.
860  */
861 #define	DNODE_MIN_SIZE		(1 << DNODE_SHIFT)
862 #define	DNODE_MAX_SIZE		(1 << DNODE_BLOCK_SHIFT)
863 #define	DNODE_BLOCK_SIZE	(1 << DNODE_BLOCK_SHIFT)
864 #define	DNODE_MIN_SLOTS		(DNODE_MIN_SIZE >> DNODE_SHIFT)
865 #define	DNODE_MAX_SLOTS		(DNODE_MAX_SIZE >> DNODE_SHIFT)
866 #define	DN_BONUS_SIZE(dnsize)	((dnsize) - DNODE_CORE_SIZE - \
867 	(1 << SPA_BLKPTRSHIFT))
868 #define	DN_SLOTS_TO_BONUSLEN(slots)	DN_BONUS_SIZE((slots) << DNODE_SHIFT)
869 #define	DN_OLD_MAX_BONUSLEN		(DN_BONUS_SIZE(DNODE_MIN_SIZE))
870 #define	DN_MAX_NBLKPTR		((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> \
871 	SPA_BLKPTRSHIFT)
872 #define	DN_MAX_OBJECT		(1ULL << DN_MAX_OBJECT_SHIFT)
873 #define	DN_ZERO_BONUSLEN	(DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
874 
875 #define	DNODES_PER_BLOCK_SHIFT	(DNODE_BLOCK_SHIFT - DNODE_SHIFT)
876 #define	DNODES_PER_BLOCK	(1ULL << DNODES_PER_BLOCK_SHIFT)
877 #define	DNODES_PER_LEVEL_SHIFT	(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
878 
879 /* The +2 here is a cheesy way to round up */
880 #define	DN_MAX_LEVELS	(2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
881 	(DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
882 
883 #define	DN_BONUS(dnp)	((void*)((dnp)->dn_bonus + \
884 	(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
885 
886 #define	DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
887 	(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
888 
889 #define	EPB(blkshift, typeshift)	(1 << (blkshift - typeshift))
890 
891 /* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
892 #define	DNODE_FLAG_USED_BYTES		(1<<0)
893 #define	DNODE_FLAG_USERUSED_ACCOUNTED	(1<<1)
894 
895 /* Does dnode have a SA spill blkptr in bonus? */
896 #define	DNODE_FLAG_SPILL_BLKPTR	(1<<2)
897 
898 typedef struct dnode_phys {
899 	uint8_t dn_type;		/* dmu_object_type_t */
900 	uint8_t dn_indblkshift;		/* ln2(indirect block size) */
901 	uint8_t dn_nlevels;		/* 1=dn_blkptr->data blocks */
902 	uint8_t dn_nblkptr;		/* length of dn_blkptr */
903 	uint8_t dn_bonustype;		/* type of data in bonus buffer */
904 	uint8_t	dn_checksum;		/* ZIO_CHECKSUM type */
905 	uint8_t	dn_compress;		/* ZIO_COMPRESS type */
906 	uint8_t dn_flags;		/* DNODE_FLAG_* */
907 	uint16_t dn_datablkszsec;	/* data block size in 512b sectors */
908 	uint16_t dn_bonuslen;		/* length of dn_bonus */
909 	uint8_t dn_extra_slots;		/* # of subsequent slots consumed */
910 	uint8_t dn_pad2[3];
911 
912 	/* accounting is protected by dn_dirty_mtx */
913 	uint64_t dn_maxblkid;		/* largest allocated block ID */
914 	uint64_t dn_used;		/* bytes (or sectors) of disk space */
915 
916 	uint64_t dn_pad3[4];
917 
918 	/*
919 	 * The tail region is 448 bytes for a 512 byte dnode, and
920 	 * correspondingly larger for larger dnode sizes. The spill
921 	 * block pointer, when present, is always at the end of the tail
922 	 * region. There are three ways this space may be used, using
923 	 * a 512 byte dnode for this diagram:
924 	 *
925 	 * 0       64      128     192     256     320     384     448 (offset)
926 	 * +---------------+---------------+---------------+-------+
927 	 * | dn_blkptr[0]  | dn_blkptr[1]  | dn_blkptr[2]  | /     |
928 	 * +---------------+---------------+---------------+-------+
929 	 * | dn_blkptr[0]  | dn_bonus[0..319]                      |
930 	 * +---------------+-----------------------+---------------+
931 	 * | dn_blkptr[0]  | dn_bonus[0..191]      | dn_spill      |
932 	 * +---------------+-----------------------+---------------+
933 	 */
934 	union {
935 		blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
936 		struct {
937 			blkptr_t __dn_ignore1;
938 			uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
939 		};
940 		struct {
941 			blkptr_t __dn_ignore2;
942 			uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
943 			    sizeof (blkptr_t)];
944 			blkptr_t dn_spill;
945 		};
946 	};
947 } dnode_phys_t;
948 
949 #define	DN_SPILL_BLKPTR(dnp)	(blkptr_t *)((char *)(dnp) + \
950 	(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT))
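/*
 * Illustrative sketch: the bonus buffer starts right after the dnode's
 * block pointer array, and when DNODE_FLAG_SPILL_BLKPTR is set the
 * spill block pointer occupies the last blkptr_t-sized slot of the
 * (possibly multi-slot) dnode.
 */
#if 0
static void
dnode_bonus_example(const dnode_phys_t *dnp)
{
	const void *bonus = DN_BONUS(dnp);	/* dn_bonuslen bytes long */
	const blkptr_t *spill = NULL;

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		spill = DN_SPILL_BLKPTR(dnp);
	(void) bonus;
	(void) spill;
}
#endif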
951 
952 typedef enum dmu_object_byteswap {
953 	DMU_BSWAP_UINT8,
954 	DMU_BSWAP_UINT16,
955 	DMU_BSWAP_UINT32,
956 	DMU_BSWAP_UINT64,
957 	DMU_BSWAP_ZAP,
958 	DMU_BSWAP_DNODE,
959 	DMU_BSWAP_OBJSET,
960 	DMU_BSWAP_ZNODE,
961 	DMU_BSWAP_OLDACL,
962 	DMU_BSWAP_ACL,
963 	/*
964 	 * Allocating a new byteswap type number makes the on-disk format
965 	 * incompatible with any other format that uses the same number.
966 	 *
967 	 * Data can usually be structured to work with one of the
968 	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
969 	 */
970 	DMU_BSWAP_NUMFUNCS
971 } dmu_object_byteswap_t;
972 
973 #define	DMU_OT_NEWTYPE 0x80
974 #define	DMU_OT_METADATA 0x40
975 #define	DMU_OT_BYTESWAP_MASK 0x3f
976 
977 /*
978  * Defines a uint8_t object type. Object types specify if the data
979  * in the object is metadata (boolean) and how to byteswap the data
980  * (dmu_object_byteswap_t).
981  */
982 #define	DMU_OT(byteswap, metadata) \
983 	(DMU_OT_NEWTYPE | \
984 	((metadata) ? DMU_OT_METADATA : 0) | \
985 	((byteswap) & DMU_OT_BYTESWAP_MASK))
986 
987 typedef enum dmu_object_type {
988 	DMU_OT_NONE,
989 	/* general: */
990 	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
991 	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
992 	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
993 	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
994 	DMU_OT_BPLIST,			/* UINT64 */
995 	DMU_OT_BPLIST_HDR,		/* UINT64 */
996 	/* spa: */
997 	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
998 	DMU_OT_SPACE_MAP,		/* UINT64 */
999 	/* zil: */
1000 	DMU_OT_INTENT_LOG,		/* UINT64 */
1001 	/* dmu: */
1002 	DMU_OT_DNODE,			/* DNODE */
1003 	DMU_OT_OBJSET,			/* OBJSET */
1004 	/* dsl: */
1005 	DMU_OT_DSL_DIR,			/* UINT64 */
1006 	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
1007 	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
1008 	DMU_OT_DSL_PROPS,		/* ZAP */
1009 	DMU_OT_DSL_DATASET,		/* UINT64 */
1010 	/* zpl: */
1011 	DMU_OT_ZNODE,			/* ZNODE */
1012 	DMU_OT_OLDACL,			/* Old ACL */
1013 	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
1014 	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
1015 	DMU_OT_MASTER_NODE,		/* ZAP */
1016 	DMU_OT_UNLINKED_SET,		/* ZAP */
1017 	/* zvol: */
1018 	DMU_OT_ZVOL,			/* UINT8 */
1019 	DMU_OT_ZVOL_PROP,		/* ZAP */
1020 	/* other; for testing only! */
1021 	DMU_OT_PLAIN_OTHER,		/* UINT8 */
1022 	DMU_OT_UINT64_OTHER,		/* UINT64 */
1023 	DMU_OT_ZAP_OTHER,		/* ZAP */
1024 	/* new object types: */
1025 	DMU_OT_ERROR_LOG,		/* ZAP */
1026 	DMU_OT_SPA_HISTORY,		/* UINT8 */
1027 	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
1028 	DMU_OT_POOL_PROPS,		/* ZAP */
1029 	DMU_OT_DSL_PERMS,		/* ZAP */
1030 	DMU_OT_ACL,			/* ACL */
1031 	DMU_OT_SYSACL,			/* SYSACL */
1032 	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
1033 	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
1034 	DMU_OT_NEXT_CLONES,		/* ZAP */
1035 	DMU_OT_SCAN_QUEUE,		/* ZAP */
1036 	DMU_OT_USERGROUP_USED,		/* ZAP */
1037 	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
1038 	DMU_OT_USERREFS,		/* ZAP */
1039 	DMU_OT_DDT_ZAP,			/* ZAP */
1040 	DMU_OT_DDT_STATS,		/* ZAP */
1041 	DMU_OT_SA,			/* System attr */
1042 	DMU_OT_SA_MASTER_NODE,		/* ZAP */
1043 	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
1044 	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
1045 	DMU_OT_SCAN_XLATE,		/* ZAP */
1046 	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
1047 	DMU_OT_NUMTYPES,
1048 
1049 	/*
1050 	 * Names for valid types declared with DMU_OT().
1051 	 */
1052 	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
1053 	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
1054 	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
1055 	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
1056 	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
1057 	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
1058 	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
1059 	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
1060 	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
1061 	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE)
1062 } dmu_object_type_t;
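/*
 * Illustrative sketch of how callers decide whether an object type
 * holds metadata: "new" DMU_OT() types carry the answer in the type
 * byte itself, legacy types are looked up in the dmu_ot[] table that
 * the consumer of this header provides (the same table BP_GET_UCSIZE
 * uses above).
 */
#if 0
static int
dmu_ot_is_metadata_example(dmu_object_type_t ot)
{
	if (ot & DMU_OT_NEWTYPE)
		return ((ot & DMU_OT_METADATA) != 0);
	return (dmu_ot[ot].ot_metadata);
}
#endif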
1063 
1064 typedef enum dmu_objset_type {
1065 	DMU_OST_NONE,
1066 	DMU_OST_META,
1067 	DMU_OST_ZFS,
1068 	DMU_OST_ZVOL,
1069 	DMU_OST_OTHER,			/* For testing only! */
1070 	DMU_OST_ANY,			/* Be careful! */
1071 	DMU_OST_NUMTYPES
1072 } dmu_objset_type_t;
1073 
1074 #define	ZAP_MAXVALUELEN	(1024 * 8)
1075 
1076 /*
1077  * header for all bonus and spill buffers.
1078  * The header has a fixed portion with a variable number
1079  * of "lengths" depending on the number of variable-sized
1080  * attributes, which are determined by the "layout number".
1081  */
1082 
1083 #define	SA_MAGIC	0x2F505A  /* ZFS SA */
1084 typedef struct sa_hdr_phys {
1085 	uint32_t sa_magic;
1086 	uint16_t sa_layout_info;  /* Encoded with hdrsize and layout number */
1087 	uint16_t sa_lengths[1];	/* optional sizes for variable length attrs */
1088 	/* ... Data follows the lengths.  */
1089 } sa_hdr_phys_t;
1090 
1091 /*
1092  * sa_hdr_phys -> sa_layout_info
1093  *
1094  * 16      10       0
1095  * +--------+-------+
1096  * | hdrsz  |layout |
1097  * +--------+-------+
1098  *
1099  * Bits 0-9 hold the layout number.
1100  * Bits 10-15 hold the size of the header.
1101  * The header size in bytes is that number * 8.
1102  *
1103  * For example:
1104  * hdrsz of 1 ==> 8 byte header
1105  *          2 ==> 16 byte header
1106  *
1107  */
1108 
1109 #define	SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
1110 #define	SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0)
1111 #define	SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
1112 { \
1113 	BF32_SET_SB(x, 10, 6, 3, 0, size); \
1114 	BF32_SET(x, 0, 10, num); \
1115 }
1116 
1117 #define	SA_MODE_OFFSET		0
1118 #define	SA_SIZE_OFFSET		8
1119 #define	SA_GEN_OFFSET		16
1120 #define	SA_UID_OFFSET		24
1121 #define	SA_GID_OFFSET		32
1122 #define	SA_PARENT_OFFSET	40
1123 #define	SA_SYMLINK_OFFSET	160
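/*
 * Illustrative sketch of reading a fixed-offset attribute (the file
 * mode) out of an SA-style bonus or spill buffer: skip the
 * variable-sized header, then index with the SA_*_OFFSET constants.
 * This assumes the standard znode layout; a careful reader also checks
 * sa_magic and the layout number first.
 */
#if 0
#include <stdint.h>

static uint64_t
sa_get_mode(const sa_hdr_phys_t *sahdrp)
{
	const char *attrs = (const char *)sahdrp + SA_HDR_SIZE(sahdrp);

	return (*(const uint64_t *)(attrs + SA_MODE_OFFSET));
}
#endif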
1124 
1125 /*
1126  * Intent log header - this on disk structure holds fields to manage
1127  * the log.  All fields are 64 bit to easily handle cross architectures.
1128  */
1129 typedef struct zil_header {
1130 	uint64_t zh_claim_txg;	/* txg in which log blocks were claimed */
1131 	uint64_t zh_replay_seq;	/* highest replayed sequence number */
1132 	blkptr_t zh_log;	/* log chain */
1133 	uint64_t zh_claim_seq;	/* highest claimed sequence number */
1134 	uint64_t zh_pad[5];
1135 } zil_header_t;
1136 
1137 #define	OBJSET_PHYS_SIZE 2048
1138 
1139 typedef struct objset_phys {
1140 	dnode_phys_t os_meta_dnode;
1141 	zil_header_t os_zil_header;
1142 	uint64_t os_type;
1143 	uint64_t os_flags;
1144 	char os_pad[OBJSET_PHYS_SIZE - sizeof (dnode_phys_t)*3 -
1145 	    sizeof (zil_header_t) - sizeof (uint64_t)*2];
1146 	dnode_phys_t os_userused_dnode;
1147 	dnode_phys_t os_groupused_dnode;
1148 } objset_phys_t;
1149 
1150 typedef struct dsl_dir_phys {
1151 	uint64_t dd_creation_time; /* not actually used */
1152 	uint64_t dd_head_dataset_obj;
1153 	uint64_t dd_parent_obj;
1154 	uint64_t dd_clone_parent_obj;
1155 	uint64_t dd_child_dir_zapobj;
1156 	/*
1157 	 * how much space our children are accounting for; for leaf
1158 	 * datasets, == physical space used by fs + snaps
1159 	 */
1160 	uint64_t dd_used_bytes;
1161 	uint64_t dd_compressed_bytes;
1162 	uint64_t dd_uncompressed_bytes;
1163 	/* Administrative quota setting */
1164 	uint64_t dd_quota;
1165 	/* Administrative reservation setting */
1166 	uint64_t dd_reserved;
1167 	uint64_t dd_props_zapobj;
1168 	uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
1169 } dsl_dir_phys_t;
1170 
1171 typedef struct dsl_dataset_phys {
1172 	uint64_t ds_dir_obj;
1173 	uint64_t ds_prev_snap_obj;
1174 	uint64_t ds_prev_snap_txg;
1175 	uint64_t ds_next_snap_obj;
1176 	uint64_t ds_snapnames_zapobj;	/* zap obj of snaps; ==0 for snaps */
1177 	uint64_t ds_num_children;	/* clone/snap children; ==0 for head */
1178 	uint64_t ds_creation_time;	/* seconds since 1970 */
1179 	uint64_t ds_creation_txg;
1180 	uint64_t ds_deadlist_obj;
1181 	uint64_t ds_used_bytes;
1182 	uint64_t ds_compressed_bytes;
1183 	uint64_t ds_uncompressed_bytes;
1184 	uint64_t ds_unique_bytes;	/* only relevant to snapshots */
1185 	/*
1186 	 * The ds_fsid_guid is a 56-bit ID that can change to avoid
1187 	 * collisions.  The ds_guid is a 64-bit ID that will never
1188 	 * change, so there is a small probability that it will collide.
1189 	 */
1190 	uint64_t ds_fsid_guid;
1191 	uint64_t ds_guid;
1192 	uint64_t ds_flags;
1193 	blkptr_t ds_bp;
1194 	uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
1195 } dsl_dataset_phys_t;
1196 
1197 /*
1198  * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
1199  */
1200 #define	DMU_POOL_DIRECTORY_OBJECT	1
1201 #define	DMU_POOL_CONFIG			"config"
1202 #define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
1203 #define	DMU_POOL_ROOT_DATASET		"root_dataset"
1204 #define	DMU_POOL_SYNC_BPLIST		"sync_bplist"
1205 #define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
1206 #define	DMU_POOL_ERRLOG_LAST		"errlog_last"
1207 #define	DMU_POOL_SPARES			"spares"
1208 #define	DMU_POOL_DEFLATE		"deflate"
1209 #define	DMU_POOL_HISTORY		"history"
1210 #define	DMU_POOL_PROPS			"pool_props"
1211 #define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
1212 
1213 #define	ZAP_MAGIC 0x2F52AB2ABULL
1214 
1215 #define	FZAP_BLOCK_SHIFT(zap)	((zap)->zap_block_shift)
1216 
1217 #define	ZAP_MAXCD		(uint32_t)(-1)
1218 #define	ZAP_HASHBITS		28
1219 #define	MZAP_ENT_LEN		64
1220 #define	MZAP_NAME_LEN		(MZAP_ENT_LEN - 8 - 4 - 2)
1221 #define	MZAP_MAX_BLKSHIFT	SPA_MAXBLOCKSHIFT
1222 #define	MZAP_MAX_BLKSZ		(1 << MZAP_MAX_BLKSHIFT)
1223 
1224 typedef struct mzap_ent_phys {
1225 	uint64_t mze_value;
1226 	uint32_t mze_cd;
1227 	uint16_t mze_pad;	/* in case we want to chain them someday */
1228 	char mze_name[MZAP_NAME_LEN];
1229 } mzap_ent_phys_t;
1230 
1231 typedef struct mzap_phys {
1232 	uint64_t mz_block_type;	/* ZBT_MICRO */
1233 	uint64_t mz_salt;
1234 	uint64_t mz_pad[6];
1235 	mzap_ent_phys_t mz_chunk[1];
1236 	/* actually variable size depending on block size */
1237 } mzap_phys_t;
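/*
 * Illustrative sketch of a microzap lookup: chunk 0 is part of
 * mzap_phys_t itself, the rest of the (block-sized) buffer is an array
 * of further 64-byte chunks.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <string.h>

static int
mzap_lookup_example(const mzap_phys_t *mz, size_t size,
    const char *name, uint64_t *value)
{
	int i, chunks = size / MZAP_ENT_LEN - 1;

	for (i = 0; i < chunks; i++) {
		const mzap_ent_phys_t *mze = &mz->mz_chunk[i];

		if (strcmp(mze->mze_name, name) == 0) {
			*value = mze->mze_value;
			return (0);
		}
	}
	return (ENOENT);
}
#endif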
1238 
1239 /*
1240  * The (fat) zap is stored in one object. It is an array of
1241  * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
1242  *
1243  * ptrtbl fits in first block:
1244  *	[zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
1245  *
1246  * ptrtbl too big for first block:
1247  *	[zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
1248  *
1249  */
1250 
1251 #define	ZBT_LEAF		((1ULL << 63) + 0)
1252 #define	ZBT_HEADER		((1ULL << 63) + 1)
1253 #define	ZBT_MICRO		((1ULL << 63) + 3)
1254 /* any other values are ptrtbl blocks */
1255 
1256 /*
1257  * the embedded pointer table takes up half a block:
1258  * block size / entry size (2^3) / 2
1259  */
1260 #define	ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)
1261 
1262 /*
1263  * The embedded pointer table starts half-way through the block.  Since
1264  * the pointer table itself is half the block, it starts at (64-bit)
1265  * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
1266  */
1267 #define	ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
1268 	((uint64_t *)(zap)->zap_phys) \
1269 	[(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
1270 
1271 /*
1272  * TAKE NOTE:
1273  * If zap_phys_t is modified, zap_byteswap() must be modified.
1274  */
1275 typedef struct zap_phys {
1276 	uint64_t zap_block_type;	/* ZBT_HEADER */
1277 	uint64_t zap_magic;		/* ZAP_MAGIC */
1278 
1279 	struct zap_table_phys {
1280 		uint64_t zt_blk;	/* starting block number */
1281 		uint64_t zt_numblks;	/* number of blocks */
1282 		uint64_t zt_shift;	/* bits to index it */
1283 		uint64_t zt_nextblk;	/* next (larger) copy start block */
1284 		uint64_t zt_blks_copied; /* number of source blocks copied */
1285 	} zap_ptrtbl;
1286 
1287 	uint64_t zap_freeblk;		/* the next free block */
1288 	uint64_t zap_num_leafs;		/* number of leafs */
1289 	uint64_t zap_num_entries;	/* number of entries */
1290 	uint64_t zap_salt;		/* salt to stir into hash function */
1291 	/*
1292 	 * This structure is followed by padding, and then the embedded
1293 	 * pointer table.  The embedded pointer table takes up second
1294 	 * half of the block.  It is accessed using the
1295 	 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
1296 	 */
1297 } zap_phys_t;
1298 
1299 typedef struct zap_table_phys zap_table_phys_t;
1300 
1301 typedef struct fat_zap {
1302 	int zap_block_shift;			/* block size shift */
1303 	zap_phys_t *zap_phys;
1304 } fat_zap_t;
1305 
1306 #define	ZAP_LEAF_MAGIC 0x2AB1EAF
1307 
1308 /* chunk size = 24 bytes */
1309 #define	ZAP_LEAF_CHUNKSIZE 24
1310 
1311 /*
1312  * The amount of space available for chunks is:
1313  * block size (1<<l->l_bs) - hash entry size (2) * number of hash
1314  * entries - header space (2*chunksize)
1315  */
1316 #define	ZAP_LEAF_NUMCHUNKS(l) \
1317 	(((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
1318 	ZAP_LEAF_CHUNKSIZE - 2)
1319 
1320 /*
1321  * The amount of space within the chunk available for the array is:
1322  * chunk size - space for type (1) - space for next pointer (2)
1323  */
1324 #define	ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
1325 
1326 #define	ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
1327 	(((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)
1328 
1329 /*
1330  * Low water mark:  when there are only this many chunks free, start
1331  * growing the ptrtbl.  Ideally, this should be larger than a
1332  * "reasonably-sized" entry.  20 chunks is more than enough for the
1333  * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
1334  * while still being only around 3% for 16k blocks.
1335  */
1336 #define	ZAP_LEAF_LOW_WATER (20)
1337 
1338 /*
1339  * The leaf hash table has block size / 2^5 (32) number of entries,
1340  * which should be more than enough for the maximum number of entries,
1341  * which is less than block size / CHUNKSIZE (24) / minimum number of
1342  * chunks per entry (3).
1343  */
1344 #define	ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
1345 #define	ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
1346 
1347 /*
1348  * The chunks start immediately after the hash table.  The end of the
1349  * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
1350  * chunk_t.
1351  */
1352 #define	ZAP_LEAF_CHUNK(l, idx) \
1353 	((zap_leaf_chunk_t *) \
1354 	((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
1355 #define	ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)
1356 
1357 typedef enum zap_chunk_type {
1358 	ZAP_CHUNK_FREE = 253,
1359 	ZAP_CHUNK_ENTRY = 252,
1360 	ZAP_CHUNK_ARRAY = 251,
1361 	ZAP_CHUNK_TYPE_MAX = 250
1362 } zap_chunk_type_t;
1363 
1364 /*
1365  * TAKE NOTE:
1366  * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
1367  */
1368 typedef struct zap_leaf_phys {
1369 	struct zap_leaf_header {
1370 		uint64_t lh_block_type;		/* ZBT_LEAF */
1371 		uint64_t lh_pad1;
1372 		uint64_t lh_prefix;		/* hash prefix of this leaf */
1373 		uint32_t lh_magic;		/* ZAP_LEAF_MAGIC */
1374 		uint16_t lh_nfree;		/* number of free chunks */
1375 		uint16_t lh_nentries;		/* number of entries */
1376 		uint16_t lh_prefix_len;		/* num bits used to id this */
1377 
1378 /* above is accessible to zap, below is zap_leaf private */
1379 
1380 		uint16_t lh_freelist;		/* chunk head of free list */
1381 		uint8_t lh_pad2[12];
1382 	} l_hdr; /* 2 24-byte chunks */
1383 
1384 	/*
1385 	 * The header is followed by a hash table with
1386 	 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries.  The hash table is
1387 	 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
1388 	 * zap_leaf_chunk structures.  These structures are accessed
1389 	 * with the ZAP_LEAF_CHUNK() macro.
1390 	 */
1391 
1392 	uint16_t l_hash[1];
1393 } zap_leaf_phys_t;
1394 
1395 typedef union zap_leaf_chunk {
1396 	struct zap_leaf_entry {
1397 		uint8_t le_type;		/* always ZAP_CHUNK_ENTRY */
1398 		uint8_t le_value_intlen;	/* size of ints */
1399 		uint16_t le_next;		/* next entry in hash chain */
1400 		uint16_t le_name_chunk;		/* first chunk of the name */
1401 		uint16_t le_name_numints;	/* bytes in name, incl null */
1402 		uint16_t le_value_chunk;	/* first chunk of the value */
1403 		uint16_t le_value_numints;	/* value length in ints */
1404 		uint32_t le_cd;			/* collision differentiator */
1405 		uint64_t le_hash;		/* hash value of the name */
1406 	} l_entry;
1407 	struct zap_leaf_array {
1408 		uint8_t la_type;		/* always ZAP_CHUNK_ARRAY */
1409 		uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
1410 		uint16_t la_next;		/* next blk or CHAIN_END */
1411 	} l_array;
1412 	struct zap_leaf_free {
1413 		uint8_t lf_type;		/* always ZAP_CHUNK_FREE */
1414 		uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
1415 		uint16_t lf_next;	/* next in free list, or CHAIN_END */
1416 	} l_free;
1417 } zap_leaf_chunk_t;
1418 
1419 typedef struct zap_leaf {
1420 	int l_bs;			/* block size shift */
1421 	zap_leaf_phys_t *l_phys;
1422 } zap_leaf_t;
1423 
1424 /*
1425  * Define special zfs pflags
1426  */
1427 #define	ZFS_XATTR	0x1		/* is an extended attribute */
1428 #define	ZFS_INHERIT_ACE	0x2		/* ace has inheritable ACEs */
1429 #define	ZFS_ACL_TRIVIAL 0x4		/* files ACL is trivial */
1430 
1431 #define	MASTER_NODE_OBJ	1
1432 
1433 /*
1434  * special attributes for master node.
1435  */
1436 
1437 #define	ZFS_FSID		"FSID"
1438 #define	ZFS_UNLINKED_SET	"DELETE_QUEUE"
1439 #define	ZFS_ROOT_OBJ		"ROOT"
1440 #define	ZPL_VERSION_OBJ		"VERSION"
1441 #define	ZFS_PROP_BLOCKPERPAGE	"BLOCKPERPAGE"
1442 #define	ZFS_PROP_NOGROWBLOCKS	"NOGROWBLOCKS"
1443 
1444 #define	ZFS_FLAG_BLOCKPERPAGE	0x1
1445 #define	ZFS_FLAG_NOGROWBLOCKS	0x2
1446 
1447 /*
1448  * ZPL version - rev'd whenever an incompatible on-disk format change
1449  * occurs.  Independent of SPA/DMU/ZAP versioning.
1450  */
1451 
1452 #define	ZPL_VERSION		1ULL
1453 
1454 /*
1455  * The directory entry has the type (currently unused on Solaris) in the
1456  * top 4 bits, and the object number in the low 48 bits.  The "middle"
1457  * 12 bits are unused.
1458  */
1459 #define	ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
1460 #define	ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
1461 #define	ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
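/*
 * Illustrative example: a directory ZAP value built with
 * ZFS_DIRENT_MAKE(0x4, 0x1234) carries the 4-bit type in the top
 * nibble and the object number in the low 48 bits.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
dirent_example(void)
{
	uint64_t de = ZFS_DIRENT_MAKE(0x4, 0x1234);

	assert(ZFS_DIRENT_TYPE(de) == 0x4);
	assert(ZFS_DIRENT_OBJ(de) == 0x1234);
}
#endif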
1462 
1463 typedef struct ace {
1464 	uid_t		a_who;		/* uid or gid */
1465 	uint32_t	a_access_mask;	/* read,write,... */
1466 	uint16_t	a_flags;	/* see below */
1467 	uint16_t	a_type;		/* allow or deny */
1468 } ace_t;
1469 
1470 #define ACE_SLOT_CNT	6
1471 
1472 typedef struct zfs_znode_acl {
1473 	uint64_t	z_acl_extern_obj;	  /* ext acl pieces */
1474 	uint32_t	z_acl_count;		  /* Number of ACEs */
1475 	uint16_t	z_acl_version;		  /* acl version */
1476 	uint16_t	z_acl_pad;		  /* pad */
1477 	ace_t		z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
1478 } zfs_znode_acl_t;
1479 
1480 /*
1481  * This is the persistent portion of the znode.  It is stored
1482  * in the "bonus buffer" of the file.  Short symbolic links
1483  * are also stored in the bonus buffer.
1484  */
1485 typedef struct znode_phys {
1486 	uint64_t zp_atime[2];		/*  0 - last file access time */
1487 	uint64_t zp_mtime[2];		/* 16 - last file modification time */
1488 	uint64_t zp_ctime[2];		/* 32 - last file change time */
1489 	uint64_t zp_crtime[2];		/* 48 - creation time */
1490 	uint64_t zp_gen;		/* 64 - generation (txg of creation) */
1491 	uint64_t zp_mode;		/* 72 - file mode bits */
1492 	uint64_t zp_size;		/* 80 - size of file */
1493 	uint64_t zp_parent;		/* 88 - directory parent (`..') */
1494 	uint64_t zp_links;		/* 96 - number of links to file */
1495 	uint64_t zp_xattr;		/* 104 - DMU object for xattrs */
1496 	uint64_t zp_rdev;		/* 112 - dev_t for VBLK & VCHR files */
1497 	uint64_t zp_flags;		/* 120 - persistent flags */
1498 	uint64_t zp_uid;		/* 128 - file owner */
1499 	uint64_t zp_gid;		/* 136 - owning group */
1500 	uint64_t zp_pad[4];		/* 144 - future */
1501 	zfs_znode_acl_t zp_acl;		/* 176 - 263 ACL */
1502 	/*
1503 	 * Data may pad out any remaining bytes in the znode buffer, eg:
1504 	 *
1505 	 * |<---------------------- dnode_phys (512) ------------------------>|
1506 	 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
1507 	 *			|<---- znode (264) ---->|<---- data (56) ---->|
1508 	 *
1509 	 * At present, we only use this space to store symbolic links.
1510 	 */
1511 } znode_phys_t;
1512 
1513 /*
1514  * In-core vdev representation.
1515  */
1516 struct vdev;
1517 struct spa;
1518 typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
1519     off_t offset, void *buf, size_t bytes);
1520 typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
1521     void *buf, off_t offset, size_t bytes);
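/*
 * Illustrative sketch of a leaf-vdev read callback of the
 * vdev_phys_read_t shape.  The boot environment installs one of these
 * per reachable leaf device; priv (v_read_priv) is whatever handle the
 * backend needs.  The my_disk type and my_disk_read() helper below are
 * hypothetical placeholders, not part of the boot code.
 */
#if 0
static int
example_phys_read(struct vdev *vdev, void *priv, off_t offset,
    void *buf, size_t bytes)
{
	struct my_disk *disk = priv;		/* hypothetical handle */

	(void) vdev;
	return (my_disk_read(disk, buf, bytes, offset));  /* 0 on success */
}
#endif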
1522 
1523 typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;
1524 
1525 typedef struct vdev {
1526 	STAILQ_ENTRY(vdev) v_childlink;	/* link in parent's child list */
1527 	STAILQ_ENTRY(vdev) v_alllink;	/* link in global vdev list */
1528 	vdev_list_t	v_children;	/* children of this vdev */
1529 	const char	*v_name;	/* vdev name */
1530 	const char	*v_phys_path;	/* vdev bootpath */
1531 	const char	*v_devid;	/* vdev devid */
1532 	uint64_t	v_guid;		/* vdev guid */
1533 	int		v_id;		/* index in parent */
1534 	int		v_ashift;	/* offset to block shift */
1535 	int		v_nparity;	/* # parity for raidz */
1536 	struct vdev	*v_top;		/* parent vdev */
1537 	int		v_nchildren;	/* # children */
1538 	vdev_state_t	v_state;	/* current state */
1539 	vdev_phys_read_t *v_phys_read;	/* read from raw leaf vdev */
1540 	vdev_read_t	*v_read;	/* read from vdev */
1541 	void		*v_read_priv;	/* private data for read function */
1542 	struct spa	*spa;		/* link to spa */
1543 } vdev_t;
1544 
1545 /*
1546  * In-core pool representation.
1547  */
1548 typedef STAILQ_HEAD(spa_list, spa) spa_list_t;
1549 
1550 typedef struct spa {
1551 	STAILQ_ENTRY(spa) spa_link;	/* link in global pool list */
1552 	char		*spa_name;	/* pool name */
1553 	uint64_t	spa_guid;	/* pool guid */
1554 	uint64_t	spa_txg;	/* most recent transaction */
1555 	struct uberblock spa_uberblock;	/* best uberblock so far */
1556 	vdev_list_t	spa_vdevs;	/* list of all toplevel vdevs */
1557 	objset_phys_t	spa_mos;	/* MOS for this pool */
1558 	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
1559 	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
1560 	int		spa_inited;	/* initialized */
1561 	vdev_t		*spa_boot_vdev;	/* boot device for kernel */
1562 } spa_t;
1563 
1564 static void decode_embedded_bp_compressed(const blkptr_t *, void *);
1565