/*-
 * Copyright (c) 2002 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and McAfee Research, the Security Research Division of
 * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
 * part of the DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2013 by Saso Kiselkov. All rights reserved.
 */
/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#define MAXNAMELEN 256

#define _NOTE(s)

/* CRC64 table */
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */

/*
 * Macros for various sorts of alignment and rounding when the alignment
 * is known to be a power of 2.
 */
#define P2ALIGN(x, align) ((x) & -(align))
#define P2PHASE(x, align) ((x) & ((align) - 1))
#define P2NPHASE(x, align) (-(x) & ((align) - 1))
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
#define P2END(x, align) (-(~(x) & -(align)))
#define P2PHASEUP(x, align, phase) ((phase) - (((phase) - (x)) & -(align)))
#define P2BOUNDARY(off, len, align) (((off) ^ ((off) + (len) - 1)) > (align) - 1)
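
/*
 * For illustration (hypothetical values, not part of the on-disk format):
 * with x = 13 and a power-of-2 alignment of 8,
 *
 *      P2ALIGN(13, 8)   == 8    (round down)
 *      P2PHASE(13, 8)   == 5    (offset within the 8-byte unit)
 *      P2NPHASE(13, 8)  == 3    (bytes needed to reach the next boundary)
 *      P2ROUNDUP(13, 8) == 16   (round up)
 *      P2END(13, 8)     == 16   (end of the unit containing x)
 */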

/*
 * General-purpose 32-bit and 64-bit bitfield encodings.
 */
#define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len))
#define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len))
#define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low))
#define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low))

#define BF32_GET(x, low, len) BF32_DECODE(x, low, len)
#define BF64_GET(x, low, len) BF64_DECODE(x, low, len)

#define BF32_SET(x, low, len, val) \
        ((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
#define BF64_SET(x, low, len, val) \
        ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))

#define BF32_GET_SB(x, low, len, shift, bias) \
        ((BF32_GET(x, low, len) + (bias)) << (shift))
#define BF64_GET_SB(x, low, len, shift, bias) \
        ((BF64_GET(x, low, len) + (bias)) << (shift))

#define BF32_SET_SB(x, low, len, shift, bias, val) \
        BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
#define BF64_SET_SB(x, low, len, shift, bias, val) \
        BF64_SET(x, low, len, ((val) >> (shift)) - (bias))

/*
 * Macros to reverse byte order
 */
#define BSWAP_8(x) ((x) & 0xff)
#define BSWAP_16(x) ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
#define BSWAP_32(x) ((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
#define BSWAP_64(x) ((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))

/*
 * We currently support nine block sizes, from 512 bytes to 128K.
 * We could go higher, but the benefits are near-zero and the cost
 * of COWing a giant block to modify one byte would become excessive.
 */
#define SPA_MINBLOCKSHIFT 9
#define SPA_MAXBLOCKSHIFT 17
#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT)
#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT)

#define SPA_BLOCKSIZES (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)

/*
 * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
 * The ASIZE encoding should be at least 64 times larger (6 more bits)
 * to support up to 4-way RAID-Z mirror mode with worst-case gang block
 * overhead, three DVAs per bp, plus one more bit in case we do anything
 * else that expands the ASIZE.
 */
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */

/*
 * All SPA data is represented by 128-bit data virtual addresses (DVAs).
 * The members of the dva_t should be considered opaque outside the SPA.
 */
typedef struct dva {
        uint64_t dva_word[2];
} dva_t;

/*
 * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
 */
typedef struct zio_cksum {
        uint64_t zc_word[4];
} zio_cksum_t;
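
/*
 * For illustration (hypothetical word and values): packing and unpacking an
 * 8-bit field that starts at bit 40 of a 64-bit word:
 *
 *      uint64_t w = 0;
 *      BF64_SET(w, 40, 8, 0x2a);               w is now 0x00002a0000000000
 *      uint64_t v = BF64_GET(w, 40, 8);        v == 0x2a
 *
 * The *_SB variants additionally apply a shift and bias, which the size and
 * offset fields below use to store values in 512-byte units.
 */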

/*
 * Each block is described by its DVAs, time of birth, checksum, etc.
 * The word-by-word, bit-by-bit layout of the blkptr is as follows:
 *
 *    64      56      48      40      32      24      16      8       0
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 0  |             vdev1             | GRID  |         ASIZE         |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 1  |G|                           offset1                           |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 2  |             vdev2             | GRID  |         ASIZE         |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 3  |G|                           offset2                           |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 4  |             vdev3             | GRID  |         ASIZE         |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 5  |G|                           offset3                           |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 6  |BDX|lvl| type  | cksum |E| comp|     PSIZE     |     LSIZE     |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 7  |                            padding                            |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 8  |                            padding                            |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 9  |                      physical birth txg                       |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * a  |                       logical birth txg                       |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * b  |                          fill count                           |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * c  |                          checksum[0]                          |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * d  |                          checksum[1]                          |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * e  |                          checksum[2]                          |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * f  |                          checksum[3]                          |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * vdev         virtual device ID
 * offset       offset into virtual device
 * LSIZE        logical size
 * PSIZE        physical size (after compression)
 * ASIZE        allocated size (including RAID-Z parity and gang block headers)
 * GRID         RAID-Z layout information (reserved for future use)
 * cksum        checksum function
 * comp         compression function
 * G            gang block indicator
 * B            byteorder (endianness)
 * D            dedup
 * X            encryption (on version 30, which is not supported)
 * E            blkptr_t contains embedded data (see below)
 * lvl          level of indirection
 * type         DMU object type
 * phys birth   txg of block allocation; zero if same as logical birth txg
 * log. birth   transaction group in which the block was logically born
 * fill count   number of non-zero blocks under this bp
 * checksum[4]  256-bit checksum of the data this bp describes
 */
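
/*
 * For illustration, the property word (word 6 above, blk_prop in the
 * structure below) can be picked apart with the bitfield macros:
 *
 *      BF64_GET(blk_prop,  0, 16)   LSIZE, in 512-byte sectors minus one
 *      BF64_GET(blk_prop, 16, 16)   PSIZE, same encoding
 *      BF64_GET(blk_prop, 32,  7)   compression function
 *      BF64_GET(blk_prop, 39,  1)   embedded-data flag (E)
 *      BF64_GET(blk_prop, 40,  8)   checksum function
 *      BF64_GET(blk_prop, 48,  8)   DMU object type
 *      BF64_GET(blk_prop, 56,  5)   level of indirection
 *
 * The BP_GET_*() accessors defined further down wrap exactly these bit
 * positions.
 */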

/*
 * "Embedded" blkptr_t's don't actually point to a block, instead they
 * have a data payload embedded in the blkptr_t itself. See the comment
 * in blkptr.c for more details.
 *
 * The blkptr_t is laid out as follows:
 *
 *    64      56      48      40      32      24      16      8       0
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 0  |                            payload                            |
 * 1  |                            payload                            |
 * 2  |                            payload                            |
 * 3  |                            payload                            |
 * 4  |                            payload                            |
 * 5  |                            payload                            |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 6  |BDX|lvl| type  | etype |E| comp| PSIZE |         LSIZE         |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * 7  |                            payload                            |
 * 8  |                            payload                            |
 * 9  |                            payload                            |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * a  |                       logical birth txg                       |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 * b  |                            payload                            |
 * c  |                            payload                            |
 * d  |                            payload                            |
 * e  |                            payload                            |
 * f  |                            payload                            |
 *    +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * payload          contains the embedded data
 * B (byteorder)    byteorder (endianness)
 * D (dedup)        padding (set to zero)
 * X                encryption (set to zero; see above)
 * E (embedded)     set to one
 * lvl              indirection level
 * type             DMU object type
 * etype            how to interpret embedded data (BP_EMBEDDED_TYPE_*)
 * comp             compression function of payload
 * PSIZE            size of payload after compression, in bytes
 * LSIZE            logical size of payload, in bytes
 *                  note that 25 bits is enough to store the largest
 *                  "normal" BP's LSIZE (2^16 * 2^9) in bytes
 * log. birth       transaction group in which the block was logically born
 *
 * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
 * bp's they are stored in units of SPA_MINBLOCKSHIFT.
 * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
 * The B, D, X, lvl, type, and comp fields are stored the same as with normal
 * BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
 * be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
 * other macros, as they assert that they are only used on BP's of the correct
 * "embedded-ness".
 */

#define BPE_GET_ETYPE(bp) \
        (ASSERT(BP_IS_EMBEDDED(bp)), \
        BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
        ASSERT(BP_IS_EMBEDDED(bp)); \
        BF64_SET((bp)->blk_prop, 40, 8, t); \
_NOTE(CONSTCOND) } while (0)

#define BPE_GET_LSIZE(bp) \
        (ASSERT(BP_IS_EMBEDDED(bp)), \
        BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
        ASSERT(BP_IS_EMBEDDED(bp)); \
        BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)

#define BPE_GET_PSIZE(bp) \
        (ASSERT(BP_IS_EMBEDDED(bp)), \
        BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
        ASSERT(BP_IS_EMBEDDED(bp)); \
        BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)

typedef enum bp_embedded_type {
        BP_EMBEDDED_TYPE_DATA,
        BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
        NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
} bp_embedded_type_t;
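
/*
 * For illustration (hypothetical sizes): an embedded block pointer carrying
 * 100 bytes of compressed data that expand to 512 bytes would have
 *
 *      BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA
 *      BPE_GET_PSIZE(bp) == 100    (stored in bits 25-31 as 99)
 *      BPE_GET_LSIZE(bp) == 512    (stored in bits 0-24 as 511)
 *
 * with up to BPE_PAYLOAD_SIZE (defined just below, 14 * 8 = 112) bytes of
 * payload spread over the words marked "payload" above.
 */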

#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
        ((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)

#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */

typedef struct blkptr {
        dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
        uint64_t blk_prop; /* size, compression, type, etc */
        uint64_t blk_pad[2]; /* Extra space for the future */
        uint64_t blk_phys_birth; /* txg when block was allocated */
        uint64_t blk_birth; /* transaction group at birth */
        uint64_t blk_fill; /* fill count */
        zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;

/*
 * Macros to get and set fields in a bp or DVA.
 */
#define DVA_GET_ASIZE(dva) \
        BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
        BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
        SPA_MINBLOCKSHIFT, 0, x)

#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)

#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, 32)
#define DVA_SET_VDEV(dva, x) BF64_SET((dva)->dva_word[0], 32, 32, x)

#define DVA_GET_OFFSET(dva) \
        BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
        BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)

#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)

#define BP_GET_LSIZE(bp) \
        (BP_IS_EMBEDDED(bp) ? \
        (BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
        BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
        ASSERT(!BP_IS_EMBEDDED(bp)); \
        BF64_SET_SB((bp)->blk_prop, \
            0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)

#define BP_GET_PSIZE(bp) \
        BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define BP_SET_PSIZE(bp, x) \
        BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)

#define BP_GET_COMPRESS(bp) BF64_GET((bp)->blk_prop, 32, 7)
#define BP_SET_COMPRESS(bp, x) BF64_SET((bp)->blk_prop, 32, 7, x)

#define BP_GET_CHECKSUM(bp) BF64_GET((bp)->blk_prop, 40, 8)
#define BP_SET_CHECKSUM(bp, x) BF64_SET((bp)->blk_prop, 40, 8, x)

#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)

#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)

#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)

#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)

#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
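
/*
 * For illustration (hypothetical field values): the size fields are stored
 * as "number of 512-byte sectors, minus one", so a raw LSIZE field of 0
 * means BP_GET_LSIZE(bp) == 512 and a raw value of 0xff means
 * BP_GET_LSIZE(bp) == 128K. Likewise DVA offsets and ASIZE are stored in
 * 512-byte sectors; DVA_GET_OFFSET() and DVA_GET_ASIZE() convert them back
 * to bytes.
 */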

#define BP_PHYSICAL_BIRTH(bp) \
        ((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)

#define BP_GET_ASIZE(bp) \
        (DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
        DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define BP_GET_UCSIZE(bp) \
        ((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
        BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))

#define BP_GET_NDVAS(bp) \
        (!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
        !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
        !!DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define DVA_EQUAL(dva1, dva2) \
        ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
        (dva1)->dva_word[0] == (dva2)->dva_word[0])

#define ZIO_CHECKSUM_EQUAL(zc1, zc2) \
        (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
        ((zc1).zc_word[1] - (zc2).zc_word[1]) | \
        ((zc1).zc_word[2] - (zc2).zc_word[2]) | \
        ((zc1).zc_word[3] - (zc2).zc_word[3])))

#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)

#define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3) \
{ \
        (zcp)->zc_word[0] = w0; \
        (zcp)->zc_word[1] = w1; \
        (zcp)->zc_word[2] = w2; \
        (zcp)->zc_word[3] = w3; \
}

#define BP_IDENTITY(bp) (&(bp)->blk_dva[0])
#define BP_IS_GANG(bp) DVA_GET_GANG(BP_IDENTITY(bp))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
        (dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) DVA_IS_EMPTY(BP_IDENTITY(bp))
#define BP_IS_OLDER(bp, txg) (!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))

#define BP_ZERO(bp) \
{ \
        (bp)->blk_dva[0].dva_word[0] = 0; \
        (bp)->blk_dva[0].dva_word[1] = 0; \
        (bp)->blk_dva[1].dva_word[0] = 0; \
        (bp)->blk_dva[1].dva_word[1] = 0; \
        (bp)->blk_dva[2].dva_word[0] = 0; \
        (bp)->blk_dva[2].dva_word[1] = 0; \
        (bp)->blk_prop = 0; \
        (bp)->blk_pad[0] = 0; \
        (bp)->blk_pad[1] = 0; \
        (bp)->blk_phys_birth = 0; \
        (bp)->blk_birth = 0; \
        (bp)->blk_fill = 0; \
        ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}

/*
 * Embedded checksum
 */
#define ZEC_MAGIC 0x210da7ab10c7a11ULL

typedef struct zio_eck {
        uint64_t zec_magic; /* for validation, endianness */
        zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
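
/*
 * For illustration (hypothetical loop): a reader that wants every copy of a
 * block walks the DVAs that are actually in use, skipping holes:
 *
 *      if (!BP_IS_HOLE(bp)) {
 *              for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
 *                      const dva_t *dva = &bp->blk_dva[d];
 *                      read_copy(DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
 *                          BP_GET_PSIZE(bp));
 *              }
 *      }
 *
 * where read_copy() stands in for whatever I/O routine the consumer has.
 */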

/*
 * Gang block headers are self-checksumming and contain an array
 * of block pointers.
 */
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
        sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
        sizeof (zio_eck_t) - \
        (SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
        sizeof (uint64_t))

typedef struct zio_gbh {
        blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
        uint64_t zg_filler[SPA_GBH_FILLER];
        zio_eck_t zg_tail;
} zio_gbh_phys_t;

#define VDEV_RAIDZ_MAXPARITY 3

#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_pad2) to skip */
#define VDEV_SKIP_SIZE (VDEV_PAD_SIZE * 2)
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)

#define VDEV_UBERBLOCK_SHIFT(vd) \
        MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
        (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
        offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))

typedef struct vdev_phys {
        char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
        zio_eck_t vp_zbt;
} vdev_phys_t;

typedef struct vdev_label {
        char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
        char vl_pad2[VDEV_PAD_SIZE]; /* 8K */
        vdev_phys_t vl_vdev_phys; /* 112K */
        char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */

/*
 * vdev_dirty() flags
 */
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02

/*
 * Size and offset of embedded boot loader region on each label.
 * The total size of the first two labels plus the boot area is 4MB.
 */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
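
/*
 * For illustration (hypothetical ashift): on a vdev with 4K sectors
 * (v_ashift == 12), VDEV_UBERBLOCK_SHIFT() is 12, so the 128K uberblock
 * ring holds VDEV_UBERBLOCK_COUNT() == 32 slots of 4K each, and slot n
 * starts at byte offset 128K + (n << 12) within the 256K label.
 */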

/*
 * Size of label regions at the start and end of each leaf device.
 */
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4

enum zio_checksum {
        ZIO_CHECKSUM_INHERIT = 0,
        ZIO_CHECKSUM_ON,
        ZIO_CHECKSUM_OFF,
        ZIO_CHECKSUM_LABEL,
        ZIO_CHECKSUM_GANG_HEADER,
        ZIO_CHECKSUM_ZILOG,
        ZIO_CHECKSUM_FLETCHER_2,
        ZIO_CHECKSUM_FLETCHER_4,
        ZIO_CHECKSUM_SHA256,
        ZIO_CHECKSUM_ZILOG2,
        ZIO_CHECKSUM_FUNCTIONS
};

#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON

enum zio_compress {
        ZIO_COMPRESS_INHERIT = 0,
        ZIO_COMPRESS_ON,
        ZIO_COMPRESS_OFF,
        ZIO_COMPRESS_LZJB,
        ZIO_COMPRESS_EMPTY,
        ZIO_COMPRESS_GZIP_1,
        ZIO_COMPRESS_GZIP_2,
        ZIO_COMPRESS_GZIP_3,
        ZIO_COMPRESS_GZIP_4,
        ZIO_COMPRESS_GZIP_5,
        ZIO_COMPRESS_GZIP_6,
        ZIO_COMPRESS_GZIP_7,
        ZIO_COMPRESS_GZIP_8,
        ZIO_COMPRESS_GZIP_9,
        ZIO_COMPRESS_ZLE,
        ZIO_COMPRESS_LZ4,
        ZIO_COMPRESS_FUNCTIONS
};

#define ZIO_COMPRESS_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF

/* nvlist pack encoding */
#define NV_ENCODE_NATIVE 0
#define NV_ENCODE_XDR 1

typedef enum {
        DATA_TYPE_UNKNOWN = 0,
        DATA_TYPE_BOOLEAN,
        DATA_TYPE_BYTE,
        DATA_TYPE_INT16,
        DATA_TYPE_UINT16,
        DATA_TYPE_INT32,
        DATA_TYPE_UINT32,
        DATA_TYPE_INT64,
        DATA_TYPE_UINT64,
        DATA_TYPE_STRING,
        DATA_TYPE_BYTE_ARRAY,
        DATA_TYPE_INT16_ARRAY,
        DATA_TYPE_UINT16_ARRAY,
        DATA_TYPE_INT32_ARRAY,
        DATA_TYPE_UINT32_ARRAY,
        DATA_TYPE_INT64_ARRAY,
        DATA_TYPE_UINT64_ARRAY,
        DATA_TYPE_STRING_ARRAY,
        DATA_TYPE_HRTIME,
        DATA_TYPE_NVLIST,
        DATA_TYPE_NVLIST_ARRAY,
        DATA_TYPE_BOOLEAN_VALUE,
        DATA_TYPE_INT8,
        DATA_TYPE_UINT8,
        DATA_TYPE_BOOLEAN_ARRAY,
        DATA_TYPE_INT8_ARRAY,
        DATA_TYPE_UINT8_ARRAY
} data_type_t;

/*
 * On-disk version number.
 */
#define SPA_VERSION_1 1ULL
#define SPA_VERSION_2 2ULL
#define SPA_VERSION_3 3ULL
#define SPA_VERSION_4 4ULL
#define SPA_VERSION_5 5ULL
#define SPA_VERSION_6 6ULL
#define SPA_VERSION_7 7ULL
#define SPA_VERSION_8 8ULL
#define SPA_VERSION_9 9ULL
#define SPA_VERSION_10 10ULL
#define SPA_VERSION_11 11ULL
#define SPA_VERSION_12 12ULL
#define SPA_VERSION_13 13ULL
#define SPA_VERSION_14 14ULL
#define SPA_VERSION_15 15ULL
#define SPA_VERSION_16 16ULL
#define SPA_VERSION_17 17ULL
#define SPA_VERSION_18 18ULL
#define SPA_VERSION_19 19ULL
#define SPA_VERSION_20 20ULL
#define SPA_VERSION_21 21ULL
#define SPA_VERSION_22 22ULL
#define SPA_VERSION_23 23ULL
#define SPA_VERSION_24 24ULL
#define SPA_VERSION_25 25ULL
#define SPA_VERSION_26 26ULL
#define SPA_VERSION_27 27ULL
#define SPA_VERSION_28 28ULL
#define SPA_VERSION_5000 5000ULL

/*
 * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
 * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
 * and do the appropriate changes. Also bump the version number in
 * usr/src/grub/capability.
 */
#define SPA_VERSION SPA_VERSION_5000
#define SPA_VERSION_STRING "5000"

/*
 * Symbolic names for the changes that caused a SPA_VERSION switch.
 * Used in the code when checking for presence or absence of a feature.
 * Feel free to define multiple symbolic names for each version if there
 * were multiple changes to on-disk structures during that version.
 *
 * NOTE: When checking the current SPA_VERSION in your code, be sure
 * to use spa_version() since it reports the version of the
 * last synced uberblock. Checking the in-flight version can
 * be dangerous in some cases.
 */
#define SPA_VERSION_INITIAL SPA_VERSION_1
#define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2
#define SPA_VERSION_SPARES SPA_VERSION_3
#define SPA_VERSION_RAID6 SPA_VERSION_3
#define SPA_VERSION_BPLIST_ACCOUNT SPA_VERSION_3
#define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3
#define SPA_VERSION_DNODE_BYTES SPA_VERSION_3
#define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4
#define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5
#define SPA_VERSION_BOOTFS SPA_VERSION_6
#define SPA_VERSION_SLOGS SPA_VERSION_7
#define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8
#define SPA_VERSION_FUID SPA_VERSION_9
#define SPA_VERSION_REFRESERVATION SPA_VERSION_9
#define SPA_VERSION_REFQUOTA SPA_VERSION_9
#define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9
#define SPA_VERSION_L2CACHE SPA_VERSION_10
#define SPA_VERSION_NEXT_CLONES SPA_VERSION_11
#define SPA_VERSION_ORIGIN SPA_VERSION_11
#define SPA_VERSION_DSL_SCRUB SPA_VERSION_11
#define SPA_VERSION_SNAP_PROPS SPA_VERSION_12
#define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13
#define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14
#define SPA_VERSION_USERSPACE SPA_VERSION_15
#define SPA_VERSION_STMF_PROP SPA_VERSION_16
#define SPA_VERSION_RAIDZ3 SPA_VERSION_17
#define SPA_VERSION_USERREFS SPA_VERSION_18
#define SPA_VERSION_HOLES SPA_VERSION_19
#define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20
#define SPA_VERSION_DEDUP SPA_VERSION_21
#define SPA_VERSION_RECVD_PROPS SPA_VERSION_22
#define SPA_VERSION_SLIM_ZIL SPA_VERSION_23
#define SPA_VERSION_SA SPA_VERSION_24
#define SPA_VERSION_SCAN SPA_VERSION_25
#define SPA_VERSION_DIR_CLONES SPA_VERSION_26
#define SPA_VERSION_DEADLISTS SPA_VERSION_26
#define SPA_VERSION_FAST_SNAP SPA_VERSION_27
#define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28
#define SPA_VERSION_BEFORE_FEATURES SPA_VERSION_28
#define SPA_VERSION_FEATURES SPA_VERSION_5000

#define SPA_VERSION_IS_SUPPORTED(v) \
        (((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
        ((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
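
/*
 * For illustration: SPA_VERSION_IS_SUPPORTED(28) and
 * SPA_VERSION_IS_SUPPORTED(SPA_VERSION_5000) are both true, while any
 * version in between (29-4999) is rejected as unsupported.
 */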

/*
 * The following are configuration names used in the nvlist describing a pool's
 * configuration.
 */
#define ZPOOL_CONFIG_VERSION "version"
#define ZPOOL_CONFIG_POOL_NAME "name"
#define ZPOOL_CONFIG_POOL_STATE "state"
#define ZPOOL_CONFIG_POOL_TXG "txg"
#define ZPOOL_CONFIG_POOL_GUID "pool_guid"
#define ZPOOL_CONFIG_CREATE_TXG "create_txg"
#define ZPOOL_CONFIG_TOP_GUID "top_guid"
#define ZPOOL_CONFIG_VDEV_TREE "vdev_tree"
#define ZPOOL_CONFIG_TYPE "type"
#define ZPOOL_CONFIG_CHILDREN "children"
#define ZPOOL_CONFIG_ID "id"
#define ZPOOL_CONFIG_GUID "guid"
#define ZPOOL_CONFIG_PATH "path"
#define ZPOOL_CONFIG_DEVID "devid"
#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
#define ZPOOL_CONFIG_ASIZE "asize"
#define ZPOOL_CONFIG_DTL "DTL"
#define ZPOOL_CONFIG_STATS "stats"
#define ZPOOL_CONFIG_WHOLE_DISK "whole_disk"
#define ZPOOL_CONFIG_ERRCOUNT "error_count"
#define ZPOOL_CONFIG_NOT_PRESENT "not_present"
#define ZPOOL_CONFIG_SPARES "spares"
#define ZPOOL_CONFIG_IS_SPARE "is_spare"
#define ZPOOL_CONFIG_NPARITY "nparity"
#define ZPOOL_CONFIG_HOSTID "hostid"
#define ZPOOL_CONFIG_HOSTNAME "hostname"
#define ZPOOL_CONFIG_IS_LOG "is_log"
#define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */
#define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read"

/*
 * The persistent vdev state is stored as separate values rather than a single
 * 'vdev_state' entry. This is because a device can be in multiple states, such
 * as offline and degraded.
 */
#define ZPOOL_CONFIG_OFFLINE "offline"
#define ZPOOL_CONFIG_FAULTED "faulted"
#define ZPOOL_CONFIG_DEGRADED "degraded"
#define ZPOOL_CONFIG_REMOVED "removed"
#define ZPOOL_CONFIG_FRU "fru"
#define ZPOOL_CONFIG_AUX_STATE "aux_state"

#define VDEV_TYPE_ROOT "root"
#define VDEV_TYPE_MIRROR "mirror"
#define VDEV_TYPE_REPLACING "replacing"
#define VDEV_TYPE_RAIDZ "raidz"
#define VDEV_TYPE_DISK "disk"
#define VDEV_TYPE_FILE "file"
#define VDEV_TYPE_MISSING "missing"
#define VDEV_TYPE_HOLE "hole"
#define VDEV_TYPE_SPARE "spare"
#define VDEV_TYPE_LOG "log"
#define VDEV_TYPE_L2CACHE "l2cache"
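
/*
 * For illustration (hypothetical pool, abridged): the nvlist packed into a
 * label's vp_nvlist typically carries top-level pairs such as
 * "version"=5000, "name"="tank", "state", "txg" and "pool_guid", plus a
 * "vdev_tree" nvlist whose "type" is "root" and whose "children" array
 * describes the mirror/raidz/disk hierarchy using the names above.
 */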

/*
 * This is needed in userland to report the minimum necessary device size.
 */
#define SPA_MINDEVSIZE (64ULL << 20)

/*
 * The location of the pool configuration repository, shared between kernel and
 * userland.
 */
#define ZPOOL_CACHE "/boot/zfs/zpool.cache"

/*
 * vdev states are ordered from least to most healthy.
 * A vdev that's CANT_OPEN or below is considered unusable.
 */
typedef enum vdev_state {
        VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */
        VDEV_STATE_CLOSED, /* Not currently open */
        VDEV_STATE_OFFLINE, /* Not allowed to open */
        VDEV_STATE_REMOVED, /* Explicitly removed from system */
        VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */
        VDEV_STATE_FAULTED, /* External request to fault device */
        VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */
        VDEV_STATE_HEALTHY /* Presumed good */
} vdev_state_t;

/*
 * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field
 * of the vdev stats structure uses these constants to distinguish why.
 */
typedef enum vdev_aux {
        VDEV_AUX_NONE, /* no error */
        VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */
        VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */
        VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */
        VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */
        VDEV_AUX_TOO_SMALL, /* vdev size is too small */
        VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */
        VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */
        VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */
        VDEV_AUX_SPARED /* hot spare used in another pool */
} vdev_aux_t;

/*
 * pool state. The following states are written to disk as part of the normal
 * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE. The remaining states are
 * software abstractions used at various levels to communicate pool state.
 */
typedef enum pool_state {
        POOL_STATE_ACTIVE = 0, /* In active use */
        POOL_STATE_EXPORTED, /* Explicitly exported */
        POOL_STATE_DESTROYED, /* Explicitly destroyed */
        POOL_STATE_SPARE, /* Reserved for hot spare use */
        POOL_STATE_UNINITIALIZED, /* Internal spa_t state */
        POOL_STATE_UNAVAIL, /* Internal libzfs state */
        POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */
} pool_state_t;

/*
 * The uberblock version is incremented whenever an incompatible on-disk
 * format change is made to the SPA, DMU, or ZAP.
 *
 * Note: the first two fields should never be moved. When a storage pool
 * is opened, the uberblock must be read off the disk before the version
 * can be checked. If the ub_version field is moved, we may not detect
 * version mismatch. If the ub_magic field is moved, applications that
 * expect the magic number in the first word won't work.
 */
#define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */
#define UBERBLOCK_SHIFT 10 /* up to 1K */

struct uberblock {
        uint64_t ub_magic; /* UBERBLOCK_MAGIC */
        uint64_t ub_version; /* SPA_VERSION */
        uint64_t ub_txg; /* txg of last sync */
        uint64_t ub_guid_sum; /* sum of all vdev guids */
        uint64_t ub_timestamp; /* UTC time of last sync */
        blkptr_t ub_rootbp; /* MOS objset_phys_t */
};
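
/*
 * For illustration (sketch of what an importer typically does): each slot
 * of the uberblock ring is read, slots whose ub_magic is neither
 * UBERBLOCK_MAGIC nor its byteswapped form are discarded, and the valid
 * uberblock with the highest ub_txg (breaking ties on ub_timestamp) becomes
 * the active one; its ub_rootbp then leads to the MOS.
 */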

/*
 * Flags.
 */
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE 2

/*
 * Fixed constants.
 */
#define DNODE_SHIFT 9 /* 512 bytes */
#define DN_MIN_INDBLKSHIFT 10 /* 1k */
#define DN_MAX_INDBLKSHIFT 14 /* 16k */
#define DNODE_BLOCK_SHIFT 14 /* 16k */
#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */

/*
 * Derived constants.
 */
#define DNODE_SIZE (1 << DNODE_SHIFT)
#define DN_MAX_NBLKPTR ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)

#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)

/* The +2 here is a cheesy way to round up */
#define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
        (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))

#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \
        (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))

#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
        (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)

#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))

/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES (1<<0)
#define DNODE_FLAG_USERUSED_ACCOUNTED (1<<1)

/* Does dnode have a SA spill blkptr in bonus? */
#define DNODE_FLAG_SPILL_BLKPTR (1<<2)

typedef struct dnode_phys {
        uint8_t dn_type; /* dmu_object_type_t */
        uint8_t dn_indblkshift; /* ln2(indirect block size) */
        uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
        uint8_t dn_nblkptr; /* length of dn_blkptr */
        uint8_t dn_bonustype; /* type of data in bonus buffer */
        uint8_t dn_checksum; /* ZIO_CHECKSUM type */
        uint8_t dn_compress; /* ZIO_COMPRESS type */
        uint8_t dn_flags; /* DNODE_FLAG_* */
        uint16_t dn_datablkszsec; /* data block size in 512b sectors */
        uint16_t dn_bonuslen; /* length of dn_bonus */
        uint8_t dn_pad2[4];

        /* accounting is protected by dn_dirty_mtx */
        uint64_t dn_maxblkid; /* largest allocated block ID */
        uint64_t dn_used; /* bytes (or sectors) of disk space */

        uint64_t dn_pad3[4];

        blkptr_t dn_blkptr[1];
        uint8_t dn_bonus[DN_MAX_BONUSLEN - sizeof (blkptr_t)];
        blkptr_t dn_spill;
} dnode_phys_t;
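
/*
 * For illustration (hypothetical dnode): a plain file dnode with
 * dn_datablkszsec == 256 and dn_indblkshift == 14 uses 128K data blocks
 * (256 << SPA_MINBLOCKSHIFT) and 16K indirect blocks; its bonus buffer
 * starts at DN_BONUS(dnp) and holds dn_bonuslen bytes of dn_bonustype data
 * (a znode_phys_t or SA bonus for ZPL files, defined further below).
 */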

typedef enum dmu_object_type {
        DMU_OT_NONE,
        /* general: */
        DMU_OT_OBJECT_DIRECTORY, /* ZAP */
        DMU_OT_OBJECT_ARRAY, /* UINT64 */
        DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
        DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
        DMU_OT_BPLIST, /* UINT64 */
        DMU_OT_BPLIST_HDR, /* UINT64 */
        /* spa: */
        DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
        DMU_OT_SPACE_MAP, /* UINT64 */
        /* zil: */
        DMU_OT_INTENT_LOG, /* UINT64 */
        /* dmu: */
        DMU_OT_DNODE, /* DNODE */
        DMU_OT_OBJSET, /* OBJSET */
        /* dsl: */
        DMU_OT_DSL_DIR, /* UINT64 */
        DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
        DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
        DMU_OT_DSL_PROPS, /* ZAP */
        DMU_OT_DSL_DATASET, /* UINT64 */
        /* zpl: */
        DMU_OT_ZNODE, /* ZNODE */
        DMU_OT_OLDACL, /* Old ACL */
        DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
        DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
        DMU_OT_MASTER_NODE, /* ZAP */
        DMU_OT_UNLINKED_SET, /* ZAP */
        /* zvol: */
        DMU_OT_ZVOL, /* UINT8 */
        DMU_OT_ZVOL_PROP, /* ZAP */
        /* other; for testing only! */
        DMU_OT_PLAIN_OTHER, /* UINT8 */
        DMU_OT_UINT64_OTHER, /* UINT64 */
        DMU_OT_ZAP_OTHER, /* ZAP */
        /* new object types: */
        DMU_OT_ERROR_LOG, /* ZAP */
        DMU_OT_SPA_HISTORY, /* UINT8 */
        DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
        DMU_OT_POOL_PROPS, /* ZAP */
        DMU_OT_DSL_PERMS, /* ZAP */
        DMU_OT_ACL, /* ACL */
        DMU_OT_SYSACL, /* SYSACL */
        DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */
        DMU_OT_FUID_SIZE, /* FUID table size UINT64 */
        DMU_OT_NEXT_CLONES, /* ZAP */
        DMU_OT_SCAN_QUEUE, /* ZAP */
        DMU_OT_USERGROUP_USED, /* ZAP */
        DMU_OT_USERGROUP_QUOTA, /* ZAP */
        DMU_OT_USERREFS, /* ZAP */
        DMU_OT_DDT_ZAP, /* ZAP */
        DMU_OT_DDT_STATS, /* ZAP */
        DMU_OT_SA, /* System attr */
        DMU_OT_SA_MASTER_NODE, /* ZAP */
        DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */
        DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */
        DMU_OT_SCAN_XLATE, /* ZAP */
        DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */
        DMU_OT_NUMTYPES
} dmu_object_type_t;

typedef enum dmu_objset_type {
        DMU_OST_NONE,
        DMU_OST_META,
        DMU_OST_ZFS,
        DMU_OST_ZVOL,
        DMU_OST_OTHER, /* For testing only! */
        DMU_OST_ANY, /* Be careful! */
        DMU_OST_NUMTYPES
} dmu_objset_type_t;

/*
 * Header for all bonus and spill buffers.
 * The header has a fixed portion with a variable number
 * of "lengths" depending on the number of variable-sized
 * attributes, which are determined by the "layout number".
 */

#define SA_MAGIC 0x2F505A /* ZFS SA */
typedef struct sa_hdr_phys {
        uint32_t sa_magic;
        uint16_t sa_layout_info; /* Encoded with hdrsize and layout number */
        uint16_t sa_lengths[1]; /* optional sizes for variable length attrs */
        /* ... Data follows the lengths. */
} sa_hdr_phys_t;

/*
 * sa_hdr_phys -> sa_layout_info
 *
 *    16      10       0
 *    +--------+-------+
 *    | hdrsz  | layout|
 *    +--------+-------+
 *
 * Bits 0-9 are the layout number.
 * Bits 10-15 are the size of the header.
 * The hdrsize is the number * 8.
 *
 * For example:
 * hdrsz of 1 ==> 8 byte header
 *          2 ==> 16 byte header
 *
 */

#define SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
#define SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0)
#define SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
{ \
        BF32_SET_SB(x, 10, 6, 3, 0, size); \
        BF32_SET(x, 0, 10, num); \
}

#define SA_MODE_OFFSET 0
#define SA_SIZE_OFFSET 8
#define SA_GEN_OFFSET 16
#define SA_UID_OFFSET 24
#define SA_GID_OFFSET 32
#define SA_PARENT_OFFSET 40
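
/*
 * For illustration (hypothetical bonus buffer): a ZPL file whose bonus type
 * is DMU_OT_SA starts with an sa_hdr_phys_t whose sa_magic is SA_MAGIC; if
 * SA_HDR_SIZE(hdr) is 16, the attribute data begins 16 bytes in, and for
 * the standard ZPL layout the mode, size, generation, uid, gid and parent
 * can be read at SA_MODE_OFFSET through SA_PARENT_OFFSET past that point.
 */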

/*
 * Intent log header - this on disk structure holds fields to manage
 * the log. All fields are 64 bit to easily handle cross architectures.
 */
typedef struct zil_header {
        uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
        uint64_t zh_replay_seq; /* highest replayed sequence number */
        blkptr_t zh_log; /* log chain */
        uint64_t zh_claim_seq; /* highest claimed sequence number */
        uint64_t zh_pad[5];
} zil_header_t;

#define OBJSET_PHYS_SIZE 2048

typedef struct objset_phys {
        dnode_phys_t os_meta_dnode;
        zil_header_t os_zil_header;
        uint64_t os_type;
        uint64_t os_flags;
        char os_pad[OBJSET_PHYS_SIZE - sizeof (dnode_phys_t)*3 -
            sizeof (zil_header_t) - sizeof (uint64_t)*2];
        dnode_phys_t os_userused_dnode;
        dnode_phys_t os_groupused_dnode;
} objset_phys_t;

typedef struct dsl_dir_phys {
        uint64_t dd_creation_time; /* not actually used */
        uint64_t dd_head_dataset_obj;
        uint64_t dd_parent_obj;
        uint64_t dd_clone_parent_obj;
        uint64_t dd_child_dir_zapobj;
        /*
         * how much space our children are accounting for; for leaf
         * datasets, == physical space used by fs + snaps
         */
        uint64_t dd_used_bytes;
        uint64_t dd_compressed_bytes;
        uint64_t dd_uncompressed_bytes;
        /* Administrative quota setting */
        uint64_t dd_quota;
        /* Administrative reservation setting */
        uint64_t dd_reserved;
        uint64_t dd_props_zapobj;
        uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;

typedef struct dsl_dataset_phys {
        uint64_t ds_dir_obj;
        uint64_t ds_prev_snap_obj;
        uint64_t ds_prev_snap_txg;
        uint64_t ds_next_snap_obj;
        uint64_t ds_snapnames_zapobj; /* zap obj of snaps; ==0 for snaps */
        uint64_t ds_num_children; /* clone/snap children; ==0 for head */
        uint64_t ds_creation_time; /* seconds since 1970 */
        uint64_t ds_creation_txg;
        uint64_t ds_deadlist_obj;
        uint64_t ds_used_bytes;
        uint64_t ds_compressed_bytes;
        uint64_t ds_uncompressed_bytes;
        uint64_t ds_unique_bytes; /* only relevant to snapshots */
        /*
         * The ds_fsid_guid is a 56-bit ID that can change to avoid
         * collisions. The ds_guid is a 64-bit ID that will never
         * change, so there is a small probability that it will collide.
         */
        uint64_t ds_fsid_guid;
        uint64_t ds_guid;
        uint64_t ds_flags;
        blkptr_t ds_bp;
        uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;
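
/*
 * For illustration, the usual walk from an uberblock to a filesystem's root
 * directory goes roughly:
 *
 *      ub_rootbp -> MOS objset_phys_t
 *      MOS object DMU_POOL_DIRECTORY_OBJECT ("config", "root_dataset", ...)
 *      "root_dataset" -> dsl_dir_phys_t -> dd_head_dataset_obj
 *      dsl_dataset_phys_t.ds_bp -> filesystem objset_phys_t
 *      MASTER_NODE_OBJ ("ROOT") -> root directory dnode
 *
 * Each arrow is a blkptr_t read or an object-number lookup using the
 * structures in this file.
 */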

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define DMU_POOL_DIRECTORY_OBJECT 1
#define DMU_POOL_CONFIG "config"
#define DMU_POOL_ROOT_DATASET "root_dataset"
#define DMU_POOL_SYNC_BPLIST "sync_bplist"
#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
#define DMU_POOL_ERRLOG_LAST "errlog_last"
#define DMU_POOL_SPARES "spares"
#define DMU_POOL_DEFLATE "deflate"
#define DMU_POOL_HISTORY "history"
#define DMU_POOL_PROPS "pool_props"

#define ZAP_MAGIC 0x2F52AB2ABULL

#define FZAP_BLOCK_SHIFT(zap) ((zap)->zap_block_shift)

#define ZAP_MAXCD (uint32_t)(-1)
#define ZAP_HASHBITS 28
#define MZAP_ENT_LEN 64
#define MZAP_NAME_LEN (MZAP_ENT_LEN - 8 - 4 - 2)
#define MZAP_MAX_BLKSHIFT SPA_MAXBLOCKSHIFT
#define MZAP_MAX_BLKSZ (1 << MZAP_MAX_BLKSHIFT)

typedef struct mzap_ent_phys {
        uint64_t mze_value;
        uint32_t mze_cd;
        uint16_t mze_pad; /* in case we want to chain them someday */
        char mze_name[MZAP_NAME_LEN];
} mzap_ent_phys_t;

typedef struct mzap_phys {
        uint64_t mz_block_type; /* ZBT_MICRO */
        uint64_t mz_salt;
        uint64_t mz_pad[6];
        mzap_ent_phys_t mz_chunk[1];
        /* actually variable size depending on block size */
} mzap_phys_t;

/*
 * The (fat) zap is stored in one object. It is an array of
 * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
 *
 * ptrtbl fits in first block:
 *      [zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
 *
 * ptrtbl too big for first block:
 *      [zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
 *
 */

#define ZBT_LEAF ((1ULL << 63) + 0)
#define ZBT_HEADER ((1ULL << 63) + 1)
#define ZBT_MICRO ((1ULL << 63) + 3)
/* any other values are ptrtbl blocks */

/*
 * the embedded pointer table takes up half a block:
 * block size / entry size (2^3) / 2
 */
#define ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)

/*
 * The embedded pointer table starts half-way through the block. Since
 * the pointer table itself is half the block, it starts at (64-bit)
 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
 */
#define ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
        ((uint64_t *)(zap)->zap_phys) \
        [(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
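
/*
 * For illustration: a reader decides how to interpret a ZAP object by the
 * first 64-bit word of its first block; ZBT_MICRO means the block is an
 * mzap_phys_t full of fixed 64-byte entries, while ZBT_HEADER means it is
 * the zap_phys_t of a fat zap whose entries live in separate leaf blocks.
 */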

/*
 * TAKE NOTE:
 * If zap_phys_t is modified, zap_byteswap() must be modified.
 */
typedef struct zap_phys {
        uint64_t zap_block_type; /* ZBT_HEADER */
        uint64_t zap_magic; /* ZAP_MAGIC */

        struct zap_table_phys {
                uint64_t zt_blk; /* starting block number */
                uint64_t zt_numblks; /* number of blocks */
                uint64_t zt_shift; /* bits to index it */
                uint64_t zt_nextblk; /* next (larger) copy start block */
                uint64_t zt_blks_copied; /* number source blocks copied */
        } zap_ptrtbl;

        uint64_t zap_freeblk; /* the next free block */
        uint64_t zap_num_leafs; /* number of leafs */
        uint64_t zap_num_entries; /* number of entries */
        uint64_t zap_salt; /* salt to stir into hash function */
        /*
         * This structure is followed by padding, and then the embedded
         * pointer table. The embedded pointer table takes up the second
         * half of the block. It is accessed using the
         * ZAP_EMBEDDED_PTRTBL_ENT() macro.
         */
} zap_phys_t;

typedef struct zap_table_phys zap_table_phys_t;

typedef struct fat_zap {
        int zap_block_shift; /* block size shift */
        zap_phys_t *zap_phys;
} fat_zap_t;

#define ZAP_LEAF_MAGIC 0x2AB1EAF

/* chunk size = 24 bytes */
#define ZAP_LEAF_CHUNKSIZE 24

/*
 * The amount of space available for chunks is:
 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
 * entries - header space (2*chunksize)
 */
#define ZAP_LEAF_NUMCHUNKS(l) \
        (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
        ZAP_LEAF_CHUNKSIZE - 2)

/*
 * The amount of space within the chunk available for the array is:
 * chunk size - space for type (1) - space for next pointer (2)
 */
#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)

#define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
        (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)

/*
 * Low water mark: when there are only this many chunks free, start
 * growing the ptrtbl. Ideally, this should be larger than a
 * "reasonably-sized" entry. 20 chunks is more than enough for the
 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
 * while still being only around 3% for 16k blocks.
 */
#define ZAP_LEAF_LOW_WATER (20)

/*
 * The leaf hash table has block size / 2^5 (32) number of entries,
 * which should be more than enough for the maximum number of entries,
 * which is less than block size / CHUNKSIZE (24) / minimum number of
 * chunks per entry (3).
 */
#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))

/*
 * The chunks start immediately after the hash table. The end of the
 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
 * chunk_t.
 */
#define ZAP_LEAF_CHUNK(l, idx) \
        ((zap_leaf_chunk_t *) \
        ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
#define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)

typedef enum zap_chunk_type {
        ZAP_CHUNK_FREE = 253,
        ZAP_CHUNK_ENTRY = 252,
        ZAP_CHUNK_ARRAY = 251,
        ZAP_CHUNK_TYPE_MAX = 250
} zap_chunk_type_t;
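
/*
 * For illustration (hypothetical 16K leaf, l_bs == 14):
 *      ZAP_LEAF_HASH_NUMENTRIES(l) == 1 << 9 == 512
 *      ZAP_LEAF_NUMCHUNKS(l) == (16384 - 2*512) / 24 - 2 == 638
 * so each leaf carries 512 hash buckets followed by 638 24-byte chunks.
 */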

/*
 * TAKE NOTE:
 * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
 */
typedef struct zap_leaf_phys {
        struct zap_leaf_header {
                uint64_t lh_block_type; /* ZBT_LEAF */
                uint64_t lh_pad1;
                uint64_t lh_prefix; /* hash prefix of this leaf */
                uint32_t lh_magic; /* ZAP_LEAF_MAGIC */
                uint16_t lh_nfree; /* number free chunks */
                uint16_t lh_nentries; /* number of entries */
                uint16_t lh_prefix_len; /* num bits used to id this */

                /* above is accessible to zap, below is zap_leaf private */

                uint16_t lh_freelist; /* chunk head of free list */
                uint8_t lh_pad2[12];
        } l_hdr; /* 2 24-byte chunks */

        /*
         * The header is followed by a hash table with
         * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. The hash table is
         * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
         * zap_leaf_chunk structures. These structures are accessed
         * with the ZAP_LEAF_CHUNK() macro.
         */

        uint16_t l_hash[1];
} zap_leaf_phys_t;

typedef union zap_leaf_chunk {
        struct zap_leaf_entry {
                uint8_t le_type; /* always ZAP_CHUNK_ENTRY */
                uint8_t le_value_intlen; /* size of ints */
                uint16_t le_next; /* next entry in hash chain */
                uint16_t le_name_chunk; /* first chunk of the name */
                uint16_t le_name_numints; /* bytes in name, incl null */
                uint16_t le_value_chunk; /* first chunk of the value */
                uint16_t le_value_numints; /* value length in ints */
                uint32_t le_cd; /* collision differentiator */
                uint64_t le_hash; /* hash value of the name */
        } l_entry;
        struct zap_leaf_array {
                uint8_t la_type; /* always ZAP_CHUNK_ARRAY */
                uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
                uint16_t la_next; /* next blk or CHAIN_END */
        } l_array;
        struct zap_leaf_free {
                uint8_t lf_type; /* always ZAP_CHUNK_FREE */
                uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
                uint16_t lf_next; /* next in free list, or CHAIN_END */
        } l_free;
} zap_leaf_chunk_t;

typedef struct zap_leaf {
        int l_bs; /* block size shift */
        zap_leaf_phys_t *l_phys;
} zap_leaf_t;

/*
 * Define special zfs pflags
 */
#define ZFS_XATTR 0x1 /* is an extended attribute */
#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */
#define ZFS_ACL_TRIVIAL 0x4 /* files ACL is trivial */

#define MASTER_NODE_OBJ 1

/*
 * special attributes for master node.
 */

#define ZFS_FSID "FSID"
#define ZFS_UNLINKED_SET "DELETE_QUEUE"
#define ZFS_ROOT_OBJ "ROOT"
#define ZPL_VERSION_OBJ "VERSION"
#define ZFS_PROP_BLOCKPERPAGE "BLOCKPERPAGE"
#define ZFS_PROP_NOGROWBLOCKS "NOGROWBLOCKS"

#define ZFS_FLAG_BLOCKPERPAGE 0x1
#define ZFS_FLAG_NOGROWBLOCKS 0x2

/*
 * ZPL version - rev'd whenever an incompatible on-disk format change
 * occurs. Independent of SPA/DMU/ZAP versioning.
 */

#define ZPL_VERSION 1ULL

/*
 * The directory entry has the type (currently unused on Solaris) in the
 * top 4 bits, and the object number in the low 48 bits. The "middle"
 * 12 bits are unused.
 */
#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
#define ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
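
/*
 * For illustration (hypothetical directory entry): a ZAP value of
 * 0x4000000000001234 in a DMU_OT_DIRECTORY_CONTENTS object decodes as
 * ZFS_DIRENT_TYPE() == 4 (a directory) and ZFS_DIRENT_OBJ() == 0x1234,
 * the dnode number of the child.
 */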

typedef struct ace {
        uid_t a_who; /* uid or gid */
        uint32_t a_access_mask; /* read,write,... */
        uint16_t a_flags; /* see below */
        uint16_t a_type; /* allow or deny */
} ace_t;

#define ACE_SLOT_CNT 6

typedef struct zfs_znode_acl {
        uint64_t z_acl_extern_obj; /* ext acl pieces */
        uint32_t z_acl_count; /* Number of ACEs */
        uint16_t z_acl_version; /* acl version */
        uint16_t z_acl_pad; /* pad */
        ace_t z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
} zfs_znode_acl_t;

/*
 * This is the persistent portion of the znode. It is stored
 * in the "bonus buffer" of the file. Short symbolic links
 * are also stored in the bonus buffer.
 */
typedef struct znode_phys {
        uint64_t zp_atime[2]; /* 0 - last file access time */
        uint64_t zp_mtime[2]; /* 16 - last file modification time */
        uint64_t zp_ctime[2]; /* 32 - last file change time */
        uint64_t zp_crtime[2]; /* 48 - creation time */
        uint64_t zp_gen; /* 64 - generation (txg of creation) */
        uint64_t zp_mode; /* 72 - file mode bits */
        uint64_t zp_size; /* 80 - size of file */
        uint64_t zp_parent; /* 88 - directory parent (`..') */
        uint64_t zp_links; /* 96 - number of links to file */
        uint64_t zp_xattr; /* 104 - DMU object for xattrs */
        uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */
        uint64_t zp_flags; /* 120 - persistent flags */
        uint64_t zp_uid; /* 128 - file owner */
        uint64_t zp_gid; /* 136 - owning group */
        uint64_t zp_pad[4]; /* 144 - future */
        zfs_znode_acl_t zp_acl; /* 176 - 263 ACL */
        /*
         * Data may pad out any remaining bytes in the znode buffer, eg:
         *
         * |<---------------------- dnode_phys (512) ------------------------>|
         * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
         *                      |<---- znode (264) ---->|<---- data (56) ---->|
         *
         * At present, we only use this space to store symbolic links.
         */
} znode_phys_t;

/*
 * In-core vdev representation.
 */
struct vdev;
typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
    off_t offset, void *buf, size_t bytes);
typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
    void *buf, off_t offset, size_t bytes);

typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;

typedef struct vdev {
        STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
        STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */
        vdev_list_t v_children; /* children of this vdev */
        const char *v_name; /* vdev name */
        uint64_t v_guid; /* vdev guid */
        int v_id; /* index in parent */
        int v_ashift; /* offset to block shift */
        int v_nparity; /* # parity for raidz */
        struct vdev *v_top; /* parent vdev */
        int v_nchildren; /* # children */
        vdev_state_t v_state; /* current state */
        vdev_phys_read_t *v_phys_read; /* read from raw leaf vdev */
        vdev_read_t *v_read; /* read from vdev */
        void *v_read_priv; /* private data for read function */
} vdev_t;

/*
 * In-core pool representation.
 */
typedef STAILQ_HEAD(spa_list, spa) spa_list_t;

typedef struct spa {
        STAILQ_ENTRY(spa) spa_link; /* link in global pool list */
        char *spa_name; /* pool name */
        uint64_t spa_guid; /* pool guid */
        uint64_t spa_txg; /* most recent transaction */
        struct uberblock spa_uberblock; /* best uberblock so far */
        vdev_list_t spa_vdevs; /* list of all toplevel vdevs */
        objset_phys_t spa_mos; /* MOS for this pool */
        int spa_inited; /* initialized */
} spa_t;

static void decode_embedded_bp_compressed(const blkptr_t *, void *);