/*-
 * Copyright (c) 2002 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and McAfee Research, the Security Research Division of
 * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
 * part of the DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* CRC64 table */
#define ZFS_CRC64_POLY  0xC96C5795D7870F42ULL   /* ECMA-182, reflected form */

/*
 * Macros for various sorts of alignment and rounding when the alignment
 * is known to be a power of 2.
 */
#define P2ALIGN(x, align)               ((x) & -(align))
#define P2PHASE(x, align)               ((x) & ((align) - 1))
#define P2NPHASE(x, align)              (-(x) & ((align) - 1))
#define P2ROUNDUP(x, align)             (-(-(x) & -(align)))
#define P2END(x, align)                 (-(~(x) & -(align)))
#define P2PHASEUP(x, align, phase)      ((phase) - (((phase) - (x)) & -(align)))
#define P2BOUNDARY(off, len, align)     (((off) ^ ((off) + (len) - 1)) > (align) - 1)

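/*
 * Worked example (illustrative values only): with x = 0x1234 and a
 * 512-byte (0x200) alignment,
 *
 *      P2ALIGN(0x1234, 0x200)   == 0x1200      round down to a boundary
 *      P2PHASE(0x1234, 0x200)   == 0x34        offset within the unit
 *      P2NPHASE(0x1234, 0x200)  == 0x1cc       bytes to the next boundary
 *      P2ROUNDUP(0x1234, 0x200) == 0x1400      round up to a boundary
 *
 * None of these are valid for alignments that are not powers of 2.
 */
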
/*
 * General-purpose 32-bit and 64-bit bitfield encodings.
 */
#define BF32_DECODE(x, low, len)        P2PHASE((x) >> (low), 1U << (len))
#define BF64_DECODE(x, low, len)        P2PHASE((x) >> (low), 1ULL << (len))
#define BF32_ENCODE(x, low, len)        (P2PHASE((x), 1U << (len)) << (low))
#define BF64_ENCODE(x, low, len)        (P2PHASE((x), 1ULL << (len)) << (low))

#define BF32_GET(x, low, len)           BF32_DECODE(x, low, len)
#define BF64_GET(x, low, len)           BF64_DECODE(x, low, len)

#define BF32_SET(x, low, len, val)      \
        ((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
#define BF64_SET(x, low, len, val)      \
        ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))

#define BF32_GET_SB(x, low, len, shift, bias)   \
        ((BF32_GET(x, low, len) + (bias)) << (shift))
#define BF64_GET_SB(x, low, len, shift, bias)   \
        ((BF64_GET(x, low, len) + (bias)) << (shift))

#define BF32_SET_SB(x, low, len, shift, bias, val)      \
        BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
#define BF64_SET_SB(x, low, len, shift, bias, val)      \
        BF64_SET(x, low, len, ((val) >> (shift)) - (bias))

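/*
 * Usage sketch (arbitrary values, for illustration): pack a 16-bit field
 * at bit 16 of a 64-bit word and read it back.
 *
 *      uint64_t w = 0;
 *      BF64_SET(w, 16, 16, 0x1fff);            w is now 0x1fff0000
 *      uint64_t v = BF64_GET(w, 16, 16);       v == 0x1fff
 *
 * BF64_SET() XORs out whatever the field held before and XORs in the new
 * value, so it overwrites the old contents rather than ORing over them.
 */
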
/*
 * We currently support nine block sizes, from 512 bytes to 128K.
 * We could go higher, but the benefits are near-zero and the cost
 * of COWing a giant block to modify one byte would become excessive.
 */
#define SPA_MINBLOCKSHIFT       9
#define SPA_MAXBLOCKSHIFT       17
#define SPA_MINBLOCKSIZE        (1ULL << SPA_MINBLOCKSHIFT)
#define SPA_MAXBLOCKSIZE        (1ULL << SPA_MAXBLOCKSHIFT)

#define SPA_BLOCKSIZES          (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)

/*
 * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
 * The ASIZE encoding should be at least 64 times larger (6 more bits)
 * to support up to 4-way RAID-Z mirror mode with worst-case gang block
 * overhead, three DVAs per bp, plus one more bit in case we do anything
 * else that expands the ASIZE.
 */
#define SPA_LSIZEBITS           16      /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS           16      /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS           24      /* ASIZE up to 64 times larger  */

/*
 * All SPA data is represented by 128-bit data virtual addresses (DVAs).
 * The members of the dva_t should be considered opaque outside the SPA.
 */
typedef struct dva {
        uint64_t        dva_word[2];
} dva_t;

/*
 * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
 */
typedef struct zio_cksum {
        uint64_t        zc_word[4];
} zio_cksum_t;

/*
 * Each block is described by its DVAs, time of birth, checksum, etc.
 * The word-by-word, bit-by-bit layout of the blkptr is as follows:
 *
 *      64      56      48      40      32      24      16      8       0
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 0    |               vdev1           | GRID  |         ASIZE         |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 1    |G|                      offset1                                |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 2    |               vdev2           | GRID  |         ASIZE         |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 3    |G|                      offset2                                |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 4    |               vdev3           | GRID  |         ASIZE         |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 5    |G|                      offset3                                |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 6    |E| lvl | type  | cksum | comp  |     PSIZE     |     LSIZE     |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 7    |                       padding                                 |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 8    |                       padding                                 |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * 9    |                       padding                                 |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * a    |                       birth txg                               |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * b    |                       fill count                              |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * c    |                       checksum[0]                             |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * d    |                       checksum[1]                             |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * e    |                       checksum[2]                             |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 * f    |                       checksum[3]                             |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * vdev         virtual device ID
 * offset       offset into virtual device
 * LSIZE        logical size
 * PSIZE        physical size (after compression)
 * ASIZE        allocated size (including RAID-Z parity and gang block headers)
 * GRID         RAID-Z layout information (reserved for future use)
 * cksum        checksum function
 * comp         compression function
 * G            gang block indicator
 * E            endianness
 * type         DMU object type
 * lvl          level of indirection
 * birth txg    transaction group in which the block was born
 * fill count   number of non-zero blocks under this bp
 * checksum[4]  256-bit checksum of the data this bp describes
 */
typedef struct blkptr {
        dva_t           blk_dva[3];     /* 128-bit Data Virtual Address */
        uint64_t        blk_prop;       /* size, compression, type, etc */
        uint64_t        blk_pad[3];     /* Extra space for the future   */
        uint64_t        blk_birth;      /* transaction group at birth   */
        uint64_t        blk_fill;       /* fill count                   */
        zio_cksum_t     blk_cksum;      /* 256-bit checksum             */
} blkptr_t;

#define SPA_BLKPTRSHIFT 7               /* blkptr_t is 128 bytes        */
#define SPA_DVAS_PER_BP 3               /* Number of DVAs in a bp       */

/*
 * Macros to get and set fields in a bp or DVA.
 */
#define DVA_GET_ASIZE(dva)      \
        BF64_GET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x)   \
        BF64_SET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0, x)

#define DVA_GET_GRID(dva)       BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x)    BF64_SET((dva)->dva_word[0], 24, 8, x)

#define DVA_GET_VDEV(dva)       BF64_GET((dva)->dva_word[0], 32, 32)
#define DVA_SET_VDEV(dva, x)    BF64_SET((dva)->dva_word[0], 32, 32, x)

#define DVA_GET_OFFSET(dva)     \
        BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x)  \
        BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)

#define DVA_GET_GANG(dva)       BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x)    BF64_SET((dva)->dva_word[1], 63, 1, x)

#define BP_GET_LSIZE(bp)        \
        (BP_IS_HOLE(bp) ? 0 : \
        BF64_GET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x)     \
        BF64_SET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1, x)

#define BP_GET_PSIZE(bp)        \
        BF64_GET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1)
#define BP_SET_PSIZE(bp, x)     \
        BF64_SET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1, x)

#define BP_GET_COMPRESS(bp)     BF64_GET((bp)->blk_prop, 32, 8)
#define BP_SET_COMPRESS(bp, x)  BF64_SET((bp)->blk_prop, 32, 8, x)

#define BP_GET_CHECKSUM(bp)     BF64_GET((bp)->blk_prop, 40, 8)
#define BP_SET_CHECKSUM(bp, x)  BF64_SET((bp)->blk_prop, 40, 8, x)

#define BP_GET_TYPE(bp)         BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x)      BF64_SET((bp)->blk_prop, 48, 8, x)

#define BP_GET_LEVEL(bp)        BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x)     BF64_SET((bp)->blk_prop, 56, 5, x)

#define BP_GET_BYTEORDER(bp)    (0 - BF64_GET((bp)->blk_prop, 63, 1))
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)

#define BP_GET_ASIZE(bp)        \
        (DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
        DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define BP_GET_UCSIZE(bp)       \
        ((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
        BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))

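/*
 * Decoding sketch (illustrative): the SB ("shift/bias") accessors store
 * sizes as 512-byte sectors minus one.  For a 128K logical block,
 *
 *      stored LSIZE field == (131072 >> SPA_MINBLOCKSHIFT) - 1 == 255
 *      BP_GET_LSIZE(bp)   == (255 + 1) << SPA_MINBLOCKSHIFT    == 131072
 *
 * DVA offsets use the same scheme without the bias, so DVA_GET_OFFSET()
 * simply shifts the stored sector count left by SPA_MINBLOCKSHIFT to
 * yield a byte offset.
 */
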
#define BP_GET_NDVAS(bp)        \
        (!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
        !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
        !!DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define BP_COUNT_GANG(bp)       \
        (DVA_GET_GANG(&(bp)->blk_dva[0]) + \
        DVA_GET_GANG(&(bp)->blk_dva[1]) + \
        DVA_GET_GANG(&(bp)->blk_dva[2]))

#define DVA_EQUAL(dva1, dva2)   \
        ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
        (dva1)->dva_word[0] == (dva2)->dva_word[0])

#define ZIO_CHECKSUM_EQUAL(zc1, zc2) \
        (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
        ((zc1).zc_word[1] - (zc2).zc_word[1]) | \
        ((zc1).zc_word[2] - (zc2).zc_word[2]) | \
        ((zc1).zc_word[3] - (zc2).zc_word[3])))

#define DVA_IS_VALID(dva)       (DVA_GET_ASIZE(dva) != 0)

#define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)   \
{                                               \
        (zcp)->zc_word[0] = w0;                 \
        (zcp)->zc_word[1] = w1;                 \
        (zcp)->zc_word[2] = w2;                 \
        (zcp)->zc_word[3] = w3;                 \
}

#define BP_IDENTITY(bp)         (&(bp)->blk_dva[0])
#define BP_IS_GANG(bp)          DVA_GET_GANG(BP_IDENTITY(bp))
#define BP_IS_HOLE(bp)          ((bp)->blk_birth == 0)
#define BP_IS_OLDER(bp, txg)    (!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))

#define BP_ZERO(bp)                             \
{                                               \
        (bp)->blk_dva[0].dva_word[0] = 0;       \
        (bp)->blk_dva[0].dva_word[1] = 0;       \
        (bp)->blk_dva[1].dva_word[0] = 0;       \
        (bp)->blk_dva[1].dva_word[1] = 0;       \
        (bp)->blk_dva[2].dva_word[0] = 0;       \
        (bp)->blk_dva[2].dva_word[1] = 0;       \
        (bp)->blk_prop = 0;                     \
        (bp)->blk_pad[0] = 0;                   \
        (bp)->blk_pad[1] = 0;                   \
        (bp)->blk_pad[2] = 0;                   \
        (bp)->blk_birth = 0;                    \
        (bp)->blk_fill = 0;                     \
        ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}

#define ZBT_MAGIC       0x210da7ab10c7a11ULL    /* zio data bloc tail */

typedef struct zio_block_tail {
        uint64_t        zbt_magic;      /* for validation, endianness   */
        zio_cksum_t     zbt_cksum;      /* 256-bit checksum             */
} zio_block_tail_t;

#define VDEV_PAD_SIZE           (8 << 10)
/* 2 padding areas (vl_pad1 and vl_pad2) to skip */
#define VDEV_SKIP_SIZE          (VDEV_PAD_SIZE * 2)
#define VDEV_PHYS_SIZE          (112 << 10)
#define VDEV_UBERBLOCK_RING     (128 << 10)

#define VDEV_UBERBLOCK_SHIFT(vd)        \
        MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd)        \
        (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n)    \
        offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd)         (1ULL << VDEV_UBERBLOCK_SHIFT(vd))

typedef struct vdev_phys {
        char            vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_block_tail_t)];
        zio_block_tail_t vp_zbt;
} vdev_phys_t;

typedef struct vdev_label {
        char            vl_pad1[VDEV_PAD_SIZE];                 /*   8K */
        char            vl_pad2[VDEV_PAD_SIZE];                 /*   8K */
        vdev_phys_t     vl_vdev_phys;                           /* 112K */
        char            vl_uberblock[VDEV_UBERBLOCK_RING];      /* 128K */
} vdev_label_t;                                                 /* 256K total */

/*
 * vdev_dirty() flags
 */
#define VDD_METASLAB    0x01
#define VDD_DTL         0x02

/*
 * Size and offset of embedded boot loader region on each label.
 * The total size of the first two labels plus the boot area is 4MB.
 */
#define VDEV_BOOT_OFFSET        (2 * sizeof (vdev_label_t))
#define VDEV_BOOT_SIZE          (7ULL << 19)                    /* 3.5M */

/*
 * Size of label regions at the start and end of each leaf device.
 */
#define VDEV_LABEL_START_SIZE   (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE     (2 * sizeof (vdev_label_t))
#define VDEV_LABELS             4

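/*
 * Layout sketch (assuming a leaf vdev with ashift = 9, i.e. 512-byte
 * sectors):
 *
 *      VDEV_UBERBLOCK_SHIFT(vd)     == MAX(9, UBERBLOCK_SHIFT) == 10
 *      VDEV_UBERBLOCK_SIZE(vd)      == 1K
 *      VDEV_UBERBLOCK_COUNT(vd)     == 128K >> 10 == 128 uberblocks
 *      VDEV_UBERBLOCK_OFFSET(vd, n) == 8K + 8K + 112K + (n << 10)
 *
 * i.e. the uberblock ring begins 128K into the 256K label, right after
 * the two pad areas and the nvlist-bearing vdev_phys region.
 */
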
/*
 * Gang block headers are self-checksumming and contain an array
 * of block pointers.
 */
#define SPA_GANGBLOCKSIZE       SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS        ((SPA_GANGBLOCKSIZE - \
        sizeof (zio_block_tail_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER          ((SPA_GANGBLOCKSIZE - \
        sizeof (zio_block_tail_t) - \
        (SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) / \
        sizeof (uint64_t))

typedef struct zio_gbh {
        blkptr_t                zg_blkptr[SPA_GBH_NBLKPTRS];
        uint64_t                zg_filler[SPA_GBH_FILLER];
        zio_block_tail_t        zg_tail;
} zio_gbh_phys_t;

enum zio_checksum {
        ZIO_CHECKSUM_INHERIT = 0,
        ZIO_CHECKSUM_ON,
        ZIO_CHECKSUM_OFF,
        ZIO_CHECKSUM_LABEL,
        ZIO_CHECKSUM_GANG_HEADER,
        ZIO_CHECKSUM_ZILOG,
        ZIO_CHECKSUM_FLETCHER_2,
        ZIO_CHECKSUM_FLETCHER_4,
        ZIO_CHECKSUM_SHA256,
        ZIO_CHECKSUM_FUNCTIONS
};

#define ZIO_CHECKSUM_ON_VALUE   ZIO_CHECKSUM_FLETCHER_2
#define ZIO_CHECKSUM_DEFAULT    ZIO_CHECKSUM_ON

enum zio_compress {
        ZIO_COMPRESS_INHERIT = 0,
        ZIO_COMPRESS_ON,
        ZIO_COMPRESS_OFF,
        ZIO_COMPRESS_LZJB,
        ZIO_COMPRESS_EMPTY,
        ZIO_COMPRESS_GZIP_1,
        ZIO_COMPRESS_GZIP_2,
        ZIO_COMPRESS_GZIP_3,
        ZIO_COMPRESS_GZIP_4,
        ZIO_COMPRESS_GZIP_5,
        ZIO_COMPRESS_GZIP_6,
        ZIO_COMPRESS_GZIP_7,
        ZIO_COMPRESS_GZIP_8,
        ZIO_COMPRESS_GZIP_9,
        ZIO_COMPRESS_FUNCTIONS
};

#define ZIO_COMPRESS_ON_VALUE   ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_DEFAULT    ZIO_COMPRESS_OFF

/* nvlist pack encoding */
#define NV_ENCODE_NATIVE        0
#define NV_ENCODE_XDR           1

typedef enum {
        DATA_TYPE_UNKNOWN = 0,
        DATA_TYPE_BOOLEAN,
        DATA_TYPE_BYTE,
        DATA_TYPE_INT16,
        DATA_TYPE_UINT16,
        DATA_TYPE_INT32,
        DATA_TYPE_UINT32,
        DATA_TYPE_INT64,
        DATA_TYPE_UINT64,
        DATA_TYPE_STRING,
        DATA_TYPE_BYTE_ARRAY,
        DATA_TYPE_INT16_ARRAY,
        DATA_TYPE_UINT16_ARRAY,
        DATA_TYPE_INT32_ARRAY,
        DATA_TYPE_UINT32_ARRAY,
        DATA_TYPE_INT64_ARRAY,
        DATA_TYPE_UINT64_ARRAY,
        DATA_TYPE_STRING_ARRAY,
        DATA_TYPE_HRTIME,
        DATA_TYPE_NVLIST,
        DATA_TYPE_NVLIST_ARRAY,
        DATA_TYPE_BOOLEAN_VALUE,
        DATA_TYPE_INT8,
        DATA_TYPE_UINT8,
        DATA_TYPE_BOOLEAN_ARRAY,
        DATA_TYPE_INT8_ARRAY,
        DATA_TYPE_UINT8_ARRAY
} data_type_t;

/*
 * On-disk version number.
 */
#define SPA_VERSION_1           1ULL
#define SPA_VERSION_2           2ULL
#define SPA_VERSION_3           3ULL
#define SPA_VERSION_4           4ULL
#define SPA_VERSION_5           5ULL
#define SPA_VERSION_6           6ULL
#define SPA_VERSION_7           7ULL
#define SPA_VERSION_8           8ULL
#define SPA_VERSION_9           9ULL
#define SPA_VERSION_10          10ULL
#define SPA_VERSION_11          11ULL
#define SPA_VERSION_12          12ULL
#define SPA_VERSION_13          13ULL
#define SPA_VERSION_14          14ULL
#define SPA_VERSION_15          15ULL
/*
 * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
 * format change.  Go to usr/src/grub/grub-0.95/stage2/{zfs-include/, fsys_zfs*},
 * and make the appropriate changes.
 */
#define SPA_VERSION             SPA_VERSION_15
#define SPA_VERSION_STRING      "15"

/*
 * Symbolic names for the changes that caused a SPA_VERSION switch.
 * Used in the code when checking for presence or absence of a feature.
 * Feel free to define multiple symbolic names for each version if there
 * were multiple changes to on-disk structures during that version.
 *
 * NOTE: When checking the current SPA_VERSION in your code, be sure
 * to use spa_version() since it reports the version of the
 * last synced uberblock.  Checking the in-flight version can
 * be dangerous in some cases.
 */
#define SPA_VERSION_INITIAL             SPA_VERSION_1
#define SPA_VERSION_DITTO_BLOCKS        SPA_VERSION_2
#define SPA_VERSION_SPARES              SPA_VERSION_3
#define SPA_VERSION_RAID6               SPA_VERSION_3
#define SPA_VERSION_BPLIST_ACCOUNT      SPA_VERSION_3
#define SPA_VERSION_RAIDZ_DEFLATE       SPA_VERSION_3
#define SPA_VERSION_DNODE_BYTES         SPA_VERSION_3
#define SPA_VERSION_ZPOOL_HISTORY       SPA_VERSION_4
#define SPA_VERSION_GZIP_COMPRESSION    SPA_VERSION_5
#define SPA_VERSION_BOOTFS              SPA_VERSION_6
#define SPA_VERSION_SLOGS               SPA_VERSION_7
#define SPA_VERSION_DELEGATED_PERMS     SPA_VERSION_8
#define SPA_VERSION_FUID                SPA_VERSION_9
#define SPA_VERSION_REFRESERVATION      SPA_VERSION_9
#define SPA_VERSION_REFQUOTA            SPA_VERSION_9
#define SPA_VERSION_UNIQUE_ACCURATE     SPA_VERSION_9
#define SPA_VERSION_L2CACHE             SPA_VERSION_10
#define SPA_VERSION_NEXT_CLONES         SPA_VERSION_11
#define SPA_VERSION_ORIGIN              SPA_VERSION_11
#define SPA_VERSION_DSL_SCRUB           SPA_VERSION_11
#define SPA_VERSION_SNAP_PROPS          SPA_VERSION_12
#define SPA_VERSION_USED_BREAKDOWN      SPA_VERSION_13
#define SPA_VERSION_PASSTHROUGH_X       SPA_VERSION_14
#define SPA_VERSION_USERSPACE           SPA_VERSION_15

/*
 * The following are configuration names used in the nvlist describing a pool's
 * configuration.
 */
#define ZPOOL_CONFIG_VERSION            "version"
#define ZPOOL_CONFIG_POOL_NAME          "name"
#define ZPOOL_CONFIG_POOL_STATE         "state"
#define ZPOOL_CONFIG_POOL_TXG           "txg"
#define ZPOOL_CONFIG_POOL_GUID          "pool_guid"
#define ZPOOL_CONFIG_CREATE_TXG         "create_txg"
#define ZPOOL_CONFIG_TOP_GUID           "top_guid"
#define ZPOOL_CONFIG_VDEV_TREE          "vdev_tree"
#define ZPOOL_CONFIG_TYPE               "type"
#define ZPOOL_CONFIG_CHILDREN           "children"
#define ZPOOL_CONFIG_ID                 "id"
#define ZPOOL_CONFIG_GUID               "guid"
#define ZPOOL_CONFIG_PATH               "path"
#define ZPOOL_CONFIG_DEVID              "devid"
#define ZPOOL_CONFIG_METASLAB_ARRAY     "metaslab_array"
#define ZPOOL_CONFIG_METASLAB_SHIFT     "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT             "ashift"
#define ZPOOL_CONFIG_ASIZE              "asize"
#define ZPOOL_CONFIG_DTL                "DTL"
#define ZPOOL_CONFIG_STATS              "stats"
#define ZPOOL_CONFIG_WHOLE_DISK         "whole_disk"
#define ZPOOL_CONFIG_ERRCOUNT           "error_count"
#define ZPOOL_CONFIG_NOT_PRESENT        "not_present"
#define ZPOOL_CONFIG_SPARES             "spares"
#define ZPOOL_CONFIG_IS_SPARE           "is_spare"
#define ZPOOL_CONFIG_NPARITY            "nparity"
#define ZPOOL_CONFIG_HOSTID             "hostid"
#define ZPOOL_CONFIG_HOSTNAME           "hostname"
#define ZPOOL_CONFIG_TIMESTAMP          "timestamp"     /* not stored on disk */

/*
 * The persistent vdev state is stored as separate values rather than a single
 * 'vdev_state' entry.  This is because a device can be in multiple states, such
 * as offline and degraded.
 */
#define ZPOOL_CONFIG_OFFLINE            "offline"
#define ZPOOL_CONFIG_FAULTED            "faulted"
#define ZPOOL_CONFIG_DEGRADED           "degraded"
#define ZPOOL_CONFIG_REMOVED            "removed"

#define VDEV_TYPE_ROOT                  "root"
#define VDEV_TYPE_MIRROR                "mirror"
#define VDEV_TYPE_REPLACING             "replacing"
#define VDEV_TYPE_RAIDZ                 "raidz"
#define VDEV_TYPE_DISK                  "disk"
#define VDEV_TYPE_FILE                  "file"
#define VDEV_TYPE_MISSING               "missing"
#define VDEV_TYPE_SPARE                 "spare"

/*
 * This is needed in userland to report the minimum necessary device size.
 */
#define SPA_MINDEVSIZE          (64ULL << 20)

/*
 * The location of the pool configuration repository, shared between kernel and
 * userland.
 */
#define ZPOOL_CACHE_DIR         "/boot/zfs"
#define ZPOOL_CACHE_FILE        "zpool.cache"
#define ZPOOL_CACHE_TMP         ".zpool.cache"

#define ZPOOL_CACHE             ZPOOL_CACHE_DIR "/" ZPOOL_CACHE_FILE

/*
 * vdev states are ordered from least to most healthy.
 * A vdev that's CANT_OPEN or below is considered unusable.
 */
typedef enum vdev_state {
        VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev                   */
        VDEV_STATE_CLOSED,      /* Not currently open                   */
        VDEV_STATE_OFFLINE,     /* Not allowed to open                  */
        VDEV_STATE_REMOVED,     /* Explicitly removed from system       */
        VDEV_STATE_CANT_OPEN,   /* Tried to open, but failed            */
        VDEV_STATE_FAULTED,     /* External request to fault device     */
        VDEV_STATE_DEGRADED,    /* Replicated vdev with unhealthy kids  */
        VDEV_STATE_HEALTHY      /* Presumed good                        */
} vdev_state_t;

/*
 * vdev aux states.  When a vdev is in the CANT_OPEN state, the aux field
 * of the vdev stats structure uses these constants to distinguish why.
 */
typedef enum vdev_aux {
        VDEV_AUX_NONE,          /* no error                             */
        VDEV_AUX_OPEN_FAILED,   /* ldi_open_*() or vn_open() failed     */
        VDEV_AUX_CORRUPT_DATA,  /* bad label or disk contents           */
        VDEV_AUX_NO_REPLICAS,   /* insufficient number of replicas      */
        VDEV_AUX_BAD_GUID_SUM,  /* vdev guid sum doesn't match          */
        VDEV_AUX_TOO_SMALL,     /* vdev size is too small               */
        VDEV_AUX_BAD_LABEL,     /* the label is OK but invalid          */
        VDEV_AUX_VERSION_NEWER, /* on-disk version is too new           */
        VDEV_AUX_VERSION_OLDER, /* on-disk version is too old           */
        VDEV_AUX_SPARED         /* hot spare used in another pool       */
} vdev_aux_t;

/*
 * pool state.  The following states are written to disk as part of the normal
 * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE.  The remaining states are
 * software abstractions used at various levels to communicate pool state.
 */
typedef enum pool_state {
        POOL_STATE_ACTIVE = 0,          /* In active use                */
        POOL_STATE_EXPORTED,            /* Explicitly exported          */
        POOL_STATE_DESTROYED,           /* Explicitly destroyed         */
        POOL_STATE_SPARE,               /* Reserved for hot spare use   */
        POOL_STATE_UNINITIALIZED,       /* Internal spa_t state         */
        POOL_STATE_UNAVAIL,             /* Internal libzfs state        */
        POOL_STATE_POTENTIALLY_ACTIVE   /* Internal libzfs state        */
} pool_state_t;

/*
 * The uberblock version is incremented whenever an incompatible on-disk
 * format change is made to the SPA, DMU, or ZAP.
 *
 * Note: the first two fields should never be moved.  When a storage pool
 * is opened, the uberblock must be read off the disk before the version
 * can be checked.  If the ub_version field is moved, we may not detect
 * version mismatch.  If the ub_magic field is moved, applications that
 * expect the magic number in the first word won't work.
 */
#define UBERBLOCK_MAGIC         0x00bab10c      /* oo-ba-bloc!  */
#define UBERBLOCK_SHIFT         10              /* up to 1K     */

struct uberblock {
        uint64_t        ub_magic;       /* UBERBLOCK_MAGIC              */
        uint64_t        ub_version;     /* SPA_VERSION                  */
        uint64_t        ub_txg;         /* txg of last sync             */
        uint64_t        ub_guid_sum;    /* sum of all vdev guids        */
        uint64_t        ub_timestamp;   /* UTC time of last sync        */
        blkptr_t        ub_rootbp;      /* MOS objset_phys_t            */
};

/*
 * Flags.
 */
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE      2

/*
 * Fixed constants.
 */
#define DNODE_SHIFT             9       /* 512 bytes */
#define DN_MIN_INDBLKSHIFT      10      /* 1k */
#define DN_MAX_INDBLKSHIFT      14      /* 16k */
#define DNODE_BLOCK_SHIFT       14      /* 16k */
#define DNODE_CORE_SIZE         64      /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT     48      /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT     64      /* 2^64 bytes in a dnode */

/*
 * Derived constants.
 */
#define DNODE_SIZE      (1 << DNODE_SHIFT)
#define DN_MAX_NBLKPTR  ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
#define DN_MAX_OBJECT   (1ULL << DN_MAX_OBJECT_SHIFT)

#define DNODES_PER_BLOCK_SHIFT  (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK        (1ULL << DNODES_PER_BLOCK_SHIFT)
#define DNODES_PER_LEVEL_SHIFT  (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)

/* The +2 here is a cheesy way to round up */
#define DN_MAX_LEVELS   (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
        (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))

#define DN_BONUS(dnp)   ((void*)((dnp)->dn_bonus + \
        (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))

#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
        (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)

#define EPB(blkshift, typeshift)        (1 << (blkshift - typeshift))

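/*
 * Spelled out (illustrative), the derived constants above work out to:
 *
 *      DN_MAX_NBLKPTR   == (512 - 64) >> 7  == 3 block pointers
 *      DN_MAX_BONUSLEN  == 512 - 64 - 128   == 320 bytes of bonus space
 *      DNODES_PER_BLOCK == 1 << (14 - 9)    == 32 dnodes per 16K block
 *
 * so a dnode keeping only one of its possible three block pointers has
 * the remaining 320 bytes available as bonus space.
 */
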
/* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES   (1<<0)

typedef struct dnode_phys {
        uint8_t dn_type;                /* dmu_object_type_t */
        uint8_t dn_indblkshift;         /* ln2(indirect block size) */
        uint8_t dn_nlevels;             /* 1=dn_blkptr->data blocks */
        uint8_t dn_nblkptr;             /* length of dn_blkptr */
        uint8_t dn_bonustype;           /* type of data in bonus buffer */
        uint8_t dn_checksum;            /* ZIO_CHECKSUM type */
        uint8_t dn_compress;            /* ZIO_COMPRESS type */
        uint8_t dn_flags;               /* DNODE_FLAG_* */
        uint16_t dn_datablkszsec;       /* data block size in 512b sectors */
        uint16_t dn_bonuslen;           /* length of dn_bonus */
        uint8_t dn_pad2[4];

        /* accounting is protected by dn_dirty_mtx */
        uint64_t dn_maxblkid;           /* largest allocated block ID */
        uint64_t dn_used;               /* bytes (or sectors) of disk space */

        uint64_t dn_pad3[4];

        blkptr_t dn_blkptr[1];
        uint8_t dn_bonus[DN_MAX_BONUSLEN];
} dnode_phys_t;

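/*
 * Size check (illustrative): the fixed header above is DNODE_CORE_SIZE
 * (64 bytes), the single embedded blkptr_t adds 128, and DN_MAX_BONUSLEN
 * (320) fills out the rest:
 *
 *      64 + 128 + 320 == 512 == DNODE_SIZE
 *
 * A build could verify this with something like
 *
 *      CTASSERT(sizeof (dnode_phys_t) == DNODE_SIZE);
 */
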
typedef enum dmu_object_type {
        DMU_OT_NONE,
        /* general: */
        DMU_OT_OBJECT_DIRECTORY,        /* ZAP */
        DMU_OT_OBJECT_ARRAY,            /* UINT64 */
        DMU_OT_PACKED_NVLIST,           /* UINT8 (XDR by nvlist_pack/unpack) */
        DMU_OT_PACKED_NVLIST_SIZE,      /* UINT64 */
        DMU_OT_BPLIST,                  /* UINT64 */
        DMU_OT_BPLIST_HDR,              /* UINT64 */
        /* spa: */
        DMU_OT_SPACE_MAP_HEADER,        /* UINT64 */
        DMU_OT_SPACE_MAP,               /* UINT64 */
        /* zil: */
        DMU_OT_INTENT_LOG,              /* UINT64 */
        /* dmu: */
        DMU_OT_DNODE,                   /* DNODE */
        DMU_OT_OBJSET,                  /* OBJSET */
        /* dsl: */
        DMU_OT_DSL_DIR,                 /* UINT64 */
        DMU_OT_DSL_DIR_CHILD_MAP,       /* ZAP */
        DMU_OT_DSL_DS_SNAP_MAP,         /* ZAP */
        DMU_OT_DSL_PROPS,               /* ZAP */
        DMU_OT_DSL_DATASET,             /* UINT64 */
        /* zpl: */
        DMU_OT_ZNODE,                   /* ZNODE */
        DMU_OT_ACL,                     /* ACL */
        DMU_OT_PLAIN_FILE_CONTENTS,     /* UINT8 */
        DMU_OT_DIRECTORY_CONTENTS,      /* ZAP */
        DMU_OT_MASTER_NODE,             /* ZAP */
        DMU_OT_UNLINKED_SET,            /* ZAP */
        /* zvol: */
        DMU_OT_ZVOL,                    /* UINT8 */
        DMU_OT_ZVOL_PROP,               /* ZAP */
        /* other; for testing only! */
        DMU_OT_PLAIN_OTHER,             /* UINT8 */
        DMU_OT_UINT64_OTHER,            /* UINT64 */
        DMU_OT_ZAP_OTHER,               /* ZAP */
        /* new object types: */
        DMU_OT_ERROR_LOG,               /* ZAP */
        DMU_OT_SPA_HISTORY,             /* UINT8 */
        DMU_OT_SPA_HISTORY_OFFSETS,     /* spa_his_phys_t */
        DMU_OT_POOL_PROPS,              /* ZAP */

        DMU_OT_NUMTYPES
} dmu_object_type_t;

typedef enum dmu_objset_type {
        DMU_OST_NONE,
        DMU_OST_META,
        DMU_OST_ZFS,
        DMU_OST_ZVOL,
        DMU_OST_OTHER,                  /* For testing only! */
        DMU_OST_ANY,                    /* Be careful! */
        DMU_OST_NUMTYPES
} dmu_objset_type_t;

/*
 * Intent log header - this on disk structure holds fields to manage
 * the log.  All fields are 64 bit to easily handle cross architectures.
 */
typedef struct zil_header {
        uint64_t zh_claim_txg;  /* txg in which log blocks were claimed */
        uint64_t zh_replay_seq; /* highest replayed sequence number */
        blkptr_t zh_log;        /* log chain */
        uint64_t zh_claim_seq;  /* highest claimed sequence number */
        uint64_t zh_pad[5];
} zil_header_t;

typedef struct objset_phys {
        dnode_phys_t os_meta_dnode;
        zil_header_t os_zil_header;
        uint64_t os_type;
        uint64_t os_flags;
        char os_pad[2048 - sizeof (dnode_phys_t)*3 -
            sizeof (zil_header_t) - sizeof (uint64_t)*2];
        dnode_phys_t os_userused_dnode;
        dnode_phys_t os_groupused_dnode;
} objset_phys_t;

typedef struct dsl_dir_phys {
        uint64_t dd_creation_time;      /* not actually used */
        uint64_t dd_head_dataset_obj;
        uint64_t dd_parent_obj;
        uint64_t dd_clone_parent_obj;
        uint64_t dd_child_dir_zapobj;
        /*
         * how much space our children are accounting for; for leaf
         * datasets, == physical space used by fs + snaps
         */
        uint64_t dd_used_bytes;
        uint64_t dd_compressed_bytes;
        uint64_t dd_uncompressed_bytes;
        /* Administrative quota setting */
        uint64_t dd_quota;
        /* Administrative reservation setting */
        uint64_t dd_reserved;
        uint64_t dd_props_zapobj;
        uint64_t dd_pad[21];    /* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;

typedef struct dsl_dataset_phys {
        uint64_t ds_dir_obj;
        uint64_t ds_prev_snap_obj;
        uint64_t ds_prev_snap_txg;
        uint64_t ds_next_snap_obj;
        uint64_t ds_snapnames_zapobj;   /* zap obj of snaps; ==0 for snaps */
        uint64_t ds_num_children;       /* clone/snap children; ==0 for head */
        uint64_t ds_creation_time;      /* seconds since 1970 */
        uint64_t ds_creation_txg;
        uint64_t ds_deadlist_obj;
        uint64_t ds_used_bytes;
        uint64_t ds_compressed_bytes;
        uint64_t ds_uncompressed_bytes;
        uint64_t ds_unique_bytes;       /* only relevant to snapshots */
        /*
         * The ds_fsid_guid is a 56-bit ID that can change to avoid
         * collisions.  The ds_guid is a 64-bit ID that will never
         * change, so there is a small probability that it will collide.
         */
        uint64_t ds_fsid_guid;
        uint64_t ds_guid;
        uint64_t ds_flags;
        blkptr_t ds_bp;
        uint64_t ds_pad[8];     /* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define DMU_POOL_DIRECTORY_OBJECT       1
#define DMU_POOL_CONFIG                 "config"
#define DMU_POOL_ROOT_DATASET           "root_dataset"
#define DMU_POOL_SYNC_BPLIST            "sync_bplist"
#define DMU_POOL_ERRLOG_SCRUB           "errlog_scrub"
#define DMU_POOL_ERRLOG_LAST            "errlog_last"
#define DMU_POOL_SPARES                 "spares"
#define DMU_POOL_DEFLATE                "deflate"
#define DMU_POOL_HISTORY                "history"
#define DMU_POOL_PROPS                  "pool_props"

#define ZAP_MAGIC       0x2F52AB2ABULL

#define FZAP_BLOCK_SHIFT(zap)   ((zap)->zap_block_shift)

#define ZAP_MAXCD               (uint32_t)(-1)
#define ZAP_HASHBITS            28
#define MZAP_ENT_LEN            64
#define MZAP_NAME_LEN           (MZAP_ENT_LEN - 8 - 4 - 2)
#define MZAP_MAX_BLKSHIFT       SPA_MAXBLOCKSHIFT
#define MZAP_MAX_BLKSZ          (1 << MZAP_MAX_BLKSHIFT)

typedef struct mzap_ent_phys {
        uint64_t mze_value;
        uint32_t mze_cd;
        uint16_t mze_pad;       /* in case we want to chain them someday */
        char mze_name[MZAP_NAME_LEN];
} mzap_ent_phys_t;

typedef struct mzap_phys {
        uint64_t mz_block_type; /* ZBT_MICRO */
        uint64_t mz_salt;
        uint64_t mz_pad[6];
        mzap_ent_phys_t mz_chunk[1];
        /* actually variable size depending on block size */
} mzap_phys_t;

/*
 * The (fat) zap is stored in one object.  It is an array of
 * 1<<FZAP_BLOCK_SHIFT byte blocks.  The layout looks like one of:
 *
 * ptrtbl fits in first block:
 *      [zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
 *
 * ptrtbl too big for first block:
 *      [zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
 *
 */

#define ZBT_LEAF                ((1ULL << 63) + 0)
#define ZBT_HEADER              ((1ULL << 63) + 1)
#define ZBT_MICRO               ((1ULL << 63) + 3)
/* any other values are ptrtbl blocks */

/*
 * the embedded pointer table takes up half a block:
 * block size / entry size (2^3) / 2
 */
#define ZAP_EMBEDDED_PTRTBL_SHIFT(zap)  (FZAP_BLOCK_SHIFT(zap) - 3 - 1)

/*
 * The embedded pointer table starts half-way through the block.  Since
 * the pointer table itself is half the block, it starts at (64-bit)
 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
 */
#define ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
        ((uint64_t *)(zap)->zap_phys) \
        [(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]

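/*
 * Worked example (illustrative): for a 16K (1 << 14) fat zap block,
 *
 *      ZAP_EMBEDDED_PTRTBL_SHIFT(zap) == 14 - 3 - 1 == 10
 *
 * so the embedded pointer table holds 1 << 10 == 1024 64-bit entries
 * (8K, exactly half the block), and ZAP_EMBEDDED_PTRTBL_ENT(zap, idx)
 * indexes 64-bit word 1024 + idx, placing the table in the second half
 * of the block as described below.
 */
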
/*
 * TAKE NOTE:
 * If zap_phys_t is modified, zap_byteswap() must be modified.
 */
typedef struct zap_phys {
        uint64_t zap_block_type;        /* ZBT_HEADER */
        uint64_t zap_magic;             /* ZAP_MAGIC */

        struct zap_table_phys {
                uint64_t zt_blk;        /* starting block number */
                uint64_t zt_numblks;    /* number of blocks */
                uint64_t zt_shift;      /* bits to index it */
                uint64_t zt_nextblk;    /* next (larger) copy start block */
                uint64_t zt_blks_copied; /* number of source blocks copied */
        } zap_ptrtbl;

        uint64_t zap_freeblk;           /* the next free block */
        uint64_t zap_num_leafs;         /* number of leafs */
        uint64_t zap_num_entries;       /* number of entries */
        uint64_t zap_salt;              /* salt to stir into hash function */
        /*
         * This structure is followed by padding, and then the embedded
         * pointer table.  The embedded pointer table takes up the second
         * half of the block.  It is accessed using the
         * ZAP_EMBEDDED_PTRTBL_ENT() macro.
         */
} zap_phys_t;

typedef struct zap_table_phys zap_table_phys_t;

typedef struct fat_zap {
        int zap_block_shift;            /* block size shift */
        zap_phys_t *zap_phys;
} fat_zap_t;

#define ZAP_LEAF_MAGIC 0x2AB1EAF

/* chunk size = 24 bytes */
#define ZAP_LEAF_CHUNKSIZE 24

/*
 * The amount of space available for chunks is:
 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
 * entries - header space (2*chunksize)
 */
#define ZAP_LEAF_NUMCHUNKS(l) \
        (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
        ZAP_LEAF_CHUNKSIZE - 2)

/*
 * The amount of space within the chunk available for the array is:
 * chunk size - space for type (1) - space for next pointer (2)
 */
#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)

#define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
        (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)

/*
 * Low water mark: when there are only this many chunks free, start
 * growing the ptrtbl.  Ideally, this should be larger than a
 * "reasonably-sized" entry.  20 chunks is more than enough for the
 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
 * while still being only around 3% for 16k blocks.
 */
#define ZAP_LEAF_LOW_WATER (20)

/*
 * The leaf hash table has block size / 2^5 (32) number of entries,
 * which should be more than enough for the maximum number of entries,
 * which is less than block size / CHUNKSIZE (24) / minimum number of
 * chunks per entry (3).
 */
#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))

/*
 * The chunks start immediately after the hash table.  The end of the
 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
 * chunk_t.
 */
#define ZAP_LEAF_CHUNK(l, idx) \
        ((zap_leaf_chunk_t *) \
        ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
#define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)

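/*
 * Worked example (illustrative): for a 16K leaf block (l_bs == 14),
 *
 *      ZAP_LEAF_HASH_NUMENTRIES(l) == 1 << (14 - 5)          == 512
 *      ZAP_LEAF_NUMCHUNKS(l)       == (16384 - 1024)/24 - 2  == 638
 *
 * A maximal directory entry (256-byte name, 8-byte value) costs one
 * entry chunk, ceil(256/21) == 13 name chunks and one value chunk, or
 * 15 chunks in total, so a low-water mark of 20 free chunks leaves room
 * for any single entry.
 */
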
typedef enum zap_chunk_type {
        ZAP_CHUNK_FREE = 253,
        ZAP_CHUNK_ENTRY = 252,
        ZAP_CHUNK_ARRAY = 251,
        ZAP_CHUNK_TYPE_MAX = 250
} zap_chunk_type_t;

/*
 * TAKE NOTE:
 * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
 */
typedef struct zap_leaf_phys {
        struct zap_leaf_header {
                uint64_t lh_block_type;         /* ZBT_LEAF */
                uint64_t lh_pad1;
                uint64_t lh_prefix;             /* hash prefix of this leaf */
                uint32_t lh_magic;              /* ZAP_LEAF_MAGIC */
                uint16_t lh_nfree;              /* number of free chunks */
                uint16_t lh_nentries;           /* number of entries */
                uint16_t lh_prefix_len;         /* num bits used to id this */

                /* above is accessible to zap, below is zap_leaf private */

                uint16_t lh_freelist;           /* chunk head of free list */
                uint8_t lh_pad2[12];
        } l_hdr;        /* 2 24-byte chunks */

        /*
         * The header is followed by a hash table with
         * ZAP_LEAF_HASH_NUMENTRIES(zap) entries.  The hash table is
         * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
         * zap_leaf_chunk structures.  These structures are accessed
         * with the ZAP_LEAF_CHUNK() macro.
         */

        uint16_t l_hash[1];
} zap_leaf_phys_t;

typedef union zap_leaf_chunk {
        struct zap_leaf_entry {
                uint8_t le_type;                /* always ZAP_CHUNK_ENTRY */
                uint8_t le_int_size;            /* size of ints */
                uint16_t le_next;               /* next entry in hash chain */
                uint16_t le_name_chunk;         /* first chunk of the name */
                uint16_t le_name_length;        /* bytes in name, incl null */
                uint16_t le_value_chunk;        /* first chunk of the value */
                uint16_t le_value_length;       /* value length in ints */
                uint32_t le_cd;                 /* collision differentiator */
                uint64_t le_hash;               /* hash value of the name */
        } l_entry;
        struct zap_leaf_array {
                uint8_t la_type;                /* always ZAP_CHUNK_ARRAY */
                uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
                uint16_t la_next;               /* next blk or CHAIN_END */
        } l_array;
        struct zap_leaf_free {
                uint8_t lf_type;                /* always ZAP_CHUNK_FREE */
                uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
                uint16_t lf_next;       /* next in free list, or CHAIN_END */
        } l_free;
} zap_leaf_chunk_t;

typedef struct zap_leaf {
        int l_bs;                       /* block size shift */
        zap_leaf_phys_t *l_phys;
} zap_leaf_t;

/*
 * Define special zfs pflags
 */
#define ZFS_XATTR       0x1             /* is an extended attribute */
#define ZFS_INHERIT_ACE 0x2             /* ace has inheritable ACEs */
#define ZFS_ACL_TRIVIAL 0x4             /* files ACL is trivial */

#define MASTER_NODE_OBJ 1

/*
 * special attributes for master node.
 */

#define ZFS_FSID                "FSID"
#define ZFS_UNLINKED_SET        "DELETE_QUEUE"
#define ZFS_ROOT_OBJ            "ROOT"
#define ZPL_VERSION_OBJ         "VERSION"
#define ZFS_PROP_BLOCKPERPAGE   "BLOCKPERPAGE"
#define ZFS_PROP_NOGROWBLOCKS   "NOGROWBLOCKS"

#define ZFS_FLAG_BLOCKPERPAGE   0x1
#define ZFS_FLAG_NOGROWBLOCKS   0x2

/*
 * ZPL version - rev'd whenever an incompatible on-disk format change
 * occurs.  Independent of SPA/DMU/ZAP versioning.
 */

#define ZPL_VERSION             1ULL

/*
 * The directory entry has the type (currently unused on Solaris) in the
 * top 4 bits, and the object number in the low 48 bits.  The "middle"
 * 12 bits are unused.
 */
#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
#define ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)

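/*
 * Usage sketch (arbitrary values, for illustration): a directory entry
 * for object 1234 with a type nibble of 4 is built and unpacked as
 *
 *      uint64_t de = ZFS_DIRENT_MAKE(4, 1234); == (4ULL << 60) | 1234
 *      ZFS_DIRENT_TYPE(de)                     == 4
 *      ZFS_DIRENT_OBJ(de)                      == 1234
 *
 * The object number must fit in 48 bits (DN_MAX_OBJECT); the type bits
 * are currently unused on Solaris, as noted above.
 */
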
typedef struct ace {
        uid_t           a_who;          /* uid or gid */
        uint32_t        a_access_mask;  /* read,write,... */
        uint16_t        a_flags;        /* see below */
        uint16_t        a_type;         /* allow or deny */
} ace_t;

#define ACE_SLOT_CNT    6

typedef struct zfs_znode_acl {
        uint64_t        z_acl_extern_obj;         /* ext acl pieces */
        uint32_t        z_acl_count;              /* Number of ACEs */
        uint16_t        z_acl_version;            /* acl version */
        uint16_t        z_acl_pad;                /* pad */
        ace_t           z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
} zfs_znode_acl_t;

/*
 * This is the persistent portion of the znode.  It is stored
 * in the "bonus buffer" of the file.  Short symbolic links
 * are also stored in the bonus buffer.
 */
typedef struct znode_phys {
        uint64_t zp_atime[2];   /*   0 - last file access time */
        uint64_t zp_mtime[2];   /*  16 - last file modification time */
        uint64_t zp_ctime[2];   /*  32 - last file change time */
        uint64_t zp_crtime[2];  /*  48 - creation time */
        uint64_t zp_gen;        /*  64 - generation (txg of creation) */
        uint64_t zp_mode;       /*  72 - file mode bits */
        uint64_t zp_size;       /*  80 - size of file */
        uint64_t zp_parent;     /*  88 - directory parent (`..') */
        uint64_t zp_links;      /*  96 - number of links to file */
        uint64_t zp_xattr;      /* 104 - DMU object for xattrs */
        uint64_t zp_rdev;       /* 112 - dev_t for VBLK & VCHR files */
        uint64_t zp_flags;      /* 120 - persistent flags */
        uint64_t zp_uid;        /* 128 - file owner */
        uint64_t zp_gid;        /* 136 - owning group */
        uint64_t zp_pad[4];     /* 144 - future */
        zfs_znode_acl_t zp_acl; /* 176 - 263 ACL */
        /*
         * Data may pad out any remaining bytes in the znode buffer, eg:
         *
         * |<---------------------- dnode_phys (512) ------------------------>|
         * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
         *                      |<---- znode (264) ---->|<---- data (56) ---->|
         *
         * At present, we only use this space to store symbolic links.
         */
} znode_phys_t;

/*
 * In-core vdev representation.
 */
struct vdev;
typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
    off_t offset, void *buf, size_t bytes);
typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
    void *buf, off_t offset, size_t bytes);

typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;

typedef struct vdev {
        STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
        STAILQ_ENTRY(vdev) v_alllink;   /* link in global vdev list */
        vdev_list_t     v_children;     /* children of this vdev */
        char            *v_name;        /* vdev name */
        uint64_t        v_guid;         /* vdev guid */
        int             v_id;           /* index in parent */
        int             v_ashift;       /* offset to block shift */
        int             v_nparity;      /* # parity for raidz */
        int             v_nchildren;    /* # children */
        vdev_state_t    v_state;        /* current state */
        vdev_phys_read_t *v_phys_read;  /* read from raw leaf vdev */
        vdev_read_t     *v_read;        /* read from vdev */
        void            *v_read_priv;   /* private data for read function */
} vdev_t;

/*
 * In-core pool representation.
 */
typedef STAILQ_HEAD(spa_list, spa) spa_list_t;

typedef struct spa {
        STAILQ_ENTRY(spa) spa_link;     /* link in global pool list */
        char            *spa_name;      /* pool name */
        uint64_t        spa_guid;       /* pool guid */
        uint64_t        spa_txg;        /* most recent transaction */
        struct uberblock spa_uberblock; /* best uberblock so far */
        vdev_list_t     spa_vdevs;      /* list of all toplevel vdevs */
        objset_phys_t   spa_mos;        /* MOS for this pool */
        objset_phys_t   spa_root_objset; /* current mounted ZPL objset */
} spa_t;