1 /* 2 * GRUB -- GRand Unified Bootloader 3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 */ 19 /* 20 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 21 * Use is subject to license terms. 22 */ 23 24 /* 25 * The zfs plug-in routines for GRUB are: 26 * 27 * zfs_mount() - locates a valid uberblock of the root pool and reads 28 * in its MOS at the memory address MOS. 29 * 30 * zfs_open() - locates a plain file object by following the MOS 31 * and places its dnode at the memory address DNODE. 32 * 33 * zfs_read() - read in the data blocks pointed by the DNODE. 34 * 35 * ZFS_SCRATCH is used as a working area. 
 *
 *	(memory addr)	   MOS	      DNODE	  ZFS_SCRATCH
 *			    |		|	       |
 *	    +-------V---------V----------V---------------+
 *   memory |	    |  dnode   |  dnode	 |   scratch	 |
 *	    |	    |  512B    |  512B	 |   area	 |
 *	    +--------------------------------------------+
 */

#ifdef	FSYS_ZFS

#include "shared.h"
#include "filesys.h"
#include "fsys_zfs.h"

/* cache for a file block of the currently zfs_open()-ed file */
static void *file_buf = NULL;
static uint64_t file_start = 0;
static uint64_t file_end = 0;

/* cache for a dnode block */
static dnode_phys_t *dnode_buf = NULL;
static dnode_phys_t *dnode_mdn = NULL;
static uint64_t dnode_start = 0;
static uint64_t dnode_end = 0;

/* GUID of the pool we most recently mounted; 0 until the first mount */
static uint64_t pool_guid = 0;
/* best (newest) uberblock found so far across mount attempts */
static uberblock_t current_uberblock;
/* base of the ZFS_SCRATCH working area (see diagram above) */
static char *stackbase;

/*
 * Decompression dispatch table, indexed by the ZIO_COMPRESS_* value
 * stored in a block pointer.  A NULL/0 function means "no decompression
 * supported/needed" for that algorithm.
 */
decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
{
	{"inherit", 0},			/* ZIO_COMPRESS_INHERIT */
	{"on", lzjb_decompress},	/* ZIO_COMPRESS_ON */
	{"off", 0},			/* ZIO_COMPRESS_OFF */
	{"lzjb", lzjb_decompress},	/* ZIO_COMPRESS_LZJB */
	{"empty", 0}			/* ZIO_COMPRESS_EMPTY */
};

static int zio_read_data(blkptr_t *bp, void *buf, char *stack);

/*
 * Our own version of bcmp().
 *
 * Returns 0 when the first n bytes of s1 and s2 match (or n == 0, or
 * s1 == s2), non-zero on the first mismatch.
 */
static int
zfs_bcmp(const void *s1, const void *s2, size_t n)
{
	const uchar_t *ps1 = s1;
	const uchar_t *ps2 = s2;

	if (s1 != s2 && n != 0) {
		do {
			if (*ps1++ != *ps2++)
				return (1);
		} while (--n != 0);
	}

	return (0);
}

/*
 * Our own version of log2().  Same thing as highbit()-1.
 */
static int
zfs_log2(uint64_t num)
{
	int i = 0;

	/* count how many times num can be halved before reaching 1 */
	while (num > 1) {
		i++;
		num = num >> 1;
	}

	return (i);
}

/* Checksum Functions */

/*
 * The "off" checksum: unconditionally produces an all-zero checksum.
 */
static void
zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}

/*
 * Checksum Table and Values
 *
 * Indexed by the ZIO_CHECKSUM_* value from a block pointer.  Each row is
 * {native func, byteswap func, correctable?, embedded-in-zbt?, name}
 * (the fourth field is the ci_zbt flag tested in zio_checksum_verify()).
 */
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
	NULL,			NULL,			0, 0,	"inherit",
	NULL,			NULL,			0, 0,	"on",
	zio_checksum_off,	zio_checksum_off,	0, 0,	"off",
	zio_checksum_SHA256,	zio_checksum_SHA256,	1, 1,	"label",
	zio_checksum_SHA256,	zio_checksum_SHA256,	1, 1,	"gang_header",
	fletcher_2_native,	fletcher_2_byteswap,	0, 1,	"zilog",
	fletcher_2_native,	fletcher_2_byteswap,	0, 0,	"fletcher2",
	fletcher_4_native,	fletcher_4_byteswap,	1, 0,	"fletcher4",
	zio_checksum_SHA256,	zio_checksum_SHA256,	1, 0,	"SHA256",
};

/*
 * zio_checksum_verify: Provides support for checksum verification.
 *
 * Fletcher2, Fletcher4, and SHA256 are supported.
136 * 137 * Return: 138 * -1 = Failure 139 * 0 = Success 140 */ 141 static int 142 zio_checksum_verify(blkptr_t *bp, char *data, int size) 143 { 144 zio_cksum_t zc = bp->blk_cksum; 145 uint32_t checksum = BP_GET_CHECKSUM(bp); 146 int byteswap = BP_SHOULD_BYTESWAP(bp); 147 zio_block_tail_t *zbt = (zio_block_tail_t *)(data + size) - 1; 148 zio_checksum_info_t *ci = &zio_checksum_table[checksum]; 149 zio_cksum_t actual_cksum, expected_cksum; 150 151 /* byteswap is not supported */ 152 if (byteswap) 153 return (-1); 154 155 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL) 156 return (-1); 157 158 if (ci->ci_zbt) { 159 expected_cksum = zbt->zbt_cksum; 160 zbt->zbt_cksum = zc; 161 ci->ci_func[0](data, size, &actual_cksum); 162 zbt->zbt_cksum = expected_cksum; 163 zc = expected_cksum; 164 165 } else { 166 ci->ci_func[byteswap](data, size, &actual_cksum); 167 } 168 169 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) | 170 (actual_cksum.zc_word[1] - zc.zc_word[1]) | 171 (actual_cksum.zc_word[2] - zc.zc_word[2]) | 172 (actual_cksum.zc_word[3] - zc.zc_word[3])) 173 return (-1); 174 175 return (0); 176 } 177 178 /* 179 * vdev_label_start returns the physical disk offset (in bytes) of 180 * label "l". 181 */ 182 static uint64_t 183 vdev_label_start(uint64_t psize, int l) 184 { 185 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? 186 0 : psize - VDEV_LABELS * sizeof (vdev_label_t))); 187 } 188 189 /* 190 * vdev_uberblock_compare takes two uberblock structures and returns an integer 191 * indicating the more recent of the two. 192 * Return Value = 1 if ub2 is more recent 193 * Return Value = -1 if ub1 is more recent 194 * The most recent uberblock is determined using its transaction number and 195 * timestamp. The uberblock with the highest transaction number is 196 * considered "newer". If the transaction numbers of the two blocks match, the 197 * timestamps are compared to determine the "newer" of the two. 
198 */ 199 static int 200 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2) 201 { 202 if (ub1->ub_txg < ub2->ub_txg) 203 return (-1); 204 if (ub1->ub_txg > ub2->ub_txg) 205 return (1); 206 207 if (ub1->ub_timestamp < ub2->ub_timestamp) 208 return (-1); 209 if (ub1->ub_timestamp > ub2->ub_timestamp) 210 return (1); 211 212 return (0); 213 } 214 215 /* 216 * Three pieces of information are needed to verify an uberblock: the magic 217 * number, the version number, and the checksum. 218 * 219 * Currently Implemented: version number, magic number 220 * Need to Implement: checksum 221 * 222 * Return: 223 * 0 - Success 224 * -1 - Failure 225 */ 226 static int 227 uberblock_verify(uberblock_phys_t *ub, uint64_t offset) 228 { 229 230 uberblock_t *uber = &ub->ubp_uberblock; 231 blkptr_t bp; 232 233 BP_ZERO(&bp); 234 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL); 235 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER); 236 ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0); 237 238 if (zio_checksum_verify(&bp, (char *)ub, UBERBLOCK_SIZE) != 0) 239 return (-1); 240 241 if (uber->ub_magic == UBERBLOCK_MAGIC && 242 uber->ub_version > 0 && uber->ub_version <= SPA_VERSION) 243 return (0); 244 245 return (-1); 246 } 247 248 /* 249 * Find the best uberblock. 250 * Return: 251 * Success - Pointer to the best uberblock. 
 *    Failure - NULL
 */
static uberblock_phys_t *
find_bestub(uberblock_phys_t *ub_array, uint64_t sector)
{
	uberblock_phys_t *ubbest = NULL;
	uint64_t offset;
	int i;

	/* scan every slot of the uberblock ring, keeping the newest valid one */
	for (i = 0; i < (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT); i++) {
		offset = (sector << SPA_MINBLOCKSHIFT) +
		    VDEV_UBERBLOCK_OFFSET(i);
		if (uberblock_verify(&ub_array[i], offset) == 0) {
			if (ubbest == NULL) {
				ubbest = &ub_array[i];
			} else if (vdev_uberblock_compare(
			    &(ub_array[i].ubp_uberblock),
			    &(ubbest->ubp_uberblock)) > 0) {
				ubbest = &ub_array[i];
			}
		}
	}

	return (ubbest);
}

/*
 * Read a block of data based on the gang block address dva,
 * and put its data in buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
{
	zio_gbh_phys_t *zio_gb;
	uint64_t offset, sector;
	blkptr_t tmpbp;
	int i;

	/* carve the gang-block header buffer out of the scratch area */
	zio_gb = (zio_gbh_phys_t *)stack;
	stack += SPA_GANGBLOCKSIZE;
	offset = DVA_GET_OFFSET(dva);
	sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);

	/* read in the gang block header */
	if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
		grub_printf("failed to read in a gang block header\n");
		return (1);
	}

	/* self-checksumming the gang block header */
	BP_ZERO(&tmpbp);
	BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
	BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
	ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
	    DVA_GET_OFFSET(dva), bp->blk_birth, 0);
	if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
		grub_printf("failed to checksum a gang block header\n");
		return (1);
	}

	/*
	 * Reassemble the data by reading each constituent block pointer;
	 * holes (blk_birth == 0) are skipped.  Note: void-pointer
	 * arithmetic on buf is a GCC extension (byte granularity).
	 */
	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		if (zio_gb->zg_blkptr[i].blk_birth == 0)
			continue;

		if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
			return (1);
		buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
	}

	return (0);
}

/*
 * Read in a block of raw data to buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
zio_read_data(blkptr_t *bp, void *buf, char *stack)
{
	int i, psize;

	psize = BP_GET_PSIZE(bp);

	/* pick a good dva from the block pointer */
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		uint64_t offset, sector;

		/* an all-zero DVA is unused -- try the next copy */
		if (bp->blk_dva[i].dva_word[0] == 0 &&
		    bp->blk_dva[i].dva_word[1] == 0)
			continue;

		if (DVA_GET_GANG(&bp->blk_dva[i])) {
			if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
				return (0);
		} else {
			/* read in a data block (devread returns nonzero on success) */
			offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
			sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
			if (devread(sector, 0, psize, buf))
				return (0);
		}
	}

	/* every DVA was a hole or failed to read */
	return (1);
}

/*
 * Read in a block of data, verify its checksum, decompress if needed,
 * and put the uncompressed data in buf.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zio_read(blkptr_t *bp, void *buf, char *stack)
{
	int lsize, psize, comp;
	char *retbuf;

	comp = BP_GET_COMPRESS(bp);
	lsize = BP_GET_LSIZE(bp);	/* logical (uncompressed) size */
	psize = BP_GET_PSIZE(bp);	/* physical (on-disk) size */

	/* reject unknown or unsupported compression algorithms */
	if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
	    (comp != ZIO_COMPRESS_OFF &&
	    decomp_table[comp].decomp_func == NULL)) {
		grub_printf("compression algorithm not supported\n");
		return (ERR_FSYS_CORRUPT);
	}

	/* the output buffer must not overlap the scratch area we will use */
	if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
		grub_printf("not enough memory allocated\n");
		return (ERR_WONT_FIT);
	}

	/*
	 * For compressed blocks, read the raw data into the scratch area
	 * first and decompress into the caller's buffer afterwards.
	 */
	retbuf = buf;
	if (comp != ZIO_COMPRESS_OFF) {
		buf = stack;
		stack += psize;
	}

	if (zio_read_data(bp, buf, stack)) {
		grub_printf("zio_read_data failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (zio_checksum_verify(bp, buf, psize) != 0) {
		grub_printf("checksum verification failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (comp != ZIO_COMPRESS_OFF)
		decomp_table[comp].decomp_func(buf, retbuf, psize, lsize);

	return (0);
}

/*
 * Get the block from a block id.
 * push the block onto the stack.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
{
	int idx, level;
	blkptr_t *bp_array = dn->dn_blkptr;
	/* epbs: log2 of the number of block pointers per indirect block */
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	blkptr_t *bp, *tmpbuf;

	bp = (blkptr_t *)stack;
	stack += sizeof (blkptr_t);

	tmpbuf = (blkptr_t *)stack;
	stack += 1<<dn->dn_indblkshift;

	/*
	 * Walk the indirect block tree from the top level down to the
	 * data block.  At level 0 the read lands directly in buf.
	 */
	for (level = dn->dn_nlevels - 1; level >= 0; level--) {
		idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
		*bp = bp_array[idx];
		if (level == 0)
			tmpbuf = buf;
		if (BP_IS_HOLE(bp)) {
			/* a hole reads back as zeros */
			grub_memset(buf, 0,
			    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			break;
		} else if (errnum = zio_read(bp, tmpbuf, stack)) {
			return (errnum);
		}

		bp_array = tmpbuf;
	}

	return (0);
}

/*
 * mzap_lookup: Looks up property described by "name" and returns the value
 * in "value".
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
mzap_lookup(mzap_phys_t *zapobj, int objsize, char *name,
    uint64_t *value)
{
	int i, chunks;
	mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;

	/* the first MZAP_ENT_LEN bytes are the header, hence the -1 */
	chunks = objsize/MZAP_ENT_LEN - 1;
	for (i = 0; i < chunks; i++) {
		if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
			*value = mzap_ent[i].mze_value;
			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}

/*
 * ZAP name hash: CRC64 of the name seeded with the per-zap salt.
 * The CRC64 table is built lazily on first use.
 */
static uint64_t
zap_hash(uint64_t salt, const char *name)
{
	static uint64_t table[256];
	const uint8_t *cp;
	uint8_t c;
	uint64_t crc = salt;

	/* build the CRC64 table once; table[128] doubles as the "done" flag */
	if (table[128] == 0) {
		uint64_t *ct;
		int i, j;
		for (i = 0; i < 256; i++) {
			for (ct = table + i, *ct = i, j = 8; j > 0; j--)
				*ct = (*ct >> 1) ^ (-(*ct & 1) &
				    ZFS_CRC64_POLY);
		}
	}

	/* a zero salt or a corrupted table means a corrupted zap */
	if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
		errnum = ERR_FSYS_CORRUPT;
		return (0);
	}

	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
		crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];

	/*
	 * Only use 28 bits, since we need 4 bits in the cookie for the
	 * collision differentiator.  We MUST use the high bits, since
	 * those are the ones that we first pay attention to when
	 * choosing the bucket.
	 */
	crc &= ~((1ULL << (64 - 28)) - 1);

	return (crc);
}

/*
 * Only to be used on 8-bit arrays.
 * array_len is actual len in bytes (not encoded le_value_length).
 * buf is null-terminated.
 */
static int
zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
    int array_len, const char *buf)
{
	int bseen = 0;

	/* follow the chunk chain, comparing ZAP_LEAF_ARRAY_BYTES at a time */
	while (bseen < array_len) {
		struct zap_leaf_array *la =
		    &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
		int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (0);

		if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
			break;
		chunk = la->la_next;
		bseen += toread;
	}
	return (bseen == array_len);
}

/*
 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
 * value for the property "name".
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
    const char *name, uint64_t *value)
{
	uint16_t chunk;
	struct zap_leaf_entry *le;

	/* Verify if this is a valid leaf block */
	if (l->l_hdr.lh_block_type != ZBT_LEAF)
		return (ERR_FSYS_CORRUPT);
	if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
		return (ERR_FSYS_CORRUPT);

	/* walk the hash-bucket chain for this hash value */
	for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
	    chunk != CHAIN_END; chunk = le->le_next) {

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (ERR_FSYS_CORRUPT);

		le = ZAP_LEAF_ENTRY(l, blksft, chunk);

		/* Verify the chunk entry */
		if (le->le_type != ZAP_CHUNK_ENTRY)
			return (ERR_FSYS_CORRUPT);

		if (le->le_hash != h)
			continue;

		if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
		    le->le_name_length, name)) {

			struct zap_leaf_array *la;
			uint8_t *ip;

			/* only a single uint64_t value is supported */
			if (le->le_int_size != 8 || le->le_value_length != 1)
				return (ERR_FSYS_CORRUPT);

			/* get the uint64_t property value */
			la = &ZAP_LEAF_CHUNK(l, blksft,
			    le->le_value_chunk).l_array;
			ip = la->la_array;

			/* the value array is stored big-endian */
			*value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
			    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
			    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
			    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];

			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}

/*
 * Fat ZAP lookup
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
    char *name, uint64_t *value, char *stack)
{
	zap_leaf_phys_t *l;
	uint64_t hash, idx, blkid;
	int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);

	/* Verify if this is a fat zap header block */
	if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
	    zap->zap_flags != 0)
		return (ERR_FSYS_CORRUPT);

	hash = zap_hash(zap->zap_salt, name);
	if (errnum)
		return (errnum);

	/* get block id from index */
	if (zap->zap_ptrtbl.zt_numblks != 0) {
		/* external pointer tables not supported */
		return (ERR_FSYS_CORRUPT);
	}
	idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
	/* the embedded pointer table lives in the second half of the block */
	blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];

	/* Get the leaf block */
	l = (zap_leaf_phys_t *)stack;
	stack += 1<<blksft;
	if ((1<<blksft) < sizeof (zap_leaf_phys_t))
		return (ERR_FSYS_CORRUPT);
	if (errnum = dmu_read(zap_dnode, blkid, l, stack))
		return (errnum);

	return (zap_leaf_lookup(l, blksft, hash, name, value));
}

/*
 * Read in the data of a zap object and find the value for a matching
 * property name.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_lookup(dnode_phys_t *zap_dnode, char *name, uint64_t *val, char *stack)
{
	uint64_t block_type;
	int size;
	void *zapbuf;

	/* Read in the first block of the zap object data. */
	zapbuf = stack;
	size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	stack += size;
	if (errnum = dmu_read(zap_dnode, 0, zapbuf, stack))
		return (errnum);

	/* dispatch on micro vs. fat zap */
	block_type = *((uint64_t *)zapbuf);

	if (block_type == ZBT_MICRO) {
		return (mzap_lookup(zapbuf, size, name, val));
	} else if (block_type == ZBT_HEADER) {
		/* this is a fat zap */
		return (fzap_lookup(zap_dnode, zapbuf, name,
		    val, stack));
	}

	return (ERR_FSYS_CORRUPT);
}

/*
 * Get the dnode of an object number from the metadnode of an object set.
690 * 691 * Input 692 * mdn - metadnode to get the object dnode 693 * objnum - object number for the object dnode 694 * buf - data buffer that holds the returning dnode 695 * stack - scratch area 696 * 697 * Return: 698 * 0 - success 699 * errnum - failure 700 */ 701 static int 702 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf, 703 char *stack) 704 { 705 uint64_t blkid, blksz; /* the block id this object dnode is in */ 706 int epbs; /* shift of number of dnodes in a block */ 707 int idx; /* index within a block */ 708 dnode_phys_t *dnbuf; 709 710 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT; 711 epbs = zfs_log2(blksz) - DNODE_SHIFT; 712 blkid = objnum >> epbs; 713 idx = objnum & ((1<<epbs)-1); 714 715 if (dnode_buf != NULL && dnode_mdn == mdn && 716 objnum >= dnode_start && objnum < dnode_end) { 717 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE); 718 VERIFY_DN_TYPE(buf, type); 719 return (0); 720 } 721 722 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) { 723 dnbuf = dnode_buf; 724 dnode_mdn = mdn; 725 dnode_start = blkid << epbs; 726 dnode_end = (blkid + 1) << epbs; 727 } else { 728 dnbuf = (dnode_phys_t *)stack; 729 stack += blksz; 730 } 731 732 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack)) 733 return (errnum); 734 735 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE); 736 VERIFY_DN_TYPE(buf, type); 737 738 return (0); 739 } 740 741 /* 742 * Check if this is a special file that resides at the top 743 * dataset of the pool. Currently this is the GRUB menu, 744 * boot signature and boot signature backup. 745 * str starts with '/'. 
 */
static int
is_top_dataset_file(char *str)
{
	char *tptr;

	/* "<anything>/menu.lst" optionally followed by arguments */
	if ((tptr = grub_strstr(str, "menu.lst")) &&
	    (tptr[8] == '\0' || tptr[8] == ' ') &&
	    *(tptr-1) == '/')
		return (1);

	if (grub_strncmp(str, BOOTSIGN_DIR"/",
	    grub_strlen(BOOTSIGN_DIR) + 1) == 0)
		return (1);

	if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
		return (1);

	return (0);
}

/*
 * Get the file dnode for a given file name where mdn is the meta dnode
 * for this ZFS object set.  When found, place the file dnode in dn.
 * The 'path' argument will be mangled.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
    char *stack)
{
	uint64_t objnum, version;
	char *cname, ch;

	if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
	    dn, stack))
		return (errnum);

	/* refuse ZPL versions newer than we understand */
	if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
		return (errnum);
	if (version > ZPL_VERSION)
		return (-1);

	if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
		return (errnum);

	if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
	    dn, stack))
		return (errnum);

	/* skip leading slashes */
	while (*path == '/')
		path++;

	/* walk the path one component at a time */
	while (*path && !isspace(*path)) {

		/* get the next component name */
		cname = path;
		while (*path && !isspace(*path) && *path != '/')
			path++;
		ch = *path;
		*path = 0;	/* ensure null termination */

		if (errnum = zap_lookup(dn, cname, &objnum, stack))
			return (errnum);

		objnum = ZFS_DIRENT_OBJ(objnum);
		if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
			return (errnum);

		*path = ch;
		while (*path == '/')
			path++;
	}

	/* We found the dnode for this file. Verify if it is a plain file. */
	VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);

	return (0);
}

/*
 * Get the default 'bootfs' property value from the rootpool.
 *
 * Return:
 *	0 - success
 *	errnum -failure
 */
static int
get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
{
	uint64_t objnum = 0;
	dnode_phys_t *dn = (dnode_phys_t *)stack;
	stack += DNODE_SIZE;

	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, dn, stack))
		return (errnum);

	/*
	 * find the object number for 'pool_props', and get the dnode
	 * of the 'pool_props'.
	 */
	if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
		return (errnum);

	if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
		return (ERR_FILESYSTEM_NOT_FOUND);

	/* a zero object number means bootfs was never set */
	if (!objnum)
		return (ERR_FILESYSTEM_NOT_FOUND);

	*obj = objnum;
	return (0);
}

/*
 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
 * of pool/rootfs.
 *
 * If no fsname and no obj are given, return the DSL_DIR metadnode.
 * If fsname is given, return its metadnode and its matching object number.
 * If only obj is given, return the metadnode for this object number.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
    dnode_phys_t *mdn, char *stack)
{
	uint64_t objnum, headobj;
	char *cname, ch;
	blkptr_t *bp;
	objset_phys_t *osp;
	int issnapshot = 0;
	char *snapname;

	/* object number given directly: no name walk needed */
	if (fsname == NULL && obj) {
		headobj = *obj;
		goto skip;
	}

	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
		return (errnum);

	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
	    stack))
		return (errnum);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
		return (errnum);

	if (fsname == NULL) {
		headobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
		goto skip;
	}

	/* take out the pool name */
	while (*fsname && !isspace(*fsname) && *fsname != '/')
		fsname++;

	/* descend one dataset component per iteration */
	while (*fsname && !isspace(*fsname)) {
		uint64_t childobj;

		while (*fsname == '/')
			fsname++;

		cname = fsname;
		while (*fsname && !isspace(*fsname) && *fsname != '/')
			fsname++;
		ch = *fsname;
		*fsname = 0;

		/* a '@' inside the component marks a snapshot name */
		snapname = cname;
		while (*snapname && !isspace(*snapname) && *snapname != '@')
			snapname++;
		if (*snapname == '@') {
			issnapshot = 1;
			*snapname = 0;
		}
		childobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
		if (errnum = dnode_get(mosmdn, childobj,
		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
			return (errnum);

		if (zap_lookup(mdn, cname, &objnum, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);

		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
		    mdn, stack))
			return (errnum);

		*fsname = ch;
		if (issnapshot)
			*snapname = '@';
	}
	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
	if (obj)
		*obj = headobj;

skip:
	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
		return (errnum);
	if (issnapshot) {
		uint64_t snapobj;

		/* resolve the snapshot name against the head dataset */
		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
		    ds_snapnames_zapobj;

		if (errnum = dnode_get(mosmdn, snapobj,
		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
			return (errnum);
		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);
		if (errnum = dnode_get(mosmdn, headobj,
		    DMU_OT_DSL_DATASET, mdn, stack))
			return (errnum);
		if (obj)
			*obj = headobj;
	}

	/* finally read the dataset's object set and hand back its metadnode */
	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	if (errnum = zio_read(bp, osp, stack))
		return (errnum);

	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);

	return (0);
}

/*
 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
 *
 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
 *
 *      encoding method/host endian     (4 bytes)
 *      nvl_version                     (4 bytes)
 *      nvl_nvflag                      (4 bytes)
 *	encoded nvpairs:
 *		encoded size of the nvpair      (4 bytes)
 *		decoded size of the nvpair      (4 bytes)
 *		name string size                (4 bytes)
 *		name string data                (sizeof(NV_ALIGN4(string))
 *		data type                       (4 bytes)
 *		# of elements in the nvpair     (4 bytes)
 *		data
 *      2 zero's for the last nvpair
 *		(end of the entire list)	(8 bytes)
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
nvlist_unpack(char *nvlist, char **out)
{
	/* Verify if the 1st and 2nd byte in the nvlist are valid. */
	if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
		return (1);

	/* skip the 4-byte encoding/endian header */
	nvlist += 4;
	*out = nvlist;
	return (0);
}

/*
 * Return a pointer to the index-th nvlist in a packed nvlist array
 * by skipping over the preceding complete nvlists.
 */
static char *
nvlist_array(char *nvlist, int index)
{
	int i, encode_size;

	for (i = 0; i < index; i++) {
		/* skip the header, nvl_version, and nvl_nvflag */
		nvlist = nvlist + 4 * 2;

		while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
			nvlist += encode_size; /* goto the next nvpair */

		nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
	}

	return (nvlist);
}

/*
 * Look up "name" in a packed nvlist and copy its value of type valtype
 * into val (and the element count into *nelmp for nvlist arrays).
 * Returns 0 on success, 1 when the pair is not found or malformed.
 */
static int
nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
    int *nelmp)
{
	int name_len, type, slen, encode_size;
	char *nvpair, *nvp_name, *strval = val;
	uint64_t *intval = val;

	/* skip the header, nvl_version, and nvl_nvflag */
	nvlist = nvlist + 4 * 2;

	/*
	 * Loop thru the nvpair list
	 * The XDR representation of an integer is in big-endian byte order.
	 */
	while (encode_size = BSWAP_32(*(uint32_t *)nvlist)) {

		nvpair = nvlist + 4 * 2; /* skip the encode/decode size */

		name_len = BSWAP_32(*(uint32_t *)nvpair);
		nvpair += 4;

		nvp_name = nvpair;
		nvpair = nvpair + ((name_len + 3) & ~3); /* align */

		type = BSWAP_32(*(uint32_t *)nvpair);
		nvpair += 4;

		if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
		    type == valtype) {
			int nelm;

			/* every pair must carry at least one element */
			if ((nelm = BSWAP_32(*(uint32_t *)nvpair)) < 1)
				return (1);
			nvpair += 4;

			switch (valtype) {
			case DATA_TYPE_STRING:
				/* copy the string body and NUL-terminate */
				slen = BSWAP_32(*(uint32_t *)nvpair);
				nvpair += 4;
				grub_memmove(strval, nvpair, slen);
				strval[slen] = '\0';
				return (0);

			case DATA_TYPE_UINT64:
				*intval = BSWAP_64(*(uint64_t *)nvpair);
				return (0);

			case DATA_TYPE_NVLIST:
				/* nested lists are returned by pointer */
				*(void **)val = (void *)nvpair;
				return (0);

			case DATA_TYPE_NVLIST_ARRAY:
				*(void **)val = (void *)nvpair;
				if (nelmp)
					*nelmp = nelm;
				return (0);
			}
		}

		nvlist += encode_size; /* goto the next nvpair */
	}

	return (1);
}

/*
 * Check if this vdev is online and is in a good state.
 * Returns 0 if none of the offline/faulted/removed flags are present.
 */
static int
vdev_validate(char *nv)
{
	uint64_t ival;

	/* the mere presence of any of these pairs marks the vdev bad */
	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
	    DATA_TYPE_UINT64, NULL) == 0 ||
	    nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
	    DATA_TYPE_UINT64, NULL) == 0 ||
	    nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
	    DATA_TYPE_UINT64, NULL) == 0)
		return (ERR_DEV_VALUES);

	return (0);
}

/*
 * Get a valid vdev pathname/devid from the boot device.
 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
 */
static int
vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
    int is_spare)
{
	char type[16];

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
	    NULL))
		return (ERR_FSYS_CORRUPT);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		uint64_t guid;

		if (vdev_validate(nv) != 0)
			return (ERR_NO_BOOTPATH);

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
		    &guid, DATA_TYPE_UINT64, NULL) != 0)
			return (ERR_NO_BOOTPATH);

		/* only the leaf vdev we actually booted from qualifies */
		if (guid != inguid)
			return (ERR_NO_BOOTPATH);

		/* for a spare vdev, pick the disk labeled with "is_spare" */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare, DATA_TYPE_UINT64, NULL);
			if (!spare)
				return (ERR_NO_BOOTPATH);
		}

		/* missing phys_path/devid are tolerated: return empty strings */
		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
		    bootpath, DATA_TYPE_STRING, NULL) != 0)
			bootpath[0] = '\0';

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
		    devid, DATA_TYPE_STRING, NULL) != 0)
			devid[0] = '\0';

		if (strlen(bootpath) >= MAXPATHLEN ||
		    strlen(devid) >= MAXPATHLEN)
			return (ERR_WONT_FIT);

		return (0);

	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		int nelm, i;
		char *child;

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
		    DATA_TYPE_NVLIST_ARRAY, &nelm))
			return (ERR_FSYS_CORRUPT);

		/* recurse into each child until the boot leaf is found */
		for (i = 0; i < nelm; i++) {
			char *child_i;

			child_i = nvlist_array(child, i);
			if (vdev_get_bootpath(child_i, inguid, devid,
			    bootpath, is_spare) == 0)
				return (0);
		}
	}

	return (ERR_NO_BOOTPATH);
}

/*
 * Check the disk label information and retrieve needed vdev name-value pairs.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
int
check_pool_label(uint64_t sector, char *stack, char *outdevid,
    char *outpath, uint64_t *outguid)
{
	vdev_phys_t *vdev;
	uint64_t pool_state, txg = 0;
	char *nvlist, *nv;
	uint64_t diskguid;
	uint64_t version;

	/* skip the boot block area at the front of the label */
	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);

	/* Read in the vdev name-value pair list (112K). */
	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
		return (ERR_READ);

	vdev = (vdev_phys_t *)stack;
	stack += sizeof (vdev_phys_t);

	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	if (pool_state == POOL_STATE_DESTROYED)
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
	    current_rootpool, DATA_TYPE_STRING, NULL))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/* not an active device */
	if (txg == 0)
		return (ERR_NO_BOOTPATH);

	/* refuse pool versions newer than we understand */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, &version,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (version > SPA_VERSION)
		return (ERR_NEWER_VERSION);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
	    DATA_TYPE_NVLIST, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
		return (ERR_NO_BOOTPATH);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	return (0);
}

/*
 * zfs_mount() locates a valid uberblock of the root pool and read in its MOS
 * to the memory address MOS.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_mount(void)
{
	char *stack;
	int label = 0;
	uberblock_phys_t *ub_array, *ubbest;
	objset_phys_t *osp;
	char tmp_bootpath[MAXNAMELEN];
	char tmp_devid[MAXNAMELEN];
	uint64_t tmp_guid;
	uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
	int err = errnum; /* preserve previous errnum state */

	/* if it's our first time here, zero the best uberblock out */
	if (best_drive == 0 && best_part == 0 && find_best_root) {
		grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
		pool_guid = 0;
	}

	/*
	 * Carve the uberblock ring and a temporary objset_phys out of the
	 * scratch area; the rest of the scratch is passed down as "stack".
	 */
	stackbase = ZFS_SCRATCH;
	stack = stackbase;
	ub_array = (uberblock_phys_t *)stack;
	stack += VDEV_UBERBLOCK_RING;

	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	/* Align the device size down to a whole number of labels. */
	adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));

	for (label = 0; label < VDEV_LABELS; label++) {

		/*
		 * some eltorito stacks don't give us a size and
		 * we end up setting the size to MAXUINT, further
		 * some of these devices stop working once a single
		 * read past the end has been issued. Checking
		 * for a maximum part_length and skipping the backup
		 * labels at the end of the slice/partition/device
		 * avoids breaking down on such devices.
		 */
		if (part_length == MAXUINT && label == 2)
			break;

		uint64_t sector = vdev_label_start(adjpl,
		    label) >> SPA_MINBLOCKSHIFT;

		/* Read in the uberblock ring (128K). */
		if (devread(sector +
		    ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >>
		    SPA_MINBLOCKSHIFT), 0, VDEV_UBERBLOCK_RING,
		    (char *)ub_array) == 0)
			continue;

		if ((ubbest = find_bestub(ub_array, sector)) != NULL &&
		    zio_read(&ubbest->ubp_uberblock.ub_rootbp, osp, stack)
		    == 0) {

			VERIFY_OS_TYPE(osp, DMU_OST_META);

			if (check_pool_label(sector, stack, tmp_devid,
			    tmp_bootpath, &tmp_guid))
				continue;
			/* First usable label decides which pool we're on. */
			if (pool_guid == 0)
				pool_guid = tmp_guid;

			/*
			 * When scanning for the best root, skip labels
			 * belonging to other pools and uberblocks that are
			 * not strictly newer than the best one seen so far.
			 */
			if (find_best_root && ((pool_guid != tmp_guid) ||
			    vdev_uberblock_compare(&ubbest->ubp_uberblock,
			    &(current_uberblock)) <= 0))
				continue;

			/* Got the MOS. Save it at the memory addr MOS. */
			grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
			grub_memmove(&current_uberblock,
			    &ubbest->ubp_uberblock, sizeof (uberblock_t));
			grub_memmove(current_bootpath, tmp_bootpath,
			    MAXNAMELEN);
			grub_memmove(current_devid, tmp_devid,
			    grub_strlen(tmp_devid));
			is_zfs_mount = 1;
			return (1);
		}
	}

	/*
	 * While some fs impls. (tftp) rely on setting and keeping
	 * global errnums set, others won't reset it and will break
	 * when issuing rawreads. The goal here is to simply not
	 * have zfs mount attempts impact the previous state.
	 */
	errnum = err;
	return (0);
}

/*
 * zfs_open() locates a file in the rootpool by following the
 * MOS and places the dnode of the file in the memory address DNODE.
1371 * 1372 * Return: 1373 * 1 - success 1374 * 0 - failure 1375 */ 1376 int 1377 zfs_open(char *filename) 1378 { 1379 char *stack; 1380 dnode_phys_t *mdn; 1381 1382 file_buf = NULL; 1383 stackbase = ZFS_SCRATCH; 1384 stack = stackbase; 1385 1386 mdn = (dnode_phys_t *)stack; 1387 stack += sizeof (dnode_phys_t); 1388 1389 dnode_mdn = NULL; 1390 dnode_buf = (dnode_phys_t *)stack; 1391 stack += 1<<DNODE_BLOCK_SHIFT; 1392 1393 /* 1394 * menu.lst is placed at the root pool filesystem level, 1395 * do not goto 'current_bootfs'. 1396 */ 1397 if (is_top_dataset_file(filename)) { 1398 if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack)) 1399 return (0); 1400 1401 current_bootfs_obj = 0; 1402 } else { 1403 if (current_bootfs[0] == '\0') { 1404 /* Get the default root filesystem object number */ 1405 if (errnum = get_default_bootfsobj(MOS, 1406 ¤t_bootfs_obj, stack)) 1407 return (0); 1408 1409 if (errnum = get_objset_mdn(MOS, NULL, 1410 ¤t_bootfs_obj, mdn, stack)) 1411 return (0); 1412 } else { 1413 if (errnum = get_objset_mdn(MOS, current_bootfs, 1414 ¤t_bootfs_obj, mdn, stack)) { 1415 grub_memset(current_bootfs, 0, MAXNAMELEN); 1416 return (0); 1417 } 1418 } 1419 } 1420 1421 if (dnode_get_path(mdn, filename, DNODE, stack)) { 1422 errnum = ERR_FILE_NOT_FOUND; 1423 return (0); 1424 } 1425 1426 /* get the file size and set the file position to 0 */ 1427 filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size; 1428 filepos = 0; 1429 1430 dnode_buf = NULL; 1431 return (1); 1432 } 1433 1434 /* 1435 * zfs_read reads in the data blocks pointed by the DNODE. 
1436 * 1437 * Return: 1438 * len - the length successfully read in to the buffer 1439 * 0 - failure 1440 */ 1441 int 1442 zfs_read(char *buf, int len) 1443 { 1444 char *stack; 1445 char *tmpbuf; 1446 int blksz, length, movesize; 1447 1448 if (file_buf == NULL) { 1449 file_buf = stackbase; 1450 stackbase += SPA_MAXBLOCKSIZE; 1451 file_start = file_end = 0; 1452 } 1453 stack = stackbase; 1454 1455 /* 1456 * If offset is in memory, move it into the buffer provided and return. 1457 */ 1458 if (filepos >= file_start && filepos+len <= file_end) { 1459 grub_memmove(buf, file_buf + filepos - file_start, len); 1460 filepos += len; 1461 return (len); 1462 } 1463 1464 blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT; 1465 1466 /* 1467 * Entire Dnode is too big to fit into the space available. We 1468 * will need to read it in chunks. This could be optimized to 1469 * read in as large a chunk as there is space available, but for 1470 * now, this only reads in one data block at a time. 1471 */ 1472 length = len; 1473 while (length) { 1474 /* 1475 * Find requested blkid and the offset within that block. 1476 */ 1477 uint64_t blkid = filepos / blksz; 1478 1479 if (errnum = dmu_read(DNODE, blkid, file_buf, stack)) 1480 return (0); 1481 1482 file_start = blkid * blksz; 1483 file_end = file_start + blksz; 1484 1485 movesize = MIN(length, file_end - filepos); 1486 1487 grub_memmove(buf, file_buf + filepos - file_start, 1488 movesize); 1489 buf += movesize; 1490 length -= movesize; 1491 filepos += movesize; 1492 } 1493 1494 return (len); 1495 } 1496 1497 /* 1498 * No-Op 1499 */ 1500 int 1501 zfs_embed(int *start_sector, int needed_sectors) 1502 { 1503 return (1); 1504 } 1505 1506 #endif /* FSYS_ZFS */ 1507