xref: /illumos-gate/usr/src/grub/grub-0.97/stage2/fsys_zfs.c (revision a6f561b4aee75d0d028e7b36b151c8ed8a86bc76)
1 /*
2  *  GRUB  --  GRand Unified Bootloader
3  *  Copyright (C) 1999,2000,2001,2002,2003,2004  Free Software Foundation, Inc.
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License
16  *  along with this program; if not, write to the Free Software
17  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18  */
19 
20 /*
21  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
22  * Use is subject to license terms.
23  */
24 
25 /*
26  * Copyright (c) 2012 by Delphix. All rights reserved.
27  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28  */
29 
30 /*
31  * The zfs plug-in routines for GRUB are:
32  *
33  * zfs_mount() - locates a valid uberblock of the root pool and reads
34  *		in its MOS at the memory address MOS.
35  *
36  * zfs_open() - locates a plain file object by following the MOS
37  *		and places its dnode at the memory address DNODE.
38  *
39  * zfs_read() - reads in the data blocks pointed to by the DNODE.
40  *
41  * ZFS_SCRATCH is used as a working area.
42  *
43  * (memory addr)   MOS      DNODE	ZFS_SCRATCH
44  *		    |         |          |
45  *	    +-------V---------V----------V---------------+
46  *   memory |       | dnode   | dnode    |  scratch      |
47  *	    |       | 512B    | 512B     |  area         |
48  *	    +--------------------------------------------+
49  */
50 
51 #ifdef	FSYS_ZFS
52 
53 #include "shared.h"
54 #include "filesys.h"
55 #include "fsys_zfs.h"
56 
57 /* cache for a file block of the currently zfs_open()-ed file */
58 static void *file_buf = NULL;
59 static uint64_t file_start = 0;
60 static uint64_t file_end = 0;
61 
62 /* cache for a dnode block */
63 static dnode_phys_t *dnode_buf = NULL;
64 static dnode_phys_t *dnode_mdn = NULL;
65 static uint64_t dnode_start = 0;
66 static uint64_t dnode_end = 0;
67 
68 static uint64_t pool_guid = 0;
69 static uberblock_t current_uberblock;
70 static char *stackbase;
71 
72 decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
73 {
74 	{"inherit", 0},			/* ZIO_COMPRESS_INHERIT */
75 	{"on", lzjb_decompress}, 	/* ZIO_COMPRESS_ON */
76 	{"off", 0},			/* ZIO_COMPRESS_OFF */
77 	{"lzjb", lzjb_decompress},	/* ZIO_COMPRESS_LZJB */
78 	{"empty", 0},			/* ZIO_COMPRESS_EMPTY */
79 	{"gzip-1", 0},			/* ZIO_COMPRESS_GZIP_1 */
80 	{"gzip-2", 0},			/* ZIO_COMPRESS_GZIP_2 */
81 	{"gzip-3", 0},			/* ZIO_COMPRESS_GZIP_3 */
82 	{"gzip-4", 0},			/* ZIO_COMPRESS_GZIP_4 */
83 	{"gzip-5", 0},			/* ZIO_COMPRESS_GZIP_5 */
84 	{"gzip-6", 0},			/* ZIO_COMPRESS_GZIP_6 */
85 	{"gzip-7", 0},			/* ZIO_COMPRESS_GZIP_7 */
86 	{"gzip-8", 0},			/* ZIO_COMPRESS_GZIP_8 */
87 	{"gzip-9", 0},			/* ZIO_COMPRESS_GZIP_9 */
88 	{"zle", 0},			/* ZIO_COMPRESS_ZLE */
89 	{"lz4", lz4_decompress}		/* ZIO_COMPRESS_LZ4 */
90 };
91 
92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93 
94 /*
95  * Our own version of bcmp().
96  */
97 static int
98 zfs_bcmp(const void *s1, const void *s2, size_t n)
99 {
100 	const uchar_t *ps1 = s1;
101 	const uchar_t *ps2 = s2;
102 
103 	if (s1 != s2 && n != 0) {
104 		do {
105 			if (*ps1++ != *ps2++)
106 				return (1);
107 		} while (--n != 0);
108 	}
109 
110 	return (0);
111 }
112 
113 /*
114  * Our own version of log2().  Same thing as highbit()-1.
115  */
116 static int
117 zfs_log2(uint64_t num)
118 {
119 	int i = 0;
120 
121 	while (num > 1) {
122 		i++;
123 		num = num >> 1;
124 	}
125 
126 	return (i);
127 }
128 
129 /* Checksum Functions */
130 static void
131 zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
132 {
133 	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
134 }
135 
136 /* Checksum Table and Values */
137 zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
138 	{{NULL,			NULL},			0, 0,	"inherit"},
139 	{{NULL,			NULL},			0, 0,	"on"},
140 	{{zio_checksum_off,	zio_checksum_off},	0, 0,	"off"},
141 	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1,	"label"},
142 	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1,	"gang_header"},
143 	{{NULL,			NULL},			0, 0,	"zilog"},
144 	{{fletcher_2_native,	fletcher_2_byteswap},	0, 0,	"fletcher2"},
145 	{{fletcher_4_native,	fletcher_4_byteswap},	1, 0,	"fletcher4"},
146 	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 0,	"SHA256"},
147 	{{NULL,			NULL},			0, 0,	"zilog2"},
148 };
149 
150 /*
151  * zio_checksum_verify: Provides support for checksum verification.
152  *
153  * Fletcher2, Fletcher4, and SHA256 are supported.
154  *
155  * Return:
156  * 	-1 = Failure
157  *	 0 = Success
158  */
159 static int
160 zio_checksum_verify(blkptr_t *bp, char *data, int size)
161 {
162 	zio_cksum_t zc = bp->blk_cksum;
163 	uint32_t checksum = BP_GET_CHECKSUM(bp);
164 	int byteswap = BP_SHOULD_BYTESWAP(bp);
165 	zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
166 	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
167 	zio_cksum_t actual_cksum, expected_cksum;
168 
169 	/* byteswap is not supported */
170 	if (byteswap)
171 		return (-1);
172 
173 	if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
174 		return (-1);
175 
176 	if (ci->ci_eck) {
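		/*
		 * Embedded checksum: the stored checksum lives in a zio_eck_t
		 * at the tail of the buffer.  Swap in the verifier from the
		 * blkptr while computing, then restore and compare against
		 * the stored value.
		 */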
177 		expected_cksum = zec->zec_cksum;
178 		zec->zec_cksum = zc;
179 		ci->ci_func[0](data, size, &actual_cksum);
180 		zec->zec_cksum = expected_cksum;
181 		zc = expected_cksum;
182 
183 	} else {
184 		ci->ci_func[byteswap](data, size, &actual_cksum);
185 	}
186 
187 	if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
188 	    (actual_cksum.zc_word[1] - zc.zc_word[1]) |
189 	    (actual_cksum.zc_word[2] - zc.zc_word[2]) |
190 	    (actual_cksum.zc_word[3] - zc.zc_word[3]))
191 		return (-1);
192 
193 	return (0);
194 }
195 
196 /*
197  * vdev_label_start returns the physical disk offset (in bytes) of
198  * label "l".
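 * Labels 0 and 1 sit at the front of the device; labels 2 and 3 at the end.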
199  */
200 static uint64_t
201 vdev_label_start(uint64_t psize, int l)
202 {
203 	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
204 	    0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
205 }
206 
207 /*
208  * vdev_uberblock_compare takes two uberblock structures and returns an integer
209  * indicating the more recent of the two.
210  * 	Return Value = 1 if ub2 is more recent
211  * 	Return Value = -1 if ub1 is more recent
212  * The most recent uberblock is determined using its transaction number and
213  * timestamp.  The uberblock with the highest transaction number is
214  * considered "newer".  If the transaction numbers of the two blocks match, the
215  * timestamps are compared to determine the "newer" of the two.
216  */
217 static int
218 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
219 {
220 	if (ub1->ub_txg < ub2->ub_txg)
221 		return (-1);
222 	if (ub1->ub_txg > ub2->ub_txg)
223 		return (1);
224 
225 	if (ub1->ub_timestamp < ub2->ub_timestamp)
226 		return (-1);
227 	if (ub1->ub_timestamp > ub2->ub_timestamp)
228 		return (1);
229 
230 	return (0);
231 }
232 
233 /*
234  * Three pieces of information are needed to verify an uberblock: the magic
235  * number, the version number, and the checksum.
236  *
237  * Return:
238  *     0 - Success
239  *    -1 - Failure
240  */
241 static int
242 uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
243 {
244 	blkptr_t bp;
245 
246 	BP_ZERO(&bp);
247 	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
248 	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
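	/* the label checksum's verifier is the uberblock's on-disk byte offset */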
249 	ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);
250 
251 	if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
252 		return (-1);
253 
254 	if (uber->ub_magic == UBERBLOCK_MAGIC &&
255 	    SPA_VERSION_IS_SUPPORTED(uber->ub_version))
256 		return (0);
257 
258 	return (-1);
259 }
260 
261 /*
262  * Find the best uberblock.
263  * Return:
264  *    Success - Pointer to the best uberblock.
265  *    Failure - NULL
266  */
267 static uberblock_t *
268 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
269 {
270 	uberblock_t *ubbest = NULL;
271 	uberblock_t *ubnext;
272 	uint64_t offset, ub_size;
273 	int i;
274 
275 	ub_size = VDEV_UBERBLOCK_SIZE(ashift);
276 
277 	for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
278 		ubnext = (uberblock_t *)ub_array;
279 		ub_array += ub_size;
280 		offset = (sector << SPA_MINBLOCKSHIFT) +
281 		    VDEV_UBERBLOCK_OFFSET(ashift, i);
282 
283 		if (uberblock_verify(ubnext, ub_size, offset) != 0)
284 			continue;
285 
286 		if (ubbest == NULL ||
287 		    vdev_uberblock_compare(ubnext, ubbest) > 0)
288 			ubbest = ubnext;
289 	}
290 
291 	return (ubbest);
292 }
293 
294 /*
295  * Read a block of data based on the gang block address dva,
296  * and put its data in buf.
297  *
298  * Return:
299  *	0 - success
300  *	1 - failure
301  */
302 static int
303 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
304 {
305 	zio_gbh_phys_t *zio_gb;
306 	uint64_t offset, sector;
307 	blkptr_t tmpbp;
308 	int i;
309 
310 	zio_gb = (zio_gbh_phys_t *)stack;
311 	stack += SPA_GANGBLOCKSIZE;
312 	offset = DVA_GET_OFFSET(dva);
313 	sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
314 
315 	/* read in the gang block header */
316 	if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
317 		grub_printf("failed to read in a gang block header\n");
318 		return (1);
319 	}
320 
321 	/* self-checksumming the gang block header */
322 	BP_ZERO(&tmpbp);
323 	BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
324 	BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
325 	ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
326 	    DVA_GET_OFFSET(dva), bp->blk_birth, 0);
327 	if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
328 		grub_printf("failed to checksum a gang block header\n");
329 		return (1);
330 	}
331 
332 	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
333 		if (zio_gb->zg_blkptr[i].blk_birth == 0)
334 			continue;
335 
336 		if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
337 			return (1);
338 		buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
339 	}
340 
341 	return (0);
342 }
343 
344 /*
345  * Read in a block of raw data to buf.
346  *
347  * Return:
348  *	0 - success
349  *	1 - failure
350  */
351 static int
352 zio_read_data(blkptr_t *bp, void *buf, char *stack)
353 {
354 	int i, psize;
355 
356 	psize = BP_GET_PSIZE(bp);
357 
358 	/* pick a good dva from the block pointer */
359 	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
360 		uint64_t offset, sector;
361 
362 		if (bp->blk_dva[i].dva_word[0] == 0 &&
363 		    bp->blk_dva[i].dva_word[1] == 0)
364 			continue;
365 
366 		if (DVA_GET_GANG(&bp->blk_dva[i])) {
367 			if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
368 				return (0);
369 		} else {
370 			/* read in a data block */
371 			offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
372 			sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
373 			if (devread(sector, 0, psize, buf) != 0)
374 				return (0);
375 		}
376 	}
377 
378 	return (1);
379 }
380 
381 /*
382  * Read in a block of data, verify its checksum, decompress if needed,
383  * and put the uncompressed data in buf.
384  *
385  * Return:
386  *	0 - success
387  *	errnum - failure
388  */
389 static int
390 zio_read(blkptr_t *bp, void *buf, char *stack)
391 {
392 	int lsize, psize, comp;
393 	char *retbuf;
394 
395 	comp = BP_GET_COMPRESS(bp);
396 	lsize = BP_GET_LSIZE(bp);
397 	psize = BP_GET_PSIZE(bp);
398 
399 	if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
400 	    (comp != ZIO_COMPRESS_OFF &&
401 	    decomp_table[comp].decomp_func == NULL)) {
402 		grub_printf("compression algorithm not supported\n");
403 		return (ERR_FSYS_CORRUPT);
404 	}
405 
406 	if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
407 		grub_printf("not enough memory allocated\n");
408 		return (ERR_WONT_FIT);
409 	}
410 
411 	retbuf = buf;
412 	if (comp != ZIO_COMPRESS_OFF) {
413 		buf = stack;
414 		stack += psize;
415 	}
416 
417 	if (zio_read_data(bp, buf, stack) != 0) {
418 		grub_printf("zio_read_data failed\n");
419 		return (ERR_FSYS_CORRUPT);
420 	}
421 
422 	if (zio_checksum_verify(bp, buf, psize) != 0) {
423 		grub_printf("checksum verification failed\n");
424 		return (ERR_FSYS_CORRUPT);
425 	}
426 
427 	if (comp != ZIO_COMPRESS_OFF) {
428 		if (decomp_table[comp].decomp_func(buf, retbuf, psize,
429 		    lsize) != 0) {
430 			grub_printf("zio_read decompression failed\n");
431 			return (ERR_FSYS_CORRUPT);
432 		}
433 	}
434 
435 	return (0);
436 }
437 
438 /*
439  * Read the data block for a given block id into buf,
440  * using the stack as a scratch area.
441  *
442  * Return:
443  * 	0 - success
444  * 	errnum - failure
445  */
446 static int
447 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
448 {
449 	int idx, level;
450 	blkptr_t *bp_array = dn->dn_blkptr;
451 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
452 	blkptr_t *bp, *tmpbuf;
453 
454 	bp = (blkptr_t *)stack;
455 	stack += sizeof (blkptr_t);
456 
457 	tmpbuf = (blkptr_t *)stack;
458 	stack += 1<<dn->dn_indblkshift;
459 
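	/*
	 * Walk the indirect block tree from the top level down to level 0,
	 * at each level selecting the block pointer that covers blkid and
	 * reading the next level into tmpbuf (or into buf at level 0).
	 */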
460 	for (level = dn->dn_nlevels - 1; level >= 0; level--) {
461 		idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
462 		*bp = bp_array[idx];
463 		if (level == 0)
464 			tmpbuf = buf;
465 		if (BP_IS_HOLE(bp)) {
466 			grub_memset(buf, 0,
467 			    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
468 			break;
469 		} else if (errnum = zio_read(bp, tmpbuf, stack)) {
470 			return (errnum);
471 		}
472 
473 		bp_array = tmpbuf;
474 	}
475 
476 	return (0);
477 }
478 
479 /*
480  * mzap_lookup: Looks up property described by "name" and returns the value
481  * in "value".
482  *
483  * Return:
484  *	0 - success
485  *	errnum - failure
486  */
487 static int
488 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
489 	uint64_t *value)
490 {
491 	int i, chunks;
492 	mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
493 
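	/* the first MZAP_ENT_LEN bytes hold the mzap header, hence the -1 */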
494 	chunks = objsize / MZAP_ENT_LEN - 1;
495 	for (i = 0; i < chunks; i++) {
496 		if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
497 			*value = mzap_ent[i].mze_value;
498 			return (0);
499 		}
500 	}
501 
502 	return (ERR_FSYS_CORRUPT);
503 }
504 
505 static uint64_t
506 zap_hash(uint64_t salt, const char *name)
507 {
508 	static uint64_t table[256];
509 	const uint8_t *cp;
510 	uint8_t c;
511 	uint64_t crc = salt;
512 
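	/* lazily build the CRC-64 table (ZFS_CRC64_POLY) on first use */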
513 	if (table[128] == 0) {
514 		uint64_t *ct;
515 		int i, j;
516 		for (i = 0; i < 256; i++) {
517 			for (ct = table + i, *ct = i, j = 8; j > 0; j--)
518 				*ct = (*ct >> 1) ^ (-(*ct & 1) &
519 				    ZFS_CRC64_POLY);
520 		}
521 	}
522 
523 	if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
524 		errnum = ERR_FSYS_CORRUPT;
525 		return (0);
526 	}
527 
528 	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
529 		crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
530 
531 	/*
532 	 * Only use 28 bits, since we need 4 bits in the cookie for the
533 	 * collision differentiator.  We MUST use the high bits, since
534 	 * those are the ones that we first pay attention to when
535 	 * choosing the bucket.
536 	 */
537 	crc &= ~((1ULL << (64 - 28)) - 1);
538 
539 	return (crc);
540 }
541 
542 /*
543  * Only to be used on 8-bit arrays.
544  * array_len is actual len in bytes (not encoded le_value_length).
545  * buf is null-terminated.
546  */
547 static int
548 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
549     int array_len, const char *buf)
550 {
551 	int bseen = 0;
552 
553 	while (bseen < array_len) {
554 		struct zap_leaf_array *la =
555 		    &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
556 		int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
557 
558 		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
559 			return (0);
560 
561 		if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
562 			break;
563 		chunk = la->la_next;
564 		bseen += toread;
565 	}
566 	return (bseen == array_len);
567 }
568 
569 /*
570  * Given a zap_leaf_phys_t, walk through the zap leaf chunks to get the
571  * value for the property "name".
572  *
573  * Return:
574  *	0 - success
575  *	errnum - failure
576  */
577 static int
578 zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
579     const char *name, uint64_t *value)
580 {
581 	uint16_t chunk;
582 	struct zap_leaf_entry *le;
583 
584 	/* Verify if this is a valid leaf block */
585 	if (l->l_hdr.lh_block_type != ZBT_LEAF)
586 		return (ERR_FSYS_CORRUPT);
587 	if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
588 		return (ERR_FSYS_CORRUPT);
589 
590 	for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
591 	    chunk != CHAIN_END; chunk = le->le_next) {
592 
593 		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
594 			return (ERR_FSYS_CORRUPT);
595 
596 		le = ZAP_LEAF_ENTRY(l, blksft, chunk);
597 
598 		/* Verify the chunk entry */
599 		if (le->le_type != ZAP_CHUNK_ENTRY)
600 			return (ERR_FSYS_CORRUPT);
601 
602 		if (le->le_hash != h)
603 			continue;
604 
605 		if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
606 		    le->le_name_length, name)) {
607 
608 			struct zap_leaf_array *la;
609 			uint8_t *ip;
610 
611 			if (le->le_int_size != 8 || le->le_value_length != 1)
612 				return (ERR_FSYS_CORRUPT);
613 
614 			/* get the uint64_t property value */
615 			la = &ZAP_LEAF_CHUNK(l, blksft,
616 			    le->le_value_chunk).l_array;
617 			ip = la->la_array;
618 
619 			*value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
620 			    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
621 			    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
622 			    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
623 
624 			return (0);
625 		}
626 	}
627 
628 	return (ERR_FSYS_CORRUPT);
629 }
630 
631 /*
632  * Fat ZAP lookup
633  *
634  * Return:
635  *	0 - success
636  *	errnum - failure
637  */
638 static int
639 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
640     const char *name, uint64_t *value, char *stack)
641 {
642 	zap_leaf_phys_t *l;
643 	uint64_t hash, idx, blkid;
644 	int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
645 
646 	/* Verify if this is a fat zap header block */
647 	if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
648 	    zap->zap_flags != 0)
649 		return (ERR_FSYS_CORRUPT);
650 
651 	hash = zap_hash(zap->zap_salt, name);
652 	if (errnum)
653 		return (errnum);
654 
655 	/* get block id from index */
656 	if (zap->zap_ptrtbl.zt_numblks != 0) {
657 		/* external pointer tables not supported */
658 		return (ERR_FSYS_CORRUPT);
659 	}
660 	idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
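	/*
	 * The pointer table is embedded in the second half of the zap header
	 * block; the index skips half a block's worth of uint64_t entries.
	 */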
661 	blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
662 
663 	/* Get the leaf block */
664 	l = (zap_leaf_phys_t *)stack;
665 	stack += 1<<blksft;
666 	if ((1<<blksft) < sizeof (zap_leaf_phys_t))
667 		return (ERR_FSYS_CORRUPT);
668 	if (errnum = dmu_read(zap_dnode, blkid, l, stack))
669 		return (errnum);
670 
671 	return (zap_leaf_lookup(l, blksft, hash, name, value));
672 }
673 
674 /*
675  * Read in the data of a zap object and find the value for a matching
676  * property name.
677  *
678  * Return:
679  *	0 - success
680  *	errnum - failure
681  */
682 static int
683 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
684     char *stack)
685 {
686 	uint64_t block_type;
687 	int size;
688 	void *zapbuf;
689 
690 	/* Read in the first block of the zap object data. */
691 	zapbuf = stack;
692 	size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
693 	stack += size;
694 
695 	if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
696 		return (errnum);
697 
698 	block_type = *((uint64_t *)zapbuf);
699 
700 	if (block_type == ZBT_MICRO) {
701 		return (mzap_lookup(zapbuf, size, name, val));
702 	} else if (block_type == ZBT_HEADER) {
703 		/* this is a fat zap */
704 		return (fzap_lookup(zap_dnode, zapbuf, name,
705 		    val, stack));
706 	}
707 
708 	return (ERR_FSYS_CORRUPT);
709 }
710 
711 typedef struct zap_attribute {
712 	int za_integer_length;
713 	uint64_t za_num_integers;
714 	uint64_t za_first_integer;
715 	char *za_name;
716 } zap_attribute_t;
717 
718 typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
719 
720 static int
721 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
722 {
723 	uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
724 	zap_attribute_t za;
725 	int i;
726 	mzap_phys_t *mzp = (mzap_phys_t *)stack;
727 	stack += size;
728 
729 	if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
730 		return (errnum);
731 
732 	/*
733 	 * Iteration over fatzap objects has not yet been implemented.
734 	 * If we encounter a pool in which there are more features for
735 	 * read than can fit inside a microzap (i.e., more than 2048
736 	 * features for read), we can add support for fatzap iteration.
737 	 * For now, fail.
738 	 */
739 	if (mzp->mz_block_type != ZBT_MICRO) {
740 		grub_printf("feature information stored in fatzap, pool "
741 		    "version not supported\n");
742 		return (1);
743 	}
744 
745 	za.za_integer_length = 8;
746 	za.za_num_integers = 1;
747 	for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
748 		mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
749 		int err;
750 
751 		za.za_first_integer = mzep->mze_value;
752 		za.za_name = mzep->mze_name;
753 		err = cb(&za, arg, stack);
754 		if (err != 0)
755 			return (err);
756 	}
757 
758 	return (0);
759 }
760 
761 /*
762  * Get the dnode of an object number from the metadnode of an object set.
763  *
764  * Input
765  *	mdn - metadnode to get the object dnode
766  *	objnum - object number for the object dnode
767  *	buf - data buffer that holds the returning dnode
768  *	stack - scratch area
769  *
770  * Return:
771  *	0 - success
772  *	errnum - failure
773  */
774 static int
775 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
776 	char *stack)
777 {
778 	uint64_t blkid, blksz; /* the block id this object dnode is in */
779 	int epbs; /* shift of number of dnodes in a block */
780 	int idx; /* index within a block */
781 	dnode_phys_t *dnbuf;
782 
783 	blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
784 	epbs = zfs_log2(blksz) - DNODE_SHIFT;
785 	blkid = objnum >> epbs;
786 	idx = objnum & ((1<<epbs)-1);
787 
788 	if (dnode_buf != NULL && dnode_mdn == mdn &&
789 	    objnum >= dnode_start && objnum < dnode_end) {
790 		grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
791 		VERIFY_DN_TYPE(buf, type);
792 		return (0);
793 	}
794 
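	/*
	 * Use the global dnode cache only when this metadnode's block size
	 * matches the cache size; otherwise read into scratch space.
	 */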
795 	if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
796 		dnbuf = dnode_buf;
797 		dnode_mdn = mdn;
798 		dnode_start = blkid << epbs;
799 		dnode_end = (blkid + 1) << epbs;
800 	} else {
801 		dnbuf = (dnode_phys_t *)stack;
802 		stack += blksz;
803 	}
804 
805 	if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
806 		return (errnum);
807 
808 	grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
809 	VERIFY_DN_TYPE(buf, type);
810 
811 	return (0);
812 }
813 
814 /*
815  * Check if this is a special file that resides at the top
816  * dataset of the pool. Currently these are the GRUB menu,
817  * the boot signature, and the boot signature backup.
818  * str starts with '/'.
819  */
820 static int
821 is_top_dataset_file(char *str)
822 {
823 	char *tptr;
824 
825 	if ((tptr = grub_strstr(str, "menu.lst")) &&
826 	    (tptr[8] == '\0' || tptr[8] == ' ') &&
827 	    *(tptr-1) == '/')
828 		return (1);
829 
830 	if (grub_strncmp(str, BOOTSIGN_DIR"/",
831 	    grub_strlen(BOOTSIGN_DIR) + 1) == 0)
832 		return (1);
833 
834 	if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
835 		return (1);
836 
837 	return (0);
838 }
839 
840 static int
841 check_feature(zap_attribute_t *za, void *arg, char *stack)
842 {
843 	const char **names = arg;
844 	int i;
845 
846 	if (za->za_first_integer == 0)
847 		return (0);
848 
849 	for (i = 0; names[i] != NULL; i++) {
850 		if (grub_strcmp(za->za_name, names[i]) == 0) {
851 			return (0);
852 		}
853 	}
854 	grub_printf("missing feature for read '%s'\n", za->za_name);
855 	return (ERR_NEWER_VERSION);
856 }
857 
858 /*
859  * Get the file dnode for a given file name where mdn is the meta dnode
860  * for this ZFS object set. When found, place the file dnode in dn.
861  * The 'path' argument will be mangled.
862  *
863  * Return:
864  *	0 - success
865  *	errnum - failure
866  */
867 static int
868 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
869     char *stack)
870 {
871 	uint64_t objnum, version;
872 	char *cname, ch;
873 
874 	if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
875 	    dn, stack))
876 		return (errnum);
877 
878 	if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
879 		return (errnum);
880 	if (version > ZPL_VERSION)
881 		return (-1);
882 
883 	if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
884 		return (errnum);
885 
886 	if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
887 	    dn, stack))
888 		return (errnum);
889 
890 	/* skip leading slashes */
891 	while (*path == '/')
892 		path++;
893 
894 	while (*path && !grub_isspace(*path)) {
895 
896 		/* get the next component name */
897 		cname = path;
898 		while (*path && !grub_isspace(*path) && *path != '/')
899 			path++;
900 		ch = *path;
901 		*path = 0;   /* ensure null termination */
902 
903 		if (errnum = zap_lookup(dn, cname, &objnum, stack))
904 			return (errnum);
905 
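		/* directory entries encode the type in the upper bits; keep the object number */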
906 		objnum = ZFS_DIRENT_OBJ(objnum);
907 		if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
908 			return (errnum);
909 
910 		*path = ch;
911 		while (*path == '/')
912 			path++;
913 	}
914 
915 	/* We found the dnode for this file. Verify if it is a plain file. */
916 	VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
917 
918 	return (0);
919 }
920 
921 /*
922  * Get the default 'bootfs' property value from the rootpool.
923  *
924  * Return:
925  *	0 - success
926  *	errnum - failure
927  */
928 static int
929 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
930 {
931 	uint64_t objnum = 0;
932 	dnode_phys_t *dn = (dnode_phys_t *)stack;
933 	stack += DNODE_SIZE;
934 
935 	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
936 	    DMU_OT_OBJECT_DIRECTORY, dn, stack))
937 		return (errnum);
938 
939 	/*
940 	 * find the object number for 'pool_props', and get the dnode
941 	 * of the 'pool_props'.
942 	 */
943 	if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
944 		return (ERR_FILESYSTEM_NOT_FOUND);
945 
946 	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
947 		return (errnum);
948 
949 	if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
950 		return (ERR_FILESYSTEM_NOT_FOUND);
951 
952 	if (!objnum)
953 		return (ERR_FILESYSTEM_NOT_FOUND);
954 
955 	*obj = objnum;
956 	return (0);
957 }
958 
959 /*
960  * List of pool features that the grub implementation of ZFS supports for
961  * read. Note that features that are only required for write do not need
962  * to be listed here since grub opens pools in read-only mode.
963  */
964 static const char *spa_feature_names[] = {
965 	"org.illumos:lz4_compress",
966 	NULL
967 };
968 
969 /*
970  * Checks whether the MOS features that are active are supported by this
971  * (GRUB's) implementation of ZFS.
972  *
973  * Return:
974  *	0: Success.
975  *	errnum: Failure.
976  */
977 static int
978 check_mos_features(dnode_phys_t *mosmdn, char *stack)
979 {
980 	uint64_t objnum;
981 	dnode_phys_t *dn;
982 	uint8_t error = 0;
983 
984 	dn = (dnode_phys_t *)stack;
985 	stack += DNODE_SIZE;
986 
987 	if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
988 	    DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
989 		return (errnum);
990 
991 	/*
992 	 * Find the object number for 'features_for_read' and retrieve its
993 	 * corresponding dnode. Note that we don't check features_for_write
994 	 * because GRUB is not opening the pool for write.
995 	 */
996 	if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
997 	    stack)) != 0)
998 		return (errnum);
999 
1000 	if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1001 	    dn, stack)) != 0)
1002 		return (errnum);
1003 
1004 	return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1005 }
1006 
1007 /*
1008  * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
1009  * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
1010  * of pool/rootfs.
1011  *
1012  * If no fsname and no obj are given, return the DSL_DIR metadnode.
1013  * If fsname is given, return its metadnode and its matching object number.
1014  * If only obj is given, return the metadnode for this object number.
1015  *
1016  * Return:
1017  *	0 - success
1018  *	errnum - failure
1019  */
1020 static int
1021 get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
1022     dnode_phys_t *mdn, char *stack)
1023 {
1024 	uint64_t objnum, headobj;
1025 	char *cname, ch;
1026 	blkptr_t *bp;
1027 	objset_phys_t *osp;
1028 	int issnapshot = 0;
1029 	char *snapname;
1030 
1031 	if (fsname == NULL && obj) {
1032 		headobj = *obj;
1033 		goto skip;
1034 	}
1035 
1036 	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1037 	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
1038 		return (errnum);
1039 
1040 	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
1041 	    stack))
1042 		return (errnum);
1043 
1044 	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
1045 		return (errnum);
1046 
1047 	if (fsname == NULL) {
1048 		headobj =
1049 		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1050 		goto skip;
1051 	}
1052 
1053 	/* take out the pool name */
1054 	while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1055 		fsname++;
1056 
1057 	while (*fsname && !grub_isspace(*fsname)) {
1058 		uint64_t childobj;
1059 
1060 		while (*fsname == '/')
1061 			fsname++;
1062 
1063 		cname = fsname;
1064 		while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1065 			fsname++;
1066 		ch = *fsname;
1067 		*fsname = 0;
1068 
1069 		snapname = cname;
1070 		while (*snapname && !grub_isspace(*snapname) && *snapname !=
1071 		    '@')
1072 			snapname++;
1073 		if (*snapname == '@') {
1074 			issnapshot = 1;
1075 			*snapname = 0;
1076 		}
1077 		childobj =
1078 		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
1079 		if (errnum = dnode_get(mosmdn, childobj,
1080 		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
1081 			return (errnum);
1082 
1083 		if (zap_lookup(mdn, cname, &objnum, stack))
1084 			return (ERR_FILESYSTEM_NOT_FOUND);
1085 
1086 		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
1087 		    mdn, stack))
1088 			return (errnum);
1089 
1090 		*fsname = ch;
1091 		if (issnapshot)
1092 			*snapname = '@';
1093 	}
1094 	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1095 	if (obj)
1096 		*obj = headobj;
1097 
1098 skip:
1099 	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
1100 		return (errnum);
1101 	if (issnapshot) {
1102 		uint64_t snapobj;
1103 
1104 		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
1105 		    ds_snapnames_zapobj;
1106 
1107 		if (errnum = dnode_get(mosmdn, snapobj,
1108 		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
1109 			return (errnum);
1110 		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
1111 			return (ERR_FILESYSTEM_NOT_FOUND);
1112 		if (errnum = dnode_get(mosmdn, headobj,
1113 		    DMU_OT_DSL_DATASET, mdn, stack))
1114 			return (errnum);
1115 		if (obj)
1116 			*obj = headobj;
1117 	}
1118 
1119 	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
1120 	osp = (objset_phys_t *)stack;
1121 	stack += sizeof (objset_phys_t);
1122 	if (errnum = zio_read(bp, osp, stack))
1123 		return (errnum);
1124 
1125 	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);
1126 
1127 	return (0);
1128 }
1129 
1130 /*
1131  * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1132  *
1133  * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1134  *
1135  *      encoding method/host endian     (4 bytes)
1136  *      nvl_version                     (4 bytes)
1137  *      nvl_nvflag                      (4 bytes)
1138  *	encoded nvpairs:
1139  *		encoded size of the nvpair      (4 bytes)
1140  *		decoded size of the nvpair      (4 bytes)
1141  *		name string size                (4 bytes)
1142  *		name string data                (sizeof (NV_ALIGN4(string)))
1143  *		data type                       (4 bytes)
1144  *		# of elements in the nvpair     (4 bytes)
1145  *		data
1146  *      2 zeros for the last nvpair
1147  *		(end of the entire list)	(8 bytes)
1148  *
1149  * Return:
1150  *	0 - success
1151  *	1 - failure
1152  */
1153 static int
1154 nvlist_unpack(char *nvlist, char **out)
1155 {
1156 	/* Verify that the 1st and 2nd bytes in the nvlist are valid. */
1157 	if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1158 		return (1);
1159 
1160 	*out = nvlist + 4;
1161 	return (0);
1162 }
1163 
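/*
 * Return a pointer to the index-th nvlist in an XDR-encoded array of
 * nvlists by skipping over the preceding nvlists.
 */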
1164 static char *
1165 nvlist_array(char *nvlist, int index)
1166 {
1167 	int i, encode_size;
1168 
1169 	for (i = 0; i < index; i++) {
1170 		/* skip the header, nvl_version, and nvl_nvflag */
1171 		nvlist = nvlist + 4 * 2;
1172 
1173 		while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1174 			nvlist += encode_size; /* goto the next nvpair */
1175 
1176 		nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1177 	}
1178 
1179 	return (nvlist);
1180 }
1181 
1182 /*
1183  * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1184  * list following nvpair. If nvpair is NULL, the first pair is returned. If
1185  * nvpair is the last pair in the nvlist, NULL is returned.
1186  */
1187 static char *
1188 nvlist_next_nvpair(char *nvl, char *nvpair)
1189 {
1190 	char *cur, *prev;
1191 	int encode_size;
1192 
1193 	if (nvl == NULL)
1194 		return (NULL);
1195 
1196 	if (nvpair == NULL) {
1197 		/* skip over nvl_version and nvl_nvflag */
1198 		nvpair = nvl + 4 * 2;
1199 	} else {
1200 		/* skip to the next nvpair */
1201 		encode_size = BSWAP_32(*(uint32_t *)nvpair);
1202 		nvpair += encode_size;
1203 	}
1204 
1205 	/* 8 bytes of 0 marks the end of the list */
1206 	if (*(uint64_t *)nvpair == 0)
1207 		return (NULL);
1208 
1209 	return (nvpair);
1210 }
1211 
1212 /*
1213  * This function returns 0 on success and 1 on failure. On success, a string
1214  * containing the name of nvpair is saved in buf.
1215  */
1216 static int
1217 nvpair_name(char *nvp, char *buf, int buflen)
1218 {
1219 	int len;
1220 
1221 	/* skip over encode/decode size */
1222 	nvp += 4 * 2;
1223 
1224 	len = BSWAP_32(*(uint32_t *)nvp);
1225 	if (buflen < len + 1)
1226 		return (1);
1227 
1228 	grub_memmove(buf, nvp + 4, len);
1229 	buf[len] = '\0';
1230 
1231 	return (0);
1232 }
1233 
1234 /*
1235  * This function retrieves the type of an nvpair as an enumerated
1236  * data_type_t value. This is used to determine the appropriate type to pass to
1237  * nvpair_value().
1238  */
1239 static int
1240 nvpair_type(char *nvp)
1241 {
1242 	int name_len, type;
1243 
1244 	/* skip over encode/decode size */
1245 	nvp += 4 * 2;
1246 
1247 	/* skip over name_len */
1248 	name_len = BSWAP_32(*(uint32_t *)nvp);
1249 	nvp += 4;
1250 
1251 	/* skip over name */
1252 	nvp = nvp + ((name_len + 3) & ~3); /* align */
1253 
1254 	type = BSWAP_32(*(uint32_t *)nvp);
1255 
1256 	return (type);
1257 }
1258 
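/*
 * Decode the value of an XDR-encoded nvpair into val.  Returns 0 on success
 * and 1 on failure (e.g. a type mismatch).
 */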
1259 static int
1260 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1261 {
1262 	int name_len, type, slen;
1263 	char *strval = val;
1264 	uint64_t *intval = val;
1265 
1266 	/* skip over encode/decode size */
1267 	nvp += 4 * 2;
1268 
1269 	/* skip over name_len */
1270 	name_len = BSWAP_32(*(uint32_t *)nvp);
1271 	nvp += 4;
1272 
1273 	/* skip over name */
1274 	nvp = nvp + ((name_len + 3) & ~3); /* align */
1275 
1276 	/* skip over type */
1277 	type = BSWAP_32(*(uint32_t *)nvp);
1278 	nvp += 4;
1279 
1280 	if (type == valtype) {
1281 		int nelm;
1282 
1283 		nelm = BSWAP_32(*(uint32_t *)nvp);
1284 		if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1285 			return (1);
1286 		nvp += 4;
1287 
1288 		switch (valtype) {
1289 		case DATA_TYPE_BOOLEAN:
1290 			return (0);
1291 
1292 		case DATA_TYPE_STRING:
1293 			slen = BSWAP_32(*(uint32_t *)nvp);
1294 			nvp += 4;
1295 			grub_memmove(strval, nvp, slen);
1296 			strval[slen] = '\0';
1297 			return (0);
1298 
1299 		case DATA_TYPE_UINT64:
1300 			*intval = BSWAP_64(*(uint64_t *)nvp);
1301 			return (0);
1302 
1303 		case DATA_TYPE_NVLIST:
1304 			*(void **)val = (void *)nvp;
1305 			return (0);
1306 
1307 		case DATA_TYPE_NVLIST_ARRAY:
1308 			*(void **)val = (void *)nvp;
1309 			if (nelmp)
1310 				*nelmp = nelm;
1311 			return (0);
1312 		}
1313 	}
1314 
1315 	return (1);
1316 }
1317 
1318 static int
1319 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1320     int *nelmp)
1321 {
1322 	char *nvpair;
1323 
1324 	for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1325 	    nvpair != NULL;
1326 	    nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1327 		int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1328 		char *nvp_name = nvpair + 4 * 3;
1329 
1330 		if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1331 		    nvpair_type(nvpair) == valtype) {
1332 			return (nvpair_value(nvpair, val, valtype, nelmp));
1333 		}
1334 	}
1335 	return (1);
1336 }
1337 
1338 /*
1339  * Check if this vdev is online and is in a good state.
1340  */
1341 static int
1342 vdev_validate(char *nv)
1343 {
1344 	uint64_t ival;
1345 
1346 	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1347 	    DATA_TYPE_UINT64, NULL) == 0 ||
1348 	    nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1349 	    DATA_TYPE_UINT64, NULL) == 0 ||
1350 	    nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1351 	    DATA_TYPE_UINT64, NULL) == 0)
1352 		return (ERR_DEV_VALUES);
1353 
1354 	return (0);
1355 }
1356 
1357 /*
1358  * Get a valid vdev pathname/devid from the boot device.
1359  * The caller should have already allocated MAXPATHLEN bytes for bootpath and devid.
1360  */
1361 static int
1362 vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
1363     int is_spare)
1364 {
1365 	char type[16];
1366 
1367 	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
1368 	    NULL))
1369 		return (ERR_FSYS_CORRUPT);
1370 
1371 	if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
1372 		uint64_t guid;
1373 
1374 		if (vdev_validate(nv) != 0)
1375 			return (ERR_NO_BOOTPATH);
1376 
1377 		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
1378 		    &guid, DATA_TYPE_UINT64, NULL) != 0)
1379 			return (ERR_NO_BOOTPATH);
1380 
1381 		if (guid != inguid)
1382 			return (ERR_NO_BOOTPATH);
1383 
1384 		/* for a spare vdev, pick the disk labeled with "is_spare" */
1385 		if (is_spare) {
1386 			uint64_t spare = 0;
1387 			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
1388 			    &spare, DATA_TYPE_UINT64, NULL);
1389 			if (!spare)
1390 				return (ERR_NO_BOOTPATH);
1391 		}
1392 
1393 		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
1394 		    bootpath, DATA_TYPE_STRING, NULL) != 0)
1395 			bootpath[0] = '\0';
1396 
1397 		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
1398 		    devid, DATA_TYPE_STRING, NULL) != 0)
1399 			devid[0] = '\0';
1400 
1401 		if (grub_strlen(bootpath) >= MAXPATHLEN ||
1402 		    grub_strlen(devid) >= MAXPATHLEN)
1403 			return (ERR_WONT_FIT);
1404 
1405 		return (0);
1406 
1407 	} else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1408 	    grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1409 	    (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1410 		int nelm, i;
1411 		char *child;
1412 
1413 		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
1414 		    DATA_TYPE_NVLIST_ARRAY, &nelm))
1415 			return (ERR_FSYS_CORRUPT);
1416 
1417 		for (i = 0; i < nelm; i++) {
1418 			char *child_i;
1419 
1420 			child_i = nvlist_array(child, i);
1421 			if (vdev_get_bootpath(child_i, inguid, devid,
1422 			    bootpath, is_spare) == 0)
1423 				return (0);
1424 		}
1425 	}
1426 
1427 	return (ERR_NO_BOOTPATH);
1428 }
1429 
1430 /*
1431  * Check the disk label information and retrieve needed vdev name-value pairs.
1432  *
1433  * Return:
1434  *	0 - success
1435  *	ERR_* - failure
1436  */
1437 static int
1438 check_pool_label(uint64_t sector, char *stack, char *outdevid,
1439     char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
1440 {
1441 	vdev_phys_t *vdev;
1442 	uint64_t pool_state, txg = 0;
1443 	char *nvlist, *nv, *features;
1444 	uint64_t diskguid;
1445 
1446 	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);
1447 
1448 	/* Read in the vdev name-value pair list (112K). */
1449 	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
1450 		return (ERR_READ);
1451 
1452 	vdev = (vdev_phys_t *)stack;
1453 	stack += sizeof (vdev_phys_t);
1454 
1455 	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
1456 		return (ERR_FSYS_CORRUPT);
1457 
1458 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
1459 	    DATA_TYPE_UINT64, NULL))
1460 		return (ERR_FSYS_CORRUPT);
1461 
1462 	if (pool_state == POOL_STATE_DESTROYED)
1463 		return (ERR_FILESYSTEM_NOT_FOUND);
1464 
1465 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
1466 	    current_rootpool, DATA_TYPE_STRING, NULL))
1467 		return (ERR_FSYS_CORRUPT);
1468 
1469 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1470 	    DATA_TYPE_UINT64, NULL))
1471 		return (ERR_FSYS_CORRUPT);
1472 
1473 	/* not an active device */
1474 	if (txg == 0)
1475 		return (ERR_NO_BOOTPATH);
1476 
1477 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
1478 	    DATA_TYPE_UINT64, NULL))
1479 		return (ERR_FSYS_CORRUPT);
1480 	if (!SPA_VERSION_IS_SUPPORTED(*outversion))
1481 		return (ERR_NEWER_VERSION);
1482 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
1483 	    DATA_TYPE_NVLIST, NULL))
1484 		return (ERR_FSYS_CORRUPT);
1485 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
1486 	    DATA_TYPE_UINT64, NULL))
1487 		return (ERR_FSYS_CORRUPT);
1488 	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
1489 	    DATA_TYPE_UINT64, NULL) != 0)
1490 		return (ERR_FSYS_CORRUPT);
1491 	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
1492 		return (ERR_NO_BOOTPATH);
1493 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
1494 	    DATA_TYPE_UINT64, NULL))
1495 		return (ERR_FSYS_CORRUPT);
1496 
1497 	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
1498 	    &features, DATA_TYPE_NVLIST, NULL) == 0) {
1499 		char *nvp;
1500 		char *name = stack;
1501 		stack += MAXNAMELEN;
1502 
1503 		for (nvp = nvlist_next_nvpair(features, NULL);
1504 		    nvp != NULL;
1505 		    nvp = nvlist_next_nvpair(features, nvp)) {
1506 			zap_attribute_t za;
1507 
1508 			if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
1509 				return (ERR_FSYS_CORRUPT);
1510 
1511 			za.za_integer_length = 8;
1512 			za.za_num_integers = 1;
1513 			za.za_first_integer = 1;
1514 			za.za_name = name;
1515 			if (check_feature(&za, spa_feature_names, stack) != 0)
1516 				return (ERR_NEWER_VERSION);
1517 		}
1518 	}
1519 
1520 	return (0);
1521 }
1522 
1523 /*
1524  * zfs_mount() locates a valid uberblock of the root pool and reads in its MOS
1525  * to the memory address MOS.
1526  *
1527  * Return:
1528  *	1 - success
1529  *	0 - failure
1530  */
1531 int
1532 zfs_mount(void)
1533 {
1534 	char *stack, *ub_array;
1535 	int label = 0;
1536 	uberblock_t *ubbest;
1537 	objset_phys_t *osp;
1538 	char tmp_bootpath[MAXNAMELEN];
1539 	char tmp_devid[MAXNAMELEN];
1540 	uint64_t tmp_guid, ashift, version;
1541 	uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
1542 	int err = errnum; /* preserve previous errnum state */
1543 
1544 	/* if it's our first time here, zero the best uberblock out */
1545 	if (best_drive == 0 && best_part == 0 && find_best_root) {
1546 		grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
1547 		pool_guid = 0;
1548 	}
1549 
1550 	stackbase = ZFS_SCRATCH;
1551 	stack = stackbase;
1552 	ub_array = stack;
1553 	stack += VDEV_UBERBLOCK_RING;
1554 
1555 	osp = (objset_phys_t *)stack;
1556 	stack += sizeof (objset_phys_t);
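	/* align the device size down to a label boundary so vdev_label_start() finds the backup labels */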
1557 	adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));
1558 
1559 	for (label = 0; label < VDEV_LABELS; label++) {
1560 
1561 		/*
1562 		 * Some eltorito stacks don't give us a size and
1563 		 * we end up setting the size to MAXUINT; furthermore,
1564 		 * some of these devices stop working once a single
1565 		 * read past the end has been issued. Checking
1566 		 * for a maximum part_length and skipping the backup
1567 		 * labels at the end of the slice/partition/device
1568 		 * avoids breaking down on such devices.
1569 		 */
1570 		if (part_length == MAXUINT && label == 2)
1571 			break;
1572 
1573 		uint64_t sector = vdev_label_start(adjpl,
1574 		    label) >> SPA_MINBLOCKSHIFT;
1575 
1576 		/* Read in the uberblock ring (128K). */
1577 		if (devread(sector  +
1578 		    ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
1579 		    0, VDEV_UBERBLOCK_RING, ub_array) == 0)
1580 			continue;
1581 
1582 		if (check_pool_label(sector, stack, tmp_devid,
1583 		    tmp_bootpath, &tmp_guid, &ashift, &version))
1584 			continue;
1585 
1586 		if (pool_guid == 0)
1587 			pool_guid = tmp_guid;
1588 
1589 		if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
1590 		    zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
1591 			continue;
1592 
1593 		VERIFY_OS_TYPE(osp, DMU_OST_META);
1594 
1595 		if (version >= SPA_VERSION_FEATURES &&
1596 		    check_mos_features(&osp->os_meta_dnode, stack) != 0)
1597 			continue;
1598 
1599 		if (find_best_root && ((pool_guid != tmp_guid) ||
1600 		    vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
1601 			continue;
1602 
1603 		/* Got the MOS. Save it at the memory addr MOS. */
1604 		grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
1605 		grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
1606 		grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
1607 		grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
1608 		is_zfs_mount = 1;
1609 		return (1);
1610 	}
1611 
1612 	/*
1613 	 * While some fs implementations (e.g. tftp) rely on the global
1614 	 * errnum staying set, others won't reset it and will break
1615 	 * when issuing rawreads. The goal here is simply to keep zfs
1616 	 * mount attempts from disturbing the previous errnum state.
1617 	 */
1618 	errnum = err;
1619 	return (0);
1620 }
1621 
1622 /*
1623  * zfs_open() locates a file in the rootpool by following the
1624  * MOS and places the dnode of the file at the memory address DNODE.
1625  *
1626  * Return:
1627  *	1 - success
1628  *	0 - failure
1629  */
1630 int
1631 zfs_open(char *filename)
1632 {
1633 	char *stack;
1634 	dnode_phys_t *mdn;
1635 
1636 	file_buf = NULL;
1637 	stackbase = ZFS_SCRATCH;
1638 	stack = stackbase;
1639 
1640 	mdn = (dnode_phys_t *)stack;
1641 	stack += sizeof (dnode_phys_t);
1642 
1643 	dnode_mdn = NULL;
1644 	dnode_buf = (dnode_phys_t *)stack;
1645 	stack += 1<<DNODE_BLOCK_SHIFT;
1646 
1647 	/*
1648 	 * menu.lst is placed at the root pool filesystem level,
1649 	 * do not descend into 'current_bootfs'.
1650 	 */
1651 	if (is_top_dataset_file(filename)) {
1652 		if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
1653 			return (0);
1654 
1655 		current_bootfs_obj = 0;
1656 	} else {
1657 		if (current_bootfs[0] == '\0') {
1658 			/* Get the default root filesystem object number */
1659 			if (errnum = get_default_bootfsobj(MOS,
1660 			    &current_bootfs_obj, stack))
1661 				return (0);
1662 
1663 			if (errnum = get_objset_mdn(MOS, NULL,
1664 			    &current_bootfs_obj, mdn, stack))
1665 				return (0);
1666 		} else {
1667 			if (errnum = get_objset_mdn(MOS, current_bootfs,
1668 			    &current_bootfs_obj, mdn, stack)) {
1669 				grub_memset(current_bootfs, 0, MAXNAMELEN);
1670 				return (0);
1671 			}
1672 		}
1673 	}
1674 
1675 	if (dnode_get_path(mdn, filename, DNODE, stack)) {
1676 		errnum = ERR_FILE_NOT_FOUND;
1677 		return (0);
1678 	}
1679 
1680 	/* get the file size and set the file position to 0 */
1681 
1682 	/*
1683 	 * For DMU_OT_SA we will need to locate the SIZE attribute,
1684 	 * which could be either in the bonus buffer or the
1685 	 * "spill" block.
1686 	 */
1687 	if (DNODE->dn_bonustype == DMU_OT_SA) {
1688 		sa_hdr_phys_t *sahdrp;
1689 		int hdrsize;
1690 
1691 		if (DNODE->dn_bonuslen != 0) {
1692 			sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
1693 		} else {
1694 			if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1695 				blkptr_t *bp = &DNODE->dn_spill;
1696 				void *buf;
1697 
1698 				buf = (void *)stack;
1699 				stack += BP_GET_LSIZE(bp);
1700 
1701 				/* reset errnum to rawread() failure */
1702 				errnum = 0;
1703 				if (zio_read(bp, buf, stack) != 0) {
1704 					return (0);
1705 				}
1706 				sahdrp = buf;
1707 			} else {
1708 				errnum = ERR_FSYS_CORRUPT;
1709 				return (0);
1710 			}
1711 		}
1712 		hdrsize = SA_HDR_SIZE(sahdrp);
1713 		filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
1714 		    SA_SIZE_OFFSET);
1715 	} else {
1716 		filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
1717 	}
1718 	filepos = 0;
1719 
1720 	dnode_buf = NULL;
1721 	return (1);
1722 }
1723 
1724 /*
1725  * zfs_read() reads in the data blocks pointed to by the DNODE.
1726  *
1727  * Return:
1728  *	len - the length successfully read into the buffer
1729  *	0   - failure
1730  */
1731 int
1732 zfs_read(char *buf, int len)
1733 {
1734 	char *stack;
1735 	int blksz, length, movesize;
1736 
1737 	if (file_buf == NULL) {
1738 		file_buf = stackbase;
1739 		stackbase += SPA_MAXBLOCKSIZE;
1740 		file_start = file_end = 0;
1741 	}
1742 	stack = stackbase;
1743 
1744 	/*
1745 	 * If offset is in memory, move it into the buffer provided and return.
1746 	 * If the requested range is already in file_buf, copy it out and return.
1747 	if (filepos >= file_start && filepos+len <= file_end) {
1748 		grub_memmove(buf, file_buf + filepos - file_start, len);
1749 		filepos += len;
1750 		return (len);
1751 	}
1752 
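	/* dn_datablkszsec is in units of 512-byte sectors; convert to bytes */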
1753 	blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1754 
1755 	/*
1756 	 * The requested range is not in the cached file block, so we
1757 	 * will need to read it in chunks.  This could be optimized to
1758 	 * read in as large a chunk as there is space available, but for
1759 	 * now, this only reads in one data block at a time.
1760 	 */
1761 	length = len;
1762 	while (length) {
1763 		/*
1764 		 * Find requested blkid and the offset within that block.
1765 		 */
1766 		uint64_t blkid = filepos / blksz;
1767 
1768 		if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
1769 			return (0);
1770 
1771 		file_start = blkid * blksz;
1772 		file_end = file_start + blksz;
1773 
1774 		movesize = MIN(length, file_end - filepos);
1775 
1776 		grub_memmove(buf, file_buf + filepos - file_start,
1777 		    movesize);
1778 		buf += movesize;
1779 		length -= movesize;
1780 		filepos += movesize;
1781 	}
1782 
1783 	return (len);
1784 }
1785 
1786 /*
1787  * No-Op
1788  */
1789 int
1790 zfs_embed(int *start_sector, int needed_sectors)
1791 {
1792 	return (1);
1793 }
1794 
1795 #endif /* FSYS_ZFS */
1796