1 /*
2 * GRUB -- GRand Unified Bootloader
3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20 /*
21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
23 */
24
25 /*
26 * Copyright (c) 2013 by Delphix. All rights reserved.
27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 */
29
30 /*
31 * The zfs plug-in routines for GRUB are:
32 *
33 * zfs_mount() - locates a valid uberblock of the root pool and reads
34 * in its MOS at the memory address MOS.
35 *
36 * zfs_open() - locates a plain file object by following the MOS
37 * and places its dnode at the memory address DNODE.
38 *
39 * zfs_read() - reads in the data blocks pointed to by the DNODE.
40 *
41 * ZFS_SCRATCH is used as a working area.
42 *
43 * (memory addr) MOS DNODE ZFS_SCRATCH
44 * | | |
45 * +-------V---------V----------V---------------+
46 * memory | | dnode | dnode | scratch |
47 * | | 512B | 512B | area |
48 * +--------------------------------------------+
49 */
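/*
 * Illustrative call sequence (an added sketch, not part of the original
 * source): GRUB's filesystem switch is expected to drive the three entry
 * points roughly as below.  The file path shown is hypothetical.
 *
 *	if (zfs_mount()) {
 *		// uberblock found, MOS copied to the MOS address
 *		if (zfs_open("/boot/grub/menu.lst")) {
 *			// file dnode cached at the DNODE address,
 *			// filemax holds the size, filepos is 0
 *			char buf[512];
 *			int n = zfs_read(buf, sizeof (buf));
 *		}
 *	}
 */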
50
51 #ifdef FSYS_ZFS
52
53 #include "shared.h"
54 #include "filesys.h"
55 #include "fsys_zfs.h"
56
57 /* cache for a file block of the currently zfs_open()-ed file */
58 static void *file_buf = NULL;
59 static uint64_t file_start = 0;
60 static uint64_t file_end = 0;
61
62 /* cache for a dnode block */
63 static dnode_phys_t *dnode_buf = NULL;
64 static dnode_phys_t *dnode_mdn = NULL;
65 static uint64_t dnode_start = 0;
66 static uint64_t dnode_end = 0;
67
68 static uint64_t pool_guid = 0;
69 static uberblock_t current_uberblock;
70 static char *stackbase;
71
72 decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
73 {
74 {"inherit", 0}, /* ZIO_COMPRESS_INHERIT */
75 {"on", lzjb_decompress}, /* ZIO_COMPRESS_ON */
76 {"off", 0}, /* ZIO_COMPRESS_OFF */
77 {"lzjb", lzjb_decompress}, /* ZIO_COMPRESS_LZJB */
78 {"empty", 0}, /* ZIO_COMPRESS_EMPTY */
79 {"gzip-1", 0}, /* ZIO_COMPRESS_GZIP_1 */
80 {"gzip-2", 0}, /* ZIO_COMPRESS_GZIP_2 */
81 {"gzip-3", 0}, /* ZIO_COMPRESS_GZIP_3 */
82 {"gzip-4", 0}, /* ZIO_COMPRESS_GZIP_4 */
83 {"gzip-5", 0}, /* ZIO_COMPRESS_GZIP_5 */
84 {"gzip-6", 0}, /* ZIO_COMPRESS_GZIP_6 */
85 {"gzip-7", 0}, /* ZIO_COMPRESS_GZIP_7 */
86 {"gzip-8", 0}, /* ZIO_COMPRESS_GZIP_8 */
87 {"gzip-9", 0}, /* ZIO_COMPRESS_GZIP_9 */
88 {"zle", 0}, /* ZIO_COMPRESS_ZLE */
89 {"lz4", lz4_decompress} /* ZIO_COMPRESS_LZ4 */
90 };
91
92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93
94 /*
95 * Our own version of bcmp().
96 */
97 static int
98 zfs_bcmp(const void *s1, const void *s2, size_t n)
99 {
100 const uchar_t *ps1 = s1;
101 const uchar_t *ps2 = s2;
102
103 if (s1 != s2 && n != 0) {
104 do {
105 if (*ps1++ != *ps2++)
106 return (1);
107 } while (--n != 0);
108 }
109
110 return (0);
111 }
112
113 /*
114 * Our own version of log2(). Same thing as highbit()-1.
115 */
116 static int
117 zfs_log2(uint64_t num)
118 {
119 int i = 0;
120
121 while (num > 1) {
122 i++;
123 num = num >> 1;
124 }
125
126 return (i);
127 }
128
129 /* Checksum Functions */
130 static void
131 zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
132 {
133 ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
134 }
135
136 /* Checksum Table and Values */
137 zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
138 {{NULL, NULL}, 0, 0, "inherit"},
139 {{NULL, NULL}, 0, 0, "on"},
140 {{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
141 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
142 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
143 {{NULL, NULL}, 0, 0, "zilog"},
144 {{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
145 {{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
146 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
147 {{NULL, NULL}, 0, 0, "zilog2"},
148 };
149
150 /*
151 * zio_checksum_verify: Provides support for checksum verification.
152 *
153 * Fletcher2, Fletcher4, and SHA256 are supported.
154 *
155 * Return:
156 * -1 = Failure
157 * 0 = Success
158 */
159 static int
160 zio_checksum_verify(blkptr_t *bp, char *data, int size)
161 {
162 zio_cksum_t zc = bp->blk_cksum;
163 uint32_t checksum = BP_GET_CHECKSUM(bp);
164 int byteswap = BP_SHOULD_BYTESWAP(bp);
165 zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
166 zio_checksum_info_t *ci = &zio_checksum_table[checksum];
167 zio_cksum_t actual_cksum, expected_cksum;
168
169 if (byteswap) {
170 grub_printf("byteswap not supported\n");
171 return (-1);
172 }
173
174 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL) {
175 grub_printf("checksum algorithm %u not supported\n", checksum);
176 return (-1);
177 }
178
179 if (ci->ci_eck) {
180 expected_cksum = zec->zec_cksum;
181 zec->zec_cksum = zc;
182 ci->ci_func[0](data, size, &actual_cksum);
183 zec->zec_cksum = expected_cksum;
184 zc = expected_cksum;
185 } else {
186 ci->ci_func[byteswap](data, size, &actual_cksum);
187 }
188
189 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
190 (actual_cksum.zc_word[1] - zc.zc_word[1]) |
191 (actual_cksum.zc_word[2] - zc.zc_word[2]) |
192 (actual_cksum.zc_word[3] - zc.zc_word[3]))
193 return (-1);
194
195 return (0);
196 }
197
198 /*
199 * vdev_label_start returns the physical disk offset (in bytes) of
200 * label "l".
201 */
202 static uint64_t
203 vdev_label_start(uint64_t psize, int l)
204 {
205 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
206 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
207 }
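/*
 * Worked example (added for clarity, assuming the standard 256 KB
 * vdev_label_t and VDEV_LABELS == 4): labels 0 and 1 sit at the front of
 * the device and labels 2 and 3 at the end, so
 *
 *	vdev_label_start(psize, 0) == 0
 *	vdev_label_start(psize, 1) == 256K
 *	vdev_label_start(psize, 2) == psize - 512K
 *	vdev_label_start(psize, 3) == psize - 256K
 */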
208
209 /*
210 * vdev_uberblock_compare takes two uberblock structures and returns an integer
211 * indicating the more recent of the two.
212 * Return Value = 1 if ub2 is more recent
213 * Return Value = -1 if ub1 is more recent
214 * The most recent uberblock is determined using its transaction number and
215 * timestamp. The uberblock with the highest transaction number is
216 * considered "newer". If the transaction numbers of the two blocks match, the
217 * timestamps are compared to determine the "newer" of the two.
218 */
219 static int
220 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
221 {
222 if (ub1->ub_txg < ub2->ub_txg)
223 return (-1);
224 if (ub1->ub_txg > ub2->ub_txg)
225 return (1);
226
227 if (ub1->ub_timestamp < ub2->ub_timestamp)
228 return (-1);
229 if (ub1->ub_timestamp > ub2->ub_timestamp)
230 return (1);
231
232 return (0);
233 }
234
235 /*
236 * Three pieces of information are needed to verify an uberblock: the magic
237 * number, the version number, and the checksum.
238 *
239 * Return:
240 * 0 - Success
241 * -1 - Failure
242 */
243 static int
244 uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
245 {
246 blkptr_t bp;
247
248 BP_ZERO(&bp);
249 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
250 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
251 ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);
252
253 if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
254 return (-1);
255
256 if (uber->ub_magic == UBERBLOCK_MAGIC &&
257 SPA_VERSION_IS_SUPPORTED(uber->ub_version))
258 return (0);
259
260 return (-1);
261 }
262
263 /*
264 * Find the best uberblock.
265 * Return:
266 * Success - Pointer to the best uberblock.
267 * Failure - NULL
268 */
269 static uberblock_t *
270 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
271 {
272 uberblock_t *ubbest = NULL;
273 uberblock_t *ubnext;
274 uint64_t offset, ub_size;
275 int i;
276
277 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
278
279 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
280 ubnext = (uberblock_t *)ub_array;
281 ub_array += ub_size;
282 offset = (sector << SPA_MINBLOCKSHIFT) +
283 VDEV_UBERBLOCK_OFFSET(ashift, i);
284
285 if (uberblock_verify(ubnext, ub_size, offset) != 0)
286 continue;
287
288 if (ubbest == NULL ||
289 vdev_uberblock_compare(ubnext, ubbest) > 0)
290 ubbest = ubnext;
291 }
292
293 return (ubbest);
294 }
295
296 /*
297 * Read a block of data based on the gang block address dva,
298 * and put its data in buf.
299 *
300 * Return:
301 * 0 - success
302 * 1 - failure
303 */
304 static int
305 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
306 {
307 zio_gbh_phys_t *zio_gb;
308 uint64_t offset, sector;
309 blkptr_t tmpbp;
310 int i;
311
312 zio_gb = (zio_gbh_phys_t *)stack;
313 stack += SPA_GANGBLOCKSIZE;
314 offset = DVA_GET_OFFSET(dva);
315 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
316
317 /* read in the gang block header */
318 if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
319 grub_printf("failed to read in a gang block header\n");
320 return (1);
321 }
322
323 /* self-checksumming the gang block header */
324 BP_ZERO(&tmpbp);
325 BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
326 BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
327 ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
328 DVA_GET_OFFSET(dva), bp->blk_birth, 0);
329 if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
330 grub_printf("failed to checksum a gang block header\n");
331 return (1);
332 }
333
334 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
335 if (BP_IS_HOLE(&zio_gb->zg_blkptr[i]))
336 continue;
337
338 if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
339 return (1);
340 buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
341 }
342
343 return (0);
344 }
345
346 /*
347 * Read in a block of raw data to buf.
348 *
349 * Return:
350 * 0 - success
351 * 1 - failure
352 */
353 static int
354 zio_read_data(blkptr_t *bp, void *buf, char *stack)
355 {
356 int i, psize;
357
358 psize = BP_GET_PSIZE(bp);
359
360 /* pick a good dva from the block pointer */
361 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
362 uint64_t offset, sector;
363
364 if (bp->blk_dva[i].dva_word[0] == 0 &&
365 bp->blk_dva[i].dva_word[1] == 0)
366 continue;
367
368 if (DVA_GET_GANG(&bp->blk_dva[i])) {
369 if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
370 return (0);
371 } else {
372 /* read in a data block */
373 offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
374 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
375 if (devread(sector, 0, psize, buf) != 0)
376 return (0);
377 }
378 }
379
380 return (1);
381 }
382
383 /*
384 * buf must be at least BPE_GET_PSIZE(bp) bytes long (which will never be
385 * more than BPE_PAYLOAD_SIZE bytes).
386 */
387 static void
388 decode_embedded_bp_compressed(const blkptr_t *bp, void *buf)
389 {
390 int psize, i;
391 uint8_t *buf8 = buf;
392 uint64_t w = 0;
393 const uint64_t *bp64 = (const uint64_t *)bp;
394
395 psize = BPE_GET_PSIZE(bp);
396
397 /*
398 * Decode the words of the block pointer into the byte array.
399 * Low bits of first word are the first byte (little endian).
400 */
401 for (i = 0; i < psize; i++) {
402 if (i % sizeof (w) == 0) {
403 /* beginning of a word */
404 w = *bp64;
405 bp64++;
406 if (!BPE_IS_PAYLOADWORD(bp, bp64))
407 bp64++;
408 }
409 buf8[i] = BF64_GET(w, (i % sizeof (w)) * NBBY, NBBY);
410 }
411 }
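/*
 * Added note (assuming the standard 128-byte blkptr_t layout): an
 * embedded block pointer carries its payload in every 64-bit word of the
 * blkptr except blk_prop and blk_birth, which still hold metadata.  That
 * leaves 14 of the 16 words, i.e. BPE_PAYLOAD_SIZE == 112 bytes.  The
 * loop above therefore copies words 0-5 (the dva area), skips blk_prop,
 * continues through the pad/birth/fill/cksum words, and skips blk_birth,
 * reassembling the byte stream little-endian.
 */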
412
413 /*
414 * Fill in the buffer with the (decompressed) payload of the embedded
415 * blkptr_t. Takes into account compression and byteorder (the payload is
416 * treated as a stream of bytes).
417 * Return 0 on success, or ENOSPC if it won't fit in the buffer.
418 */
419 static int
420 decode_embedded_bp(const blkptr_t *bp, void *buf)
421 {
422 int comp;
423 int lsize, psize;
424 uint8_t *dst = buf;
425 uint64_t w = 0;
426
427 lsize = BPE_GET_LSIZE(bp);
428 psize = BPE_GET_PSIZE(bp);
429 comp = BP_GET_COMPRESS(bp);
430
431 if (comp != ZIO_COMPRESS_OFF) {
432 uint8_t dstbuf[BPE_PAYLOAD_SIZE];
433
434 if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
435 decomp_table[comp].decomp_func == NULL) {
436 grub_printf("compression algorithm not supported\n");
437 return (ERR_FSYS_CORRUPT);
438 }
439
440 decode_embedded_bp_compressed(bp, dstbuf);
441 decomp_table[comp].decomp_func(dstbuf, buf, psize, lsize);
442 } else {
443 decode_embedded_bp_compressed(bp, buf);
444 }
445
446 return (0);
447 }
448
449 /*
450 * Read in a block of data, verify its checksum, decompress if needed,
451 * and put the uncompressed data in buf.
452 *
453 * Return:
454 * 0 - success
455 * errnum - failure
456 */
457 static int
458 zio_read(blkptr_t *bp, void *buf, char *stack)
459 {
460 int lsize, psize, comp;
461 char *retbuf;
462
463 if (BP_IS_EMBEDDED(bp)) {
464 if (BPE_GET_ETYPE(bp) != BP_EMBEDDED_TYPE_DATA) {
465 grub_printf("unsupported embedded BP (type=%u)\n",
466 (int)BPE_GET_ETYPE(bp));
467 return (ERR_FSYS_CORRUPT);
468 }
469 return (decode_embedded_bp(bp, buf));
470 }
471
472 comp = BP_GET_COMPRESS(bp);
473 lsize = BP_GET_LSIZE(bp);
474 psize = BP_GET_PSIZE(bp);
475
476 if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
477 (comp != ZIO_COMPRESS_OFF &&
478 decomp_table[comp].decomp_func == NULL)) {
479 grub_printf("compression algorithm not supported\n");
480 return (ERR_FSYS_CORRUPT);
481 }
482
483 if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
484 grub_printf("not enough memory to fit %u bytes on stack\n",
485 lsize);
486 return (ERR_WONT_FIT);
487 }
488
489 retbuf = buf;
490 if (comp != ZIO_COMPRESS_OFF) {
491 buf = stack;
492 stack += psize;
493 }
494
495 if (zio_read_data(bp, buf, stack) != 0) {
496 grub_printf("zio_read_data failed\n");
497 return (ERR_FSYS_CORRUPT);
498 }
499
500 if (zio_checksum_verify(bp, buf, psize) != 0) {
501 grub_printf("checksum verification failed\n");
502 return (ERR_FSYS_CORRUPT);
503 }
504
505 if (comp != ZIO_COMPRESS_OFF) {
506 if (decomp_table[comp].decomp_func(buf, retbuf, psize,
507 lsize) != 0) {
508 grub_printf("zio_read decompression failed\n");
509 return (ERR_FSYS_CORRUPT);
510 }
511 }
512
513 return (0);
514 }
515
516 /*
517 * Read the data block with the given block id into buf, walking any
518 * indirect blocks and using the scratch stack for intermediate buffers.
519 *
520 * Return:
521 * 0 - success
522 * errnum - failure
523 */
524 static int
525 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
526 {
527 int idx, level;
528 blkptr_t *bp_array = dn->dn_blkptr;
529 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
530 blkptr_t *bp, *tmpbuf;
531
532 bp = (blkptr_t *)stack;
533 stack += sizeof (blkptr_t);
534
535 tmpbuf = (blkptr_t *)stack;
536 stack += 1<<dn->dn_indblkshift;
537
538 for (level = dn->dn_nlevels - 1; level >= 0; level--) {
539 idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
540 *bp = bp_array[idx];
541 if (level == 0)
542 tmpbuf = buf;
543 if (BP_IS_HOLE(bp)) {
544 grub_memset(buf, 0,
545 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
546 break;
547 } else if (errnum = zio_read(bp, tmpbuf, stack)) {
548 return (errnum);
549 }
550
551 bp_array = tmpbuf;
552 }
553
554 return (0);
555 }
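/*
 * Worked example (added for clarity, with assumed but typical sizes):
 * with 16 KB indirect blocks, dn_indblkshift == 14 and, since a blkptr_t
 * is 128 bytes (SPA_BLKPTRSHIFT == 7), epbs == 7, i.e. 128 block
 * pointers per indirect block.  For a two-level dnode (dn_nlevels == 2)
 * and blkid == 300, the walk above picks index 300 >> 7 == 2 at level 1
 * and index 300 & 127 == 44 at level 0.
 */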
556
557 /*
558 * mzap_lookup: Looks up property described by "name" and returns the value
559 * in "value".
560 *
561 * Return:
562 * 0 - success
563 * errnum - failure
564 */
565 static int
566 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
567 uint64_t *value)
568 {
569 int i, chunks;
570 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
571
572 chunks = objsize / MZAP_ENT_LEN - 1;
573 for (i = 0; i < chunks; i++) {
574 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
575 *value = mzap_ent[i].mze_value;
576 return (0);
577 }
578 }
579
580 return (ERR_FSYS_CORRUPT);
581 }
582
583 static uint64_t
584 zap_hash(uint64_t salt, const char *name)
585 {
586 static uint64_t table[256];
587 const uint8_t *cp;
588 uint8_t c;
589 uint64_t crc = salt;
590
591 if (table[128] == 0) {
592 uint64_t *ct;
593 int i, j;
594 for (i = 0; i < 256; i++) {
595 for (ct = table + i, *ct = i, j = 8; j > 0; j--)
596 *ct = (*ct >> 1) ^ (-(*ct & 1) &
597 ZFS_CRC64_POLY);
598 }
599 }
600
601 if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
602 errnum = ERR_FSYS_CORRUPT;
603 return (0);
604 }
605
606 for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
607 crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
608
609 /*
610 * Only use 28 bits, since we need 4 bits in the cookie for the
611 * collision differentiator. We MUST use the high bits, since
612 * those are the ones that we first pay attention to when
613 * choosing the bucket.
614 */
615 crc &= ~((1ULL << (64 - 28)) - 1);
616
617 return (crc);
618 }
619
620 /*
621 * Only to be used on 8-bit arrays.
622 * array_len is actual len in bytes (not encoded le_value_length).
623 * buf is null-terminated.
624 */
625 static int
626 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
627 int array_len, const char *buf)
628 {
629 int bseen = 0;
630
631 while (bseen < array_len) {
632 struct zap_leaf_array *la =
633 &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
634 int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
635
636 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
637 return (0);
638
639 if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
640 break;
641 chunk = la->la_next;
642 bseen += toread;
643 }
644 return (bseen == array_len);
645 }
646
647 /*
648 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
649 * value for the property "name".
650 *
651 * Return:
652 * 0 - success
653 * errnum - failure
654 */
655 static int
656 zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
657 const char *name, uint64_t *value)
658 {
659 uint16_t chunk;
660 struct zap_leaf_entry *le;
661
662 /* Verify if this is a valid leaf block */
663 if (l->l_hdr.lh_block_type != ZBT_LEAF)
664 return (ERR_FSYS_CORRUPT);
665 if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
666 return (ERR_FSYS_CORRUPT);
667
668 for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
669 chunk != CHAIN_END; chunk = le->le_next) {
670
671 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
672 return (ERR_FSYS_CORRUPT);
673
674 le = ZAP_LEAF_ENTRY(l, blksft, chunk);
675
676 /* Verify the chunk entry */
677 if (le->le_type != ZAP_CHUNK_ENTRY)
678 return (ERR_FSYS_CORRUPT);
679
680 if (le->le_hash != h)
681 continue;
682
683 if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
684 le->le_name_length, name)) {
685
686 struct zap_leaf_array *la;
687 uint8_t *ip;
688
689 if (le->le_int_size != 8 || le->le_value_length != 1)
690 return (ERR_FSYS_CORRUPT);
691
692 /* get the uint64_t property value */
693 la = &ZAP_LEAF_CHUNK(l, blksft,
694 le->le_value_chunk).l_array;
695 ip = la->la_array;
696
697 *value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
698 (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
699 (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
700 (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
701
702 return (0);
703 }
704 }
705
706 return (ERR_FSYS_CORRUPT);
707 }
708
709 /*
710 * Fat ZAP lookup
711 *
712 * Return:
713 * 0 - success
714 * errnum - failure
715 */
716 static int
717 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
718 const char *name, uint64_t *value, char *stack)
719 {
720 zap_leaf_phys_t *l;
721 uint64_t hash, idx, blkid;
722 int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
723
724 /* Verify if this is a fat zap header block */
725 if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
726 zap->zap_flags != 0)
727 return (ERR_FSYS_CORRUPT);
728
729 hash = zap_hash(zap->zap_salt, name);
730 if (errnum)
731 return (errnum);
732
733 /* get block id from index */
734 if (zap->zap_ptrtbl.zt_numblks != 0) {
735 /* external pointer tables not supported */
736 return (ERR_FSYS_CORRUPT);
737 }
738 idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
739 blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
740
741 /* Get the leaf block */
742 l = (zap_leaf_phys_t *)stack;
743 stack += 1<<blksft;
744 if ((1<<blksft) < sizeof (zap_leaf_phys_t))
745 return (ERR_FSYS_CORRUPT);
746 if (errnum = dmu_read(zap_dnode, blkid, l, stack))
747 return (errnum);
748
749 return (zap_leaf_lookup(l, blksft, hash, name, value));
750 }
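/*
 * Added note on the pointer-table arithmetic above, assuming the usual
 * ZAP_HASH_IDX() definition (the top zt_shift bits of the 64-bit hash):
 * when zt_numblks == 0 the pointer table is embedded in the second half
 * of the fat-zap header block, so a block of 1 << blksft bytes holds
 * 1 << (blksft - 3) 64-bit words and the table starts at word
 * 1 << (blksft - 3 - 1); "idx" then selects the leaf blkid within that
 * embedded table.
 */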
751
752 /*
753 * Read in the data of a zap object and find the value for a matching
754 * property name.
755 *
756 * Return:
757 * 0 - success
758 * errnum - failure
759 */
760 static int
761 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
762 char *stack)
763 {
764 uint64_t block_type;
765 int size;
766 void *zapbuf;
767
768 /* Read in the first block of the zap object data. */
769 zapbuf = stack;
770 size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
771 stack += size;
772
773 if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
774 return (errnum);
775
776 block_type = *((uint64_t *)zapbuf);
777
778 if (block_type == ZBT_MICRO) {
779 return (mzap_lookup(zapbuf, size, name, val));
780 } else if (block_type == ZBT_HEADER) {
781 /* this is a fat zap */
782 return (fzap_lookup(zap_dnode, zapbuf, name,
783 val, stack));
784 }
785
786 return (ERR_FSYS_CORRUPT);
787 }
788
789 typedef struct zap_attribute {
790 int za_integer_length;
791 uint64_t za_num_integers;
792 uint64_t za_first_integer;
793 char *za_name;
794 } zap_attribute_t;
795
796 typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
797
798 static int
799 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
800 {
801 uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
802 zap_attribute_t za;
803 int i;
804 mzap_phys_t *mzp = (mzap_phys_t *)stack;
805 stack += size;
806
807 if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
808 return (errnum);
809
810 /*
811 * Iteration over fatzap objects has not yet been implemented.
812 * If we encounter a pool in which there are more features for
813 * read than can fit inside a microzap (i.e., more than 2048
814 * features for read), we can add support for fatzap iteration.
815 * For now, fail.
816 */
817 if (mzp->mz_block_type != ZBT_MICRO) {
818 grub_printf("feature information stored in fatzap, pool "
819 "version not supported\n");
820 return (1);
821 }
822
823 za.za_integer_length = 8;
824 za.za_num_integers = 1;
825 for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
826 mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
827 int err;
828
829 za.za_first_integer = mzep->mze_value;
830 za.za_name = mzep->mze_name;
831 err = cb(&za, arg, stack);
832 if (err != 0)
833 return (err);
834 }
835
836 return (0);
837 }
838
839 /*
840 * Get the dnode of an object number from the metadnode of an object set.
841 *
842 * Input
843 * mdn - metadnode to get the object dnode
844 * objnum - object number for the object dnode
845 * type - if nonzero, object must be of this type
846 * buf - data buffer that holds the returning dnode
847 * stack - scratch area
848 *
849 * Return:
850 * 0 - success
851 * errnum - failure
852 */
853 static int
854 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
855 char *stack)
856 {
857 uint64_t blkid, blksz; /* the block id this object dnode is in */
858 int epbs; /* shift of number of dnodes in a block */
859 int idx; /* index within a block */
860 dnode_phys_t *dnbuf;
861
862 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
863 epbs = zfs_log2(blksz) - DNODE_SHIFT;
864 blkid = objnum >> epbs;
865 idx = objnum & ((1<<epbs)-1);
866
867 if (dnode_buf != NULL && dnode_mdn == mdn &&
868 objnum >= dnode_start && objnum < dnode_end) {
869 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
870 VERIFY_DN_TYPE(buf, type);
871 return (0);
872 }
873
874 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
875 dnbuf = dnode_buf;
876 dnode_mdn = mdn;
877 dnode_start = blkid << epbs;
878 dnode_end = (blkid + 1) << epbs;
879 } else {
880 dnbuf = (dnode_phys_t *)stack;
881 stack += blksz;
882 }
883
884 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
885 return (errnum);
886
887 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
888 VERIFY_DN_TYPE(buf, type);
889
890 return (0);
891 }
892
893 /*
894 * Check if this is a special file that resides at the top
895 * dataset of the pool. Currently this is the GRUB menu,
896 * boot signature and boot signature backup.
897 * str starts with '/'.
898 */
899 static int
900 is_top_dataset_file(char *str)
901 {
902 char *tptr;
903
904 if ((tptr = grub_strstr(str, "menu.lst")) &&
905 (tptr[8] == '\0' || tptr[8] == ' ') &&
906 *(tptr-1) == '/')
907 return (1);
908
909 if (grub_strncmp(str, BOOTSIGN_DIR"/",
910 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
911 return (1);
912
913 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
914 return (1);
915
916 return (0);
917 }
918
919 static int
920 check_feature(zap_attribute_t *za, void *arg, char *stack)
921 {
922 const char **names = arg;
923 int i;
924
925 if (za->za_first_integer == 0)
926 return (0);
927
928 for (i = 0; names[i] != NULL; i++) {
929 if (grub_strcmp(za->za_name, names[i]) == 0) {
930 return (0);
931 }
932 }
933 grub_printf("missing feature for read '%s'\n", za->za_name);
934 return (ERR_NEWER_VERSION);
935 }
936
937 /*
938 * Get the file dnode for a given file name where mdn is the meta dnode
939 * for this ZFS object set. When found, place the file dnode in dn.
940 * The 'path' argument will be mangled.
941 *
942 * Return:
943 * 0 - success
944 * errnum - failure
945 */
946 static int
947 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
948 char *stack)
949 {
950 uint64_t objnum, version;
951 char *cname, ch;
952
953 if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
954 dn, stack))
955 return (errnum);
956
957 if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
958 return (errnum);
959 if (version > ZPL_VERSION)
960 return (-1);
961
962 if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
963 return (errnum);
964
965 if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
966 dn, stack))
967 return (errnum);
968
969 /* skip leading slashes */
970 while (*path == '/')
971 path++;
972
973 while (*path && !grub_isspace(*path)) {
974
975 /* get the next component name */
976 cname = path;
977 while (*path && !grub_isspace(*path) && *path != '/')
978 path++;
979 ch = *path;
980 *path = 0; /* ensure null termination */
981
982 if (errnum = zap_lookup(dn, cname, &objnum, stack))
983 return (errnum);
984
985 objnum = ZFS_DIRENT_OBJ(objnum);
986 if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
987 return (errnum);
988
989 *path = ch;
990 while (*path == '/')
991 path++;
992 }
993
994 /* We found the dnode for this file. Verify if it is a plain file. */
995 VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
996
997 return (0);
998 }
999
1000 /*
1001 * Get the default 'bootfs' property value from the rootpool.
1002 *
1003 * Return:
1004 * 0 - success
1005 * errnum - failure
1006 */
1007 static int
1008 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
1009 {
1010 uint64_t objnum = 0;
1011 dnode_phys_t *dn = (dnode_phys_t *)stack;
1012 stack += DNODE_SIZE;
1013
1014 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1015 DMU_OT_OBJECT_DIRECTORY, dn, stack))
1016 return (errnum);
1017
1018 /*
1019 * find the object number for 'pool_props', and get the dnode
1020 * of the 'pool_props'.
1021 */
1022 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
1023 return (ERR_FILESYSTEM_NOT_FOUND);
1024
1025 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
1026 return (errnum);
1027
1028 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
1029 return (ERR_FILESYSTEM_NOT_FOUND);
1030
1031 if (!objnum)
1032 return (ERR_FILESYSTEM_NOT_FOUND);
1033
1034 *obj = objnum;
1035 return (0);
1036 }
1037
1038 /*
1039 * List of pool features that the grub implementation of ZFS supports for
1040 * read. Note that features that are only required for write do not need
1041 * to be listed here since grub opens pools in read-only mode.
1042 *
1043 * When this list is updated the version number in usr/src/grub/capability
1044 * must be incremented to ensure the new grub gets installed.
1045 */
1046 static const char *spa_feature_names[] = {
1047 "org.illumos:lz4_compress",
1048 "com.delphix:hole_birth",
1049 "com.delphix:extensible_dataset",
1050 "com.delphix:embedded_data",
1051 "org.open-zfs:large_blocks",
1052 NULL
1053 };
1054
1055 /*
1056 * Checks whether the MOS features that are active are supported by this
1057 * (GRUB's) implementation of ZFS.
1058 *
1059 * Return:
1060 * 0: Success.
1061 * errnum: Failure.
1062 */
1063 static int
1064 check_mos_features(dnode_phys_t *mosmdn, char *stack)
1065 {
1066 uint64_t objnum;
1067 dnode_phys_t *dn;
1068 uint8_t error = 0;
1069
1070 dn = (dnode_phys_t *)stack;
1071 stack += DNODE_SIZE;
1072
1073 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1074 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
1075 return (errnum);
1076
1077 /*
1078 * Find the object number for 'features_for_read' and retrieve its
1079 * corresponding dnode. Note that we don't check features_for_write
1080 * because GRUB is not opening the pool for write.
1081 */
1082 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
1083 stack)) != 0)
1084 return (errnum);
1085
1086 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1087 dn, stack)) != 0)
1088 return (errnum);
1089
1090 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1091 }
1092
1093 /*
1094 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
1095 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
1096 * of pool/rootfs.
1097 *
1098 * If no fsname and no obj are given, return the DSL_DIR metadnode.
1099 * If fsname is given, return its metadnode and its matching object number.
1100 * If only obj is given, return the metadnode for this object number.
1101 *
1102 * Return:
1103 * 0 - success
1104 * errnum - failure
1105 */
1106 static int
1107 get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
1108 dnode_phys_t *mdn, char *stack)
1109 {
1110 uint64_t objnum, headobj;
1111 char *cname, ch;
1112 blkptr_t *bp;
1113 objset_phys_t *osp;
1114 int issnapshot = 0;
1115 char *snapname;
1116
1117 if (fsname == NULL && obj) {
1118 headobj = *obj;
1119 goto skip;
1120 }
1121
1122 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1123 DMU_OT_OBJECT_DIRECTORY, mdn, stack))
1124 return (errnum);
1125
1126 if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
1127 stack))
1128 return (errnum);
1129
1130 if (errnum = dnode_get(mosmdn, objnum, 0, mdn, stack))
1131 return (errnum);
1132
1133 if (fsname == NULL) {
1134 headobj =
1135 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1136 goto skip;
1137 }
1138
1139 /* take out the pool name */
1140 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1141 fsname++;
1142
1143 while (*fsname && !grub_isspace(*fsname)) {
1144 uint64_t childobj;
1145
1146 while (*fsname == '/')
1147 fsname++;
1148
1149 cname = fsname;
1150 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1151 fsname++;
1152 ch = *fsname;
1153 *fsname = 0;
1154
1155 snapname = cname;
1156 while (*snapname && !grub_isspace(*snapname) && *snapname !=
1157 '@')
1158 snapname++;
1159 if (*snapname == '@') {
1160 issnapshot = 1;
1161 *snapname = 0;
1162 }
1163 childobj =
1164 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
1165 if (errnum = dnode_get(mosmdn, childobj,
1166 DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
1167 return (errnum);
1168
1169 if (zap_lookup(mdn, cname, &objnum, stack))
1170 return (ERR_FILESYSTEM_NOT_FOUND);
1171
1172 if (errnum = dnode_get(mosmdn, objnum, 0,
1173 mdn, stack))
1174 return (errnum);
1175
1176 *fsname = ch;
1177 if (issnapshot)
1178 *snapname = '@';
1179 }
1180 headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1181 if (obj)
1182 *obj = headobj;
1183
1184 skip:
1185 if (errnum = dnode_get(mosmdn, headobj, 0, mdn, stack))
1186 return (errnum);
1187 if (issnapshot) {
1188 uint64_t snapobj;
1189
1190 snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
1191 ds_snapnames_zapobj;
1192
1193 if (errnum = dnode_get(mosmdn, snapobj,
1194 DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
1195 return (errnum);
1196 if (zap_lookup(mdn, snapname + 1, &headobj, stack))
1197 return (ERR_FILESYSTEM_NOT_FOUND);
1198 if (errnum = dnode_get(mosmdn, headobj, 0, mdn, stack))
1199 return (errnum);
1200 if (obj)
1201 *obj = headobj;
1202 }
1203
1204 bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
1205 osp = (objset_phys_t *)stack;
1206 stack += sizeof (objset_phys_t);
1207 if (errnum = zio_read(bp, osp, stack))
1208 return (errnum);
1209
1210 grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);
1211
1212 return (0);
1213 }
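/*
 * Example (an added sketch; "rpool/ROOT/solaris" is a hypothetical
 * dataset name): get_objset_mdn(MOS, "rpool/ROOT/solaris", &obj, mdn,
 * stack) skips the pool name, then resolves "ROOT" and "solaris" one
 * component at a time through each dsl_dir's child-dir ZAP; a trailing
 * "@snap" component is looked up in the dataset's snapshot-names ZAP
 * instead.  On return, mdn holds the object set's meta dnode and obj its
 * head (or snapshot) dataset object number.
 */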
1214
1215 /*
1216 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1217 *
1218 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1219 *
1220 * encoding method/host endian (4 bytes)
1221 * nvl_version (4 bytes)
1222 * nvl_nvflag (4 bytes)
1223 * encoded nvpairs:
1224 * encoded size of the nvpair (4 bytes)
1225 * decoded size of the nvpair (4 bytes)
1226 * name string size (4 bytes)
1227 * name string data (sizeof(NV_ALIGN4(string)))
1228 * data type (4 bytes)
1229 * # of elements in the nvpair (4 bytes)
1230 * data
1231 * 2 zeros for the last nvpair
1232 * (end of the entire list) (8 bytes)
1233 *
1234 * Return:
1235 * 0 - success
1236 * 1 - failure
1237 */
1238 static int
1239 nvlist_unpack(char *nvlist, char **out)
1240 {
1241 /* Verify if the 1st and 2nd byte in the nvlist are valid. */
1242 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1243 return (1);
1244
1245 *out = nvlist + 4;
1246 return (0);
1247 }
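/*
 * Added byte-layout example of a single XDR-encoded nvpair, as walked by
 * nvpair_name()/nvpair_type()/nvpair_value() below (a sketch; the name
 * "txg" is just an example):
 *
 *	bytes  0-3   encoded size of this nvpair
 *	bytes  4-7   decoded size
 *	bytes  8-11  name length (3)
 *	bytes 12-15  "txg" padded with NULs to a 4-byte boundary
 *	bytes 16-19  data type (DATA_TYPE_UINT64)
 *	bytes 20-23  element count (1)
 *	bytes 24-31  the 64-bit value
 *
 * XDR integers are big-endian, hence the BSWAP_32()/BSWAP_64() calls on
 * little-endian x86.
 */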
1248
1249 static char *
1250 nvlist_array(char *nvlist, int index)
1251 {
1252 int i, encode_size;
1253
1254 for (i = 0; i < index; i++) {
1255 /* skip the header, nvl_version, and nvl_nvflag */
1256 nvlist = nvlist + 4 * 2;
1257
1258 while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1259 nvlist += encode_size; /* goto the next nvpair */
1260
1261 nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1262 }
1263
1264 return (nvlist);
1265 }
1266
1267 /*
1268 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1269 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1270 * nvpair is the last pair in the nvlist, NULL is returned.
1271 */
1272 static char *
1273 nvlist_next_nvpair(char *nvl, char *nvpair)
1274 {
1275 char *cur, *prev;
1276 int encode_size;
1277
1278 if (nvl == NULL)
1279 return (NULL);
1280
1281 if (nvpair == NULL) {
1282 /* skip over nvl_version and nvl_nvflag */
1283 nvpair = nvl + 4 * 2;
1284 } else {
1285 /* skip to the next nvpair */
1286 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1287 nvpair += encode_size;
1288 }
1289
1290 /* 8 bytes of 0 marks the end of the list */
1291 if (*(uint64_t *)nvpair == 0)
1292 return (NULL);
1293
1294 return (nvpair);
1295 }
1296
1297 /*
1298 * This function returns 0 on success and 1 on failure. On success, a string
1299 * containing the name of nvpair is saved in buf.
1300 */
1301 static int
1302 nvpair_name(char *nvp, char *buf, int buflen)
1303 {
1304 int len;
1305
1306 /* skip over encode/decode size */
1307 nvp += 4 * 2;
1308
1309 len = BSWAP_32(*(uint32_t *)nvp);
1310 if (buflen < len + 1)
1311 return (1);
1312
1313 grub_memmove(buf, nvp + 4, len);
1314 buf[len] = '\0';
1315
1316 return (0);
1317 }
1318
1319 /*
1320 * This function retrieves the value of the nvpair in the form of enumerated
1321 * type data_type_t. This is used to determine the appropriate type to pass to
1322 * nvpair_value().
1323 */
1324 static int
1325 nvpair_type(char *nvp)
1326 {
1327 int name_len, type;
1328
1329 /* skip over encode/decode size */
1330 nvp += 4 * 2;
1331
1332 /* skip over name_len */
1333 name_len = BSWAP_32(*(uint32_t *)nvp);
1334 nvp += 4;
1335
1336 /* skip over name */
1337 nvp = nvp + ((name_len + 3) & ~3); /* align */
1338
1339 type = BSWAP_32(*(uint32_t *)nvp);
1340
1341 return (type);
1342 }
1343
1344 static int
1345 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1346 {
1347 int name_len, type, slen;
1348 char *strval = val;
1349 uint64_t *intval = val;
1350
1351 /* skip over encode/decode size */
1352 nvp += 4 * 2;
1353
1354 /* skip over name_len */
1355 name_len = BSWAP_32(*(uint32_t *)nvp);
1356 nvp += 4;
1357
1358 /* skip over name */
1359 nvp = nvp + ((name_len + 3) & ~3); /* align */
1360
1361 /* skip over type */
1362 type = BSWAP_32(*(uint32_t *)nvp);
1363 nvp += 4;
1364
1365 if (type == valtype) {
1366 int nelm;
1367
1368 nelm = BSWAP_32(*(uint32_t *)nvp);
1369 if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1370 return (1);
1371 nvp += 4;
1372
1373 switch (valtype) {
1374 case DATA_TYPE_BOOLEAN:
1375 return (0);
1376
1377 case DATA_TYPE_STRING:
1378 slen = BSWAP_32(*(uint32_t *)nvp);
1379 nvp += 4;
1380 grub_memmove(strval, nvp, slen);
1381 strval[slen] = '\0';
1382 return (0);
1383
1384 case DATA_TYPE_UINT64:
1385 *intval = BSWAP_64(*(uint64_t *)nvp);
1386 return (0);
1387
1388 case DATA_TYPE_NVLIST:
1389 *(void **)val = (void *)nvp;
1390 return (0);
1391
1392 case DATA_TYPE_NVLIST_ARRAY:
1393 *(void **)val = (void *)nvp;
1394 if (nelmp)
1395 *nelmp = nelm;
1396 return (0);
1397 }
1398 }
1399
1400 return (1);
1401 }
1402
1403 static int
1404 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1405 int *nelmp)
1406 {
1407 char *nvpair;
1408
1409 for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1410 nvpair != NULL;
1411 nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1412 int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1413 char *nvp_name = nvpair + 4 * 3;
1414
1415 if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1416 nvpair_type(nvpair) == valtype) {
1417 return (nvpair_value(nvpair, val, valtype, nelmp));
1418 }
1419 }
1420 return (1);
1421 }
1422
1423 /*
1424 * Check if this vdev is online and is in a good state.
1425 */
1426 static int
1427 vdev_validate(char *nv)
1428 {
1429 uint64_t ival;
1430
1431 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1432 DATA_TYPE_UINT64, NULL) == 0 ||
1433 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1434 DATA_TYPE_UINT64, NULL) == 0 ||
1435 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1436 DATA_TYPE_UINT64, NULL) == 0)
1437 return (ERR_DEV_VALUES);
1438
1439 return (0);
1440 }
1441
1442 /*
1443 * Get a valid vdev pathname/devid from the boot device.
1444 * The caller must have already allocated MAXPATHLEN bytes for bootpath and devid.
1445 */
1446 static int
1447 vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
1448 int is_spare)
1449 {
1450 char type[16];
1451
1452 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
1453 NULL))
1454 return (ERR_FSYS_CORRUPT);
1455
1456 if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
1457 uint64_t guid;
1458
1459 if (vdev_validate(nv) != 0)
1460 return (ERR_NO_BOOTPATH);
1461
1462 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
1463 &guid, DATA_TYPE_UINT64, NULL) != 0)
1464 return (ERR_NO_BOOTPATH);
1465
1466 if (guid != inguid)
1467 return (ERR_NO_BOOTPATH);
1468
1469 /* for a spare vdev, pick the disk labeled with "is_spare" */
1470 if (is_spare) {
1471 uint64_t spare = 0;
1472 (void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
1473 &spare, DATA_TYPE_UINT64, NULL);
1474 if (!spare)
1475 return (ERR_NO_BOOTPATH);
1476 }
1477
1478 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
1479 bootpath, DATA_TYPE_STRING, NULL) != 0)
1480 bootpath[0] = '\0';
1481
1482 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
1483 devid, DATA_TYPE_STRING, NULL) != 0)
1484 devid[0] = '\0';
1485
1486 if (grub_strlen(bootpath) >= MAXPATHLEN ||
1487 grub_strlen(devid) >= MAXPATHLEN)
1488 return (ERR_WONT_FIT);
1489
1490 return (0);
1491
1492 } else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1493 grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1494 (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1495 int nelm, i;
1496 char *child;
1497
1498 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
1499 DATA_TYPE_NVLIST_ARRAY, &nelm))
1500 return (ERR_FSYS_CORRUPT);
1501
1502 for (i = 0; i < nelm; i++) {
1503 char *child_i;
1504
1505 child_i = nvlist_array(child, i);
1506 if (vdev_get_bootpath(child_i, inguid, devid,
1507 bootpath, is_spare) == 0)
1508 return (0);
1509 }
1510 }
1511
1512 return (ERR_NO_BOOTPATH);
1513 }
1514
1515 /*
1516 * Check the disk label information and retrieve needed vdev name-value pairs.
1517 *
1518 * Return:
1519 * 0 - success
1520 * ERR_* - failure
1521 */
1522 static int
1523 check_pool_label(uint64_t sector, char *stack, char *outdevid,
1524 char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
1525 {
1526 vdev_phys_t *vdev;
1527 uint64_t pool_state, txg = 0;
1528 char *nvlist, *nv, *features;
1529 uint64_t diskguid;
1530
1531 sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);
1532
1533 /* Read in the vdev name-value pair list (112K). */
1534 if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
1535 return (ERR_READ);
1536
1537 vdev = (vdev_phys_t *)stack;
1538 stack += sizeof (vdev_phys_t);
1539
1540 if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
1541 return (ERR_FSYS_CORRUPT);
1542
1543 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
1544 DATA_TYPE_UINT64, NULL))
1545 return (ERR_FSYS_CORRUPT);
1546
1547 if (pool_state == POOL_STATE_DESTROYED)
1548 return (ERR_FILESYSTEM_NOT_FOUND);
1549
1550 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
1551 current_rootpool, DATA_TYPE_STRING, NULL))
1552 return (ERR_FSYS_CORRUPT);
1553
1554 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1555 DATA_TYPE_UINT64, NULL))
1556 return (ERR_FSYS_CORRUPT);
1557
1558 /* not an active device */
1559 if (txg == 0)
1560 return (ERR_NO_BOOTPATH);
1561
1562 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
1563 DATA_TYPE_UINT64, NULL))
1564 return (ERR_FSYS_CORRUPT);
1565 if (!SPA_VERSION_IS_SUPPORTED(*outversion))
1566 return (ERR_NEWER_VERSION);
1567 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
1568 DATA_TYPE_NVLIST, NULL))
1569 return (ERR_FSYS_CORRUPT);
1570 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
1571 DATA_TYPE_UINT64, NULL))
1572 return (ERR_FSYS_CORRUPT);
1573 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
1574 DATA_TYPE_UINT64, NULL) != 0)
1575 return (ERR_FSYS_CORRUPT);
1576 if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
1577 return (ERR_NO_BOOTPATH);
1578 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
1579 DATA_TYPE_UINT64, NULL))
1580 return (ERR_FSYS_CORRUPT);
1581
1582 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
1583 &features, DATA_TYPE_NVLIST, NULL) == 0) {
1584 char *nvp;
1585 char *name = stack;
1586 stack += MAXNAMELEN;
1587
1588 for (nvp = nvlist_next_nvpair(features, NULL);
1589 nvp != NULL;
1590 nvp = nvlist_next_nvpair(features, nvp)) {
1591 zap_attribute_t za;
1592
1593 if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
1594 return (ERR_FSYS_CORRUPT);
1595
1596 za.za_integer_length = 8;
1597 za.za_num_integers = 1;
1598 za.za_first_integer = 1;
1599 za.za_name = name;
1600 if (check_feature(&za, spa_feature_names, stack) != 0)
1601 return (ERR_NEWER_VERSION);
1602 }
1603 }
1604
1605 return (0);
1606 }
1607
1608 /*
1609 * zfs_mount() locates a valid uberblock of the root pool and reads in its MOS
1610 * to the memory address MOS.
1611 *
1612 * Return:
1613 * 1 - success
1614 * 0 - failure
1615 */
1616 int
1617 zfs_mount(void)
1618 {
1619 char *stack, *ub_array;
1620 int label = 0;
1621 uberblock_t *ubbest;
1622 objset_phys_t *osp;
1623 char tmp_bootpath[MAXNAMELEN];
1624 char tmp_devid[MAXNAMELEN];
1625 uint64_t tmp_guid, ashift, version;
1626 uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
1627 int err = errnum; /* preserve previous errnum state */
1628
1629 /* if it's our first time here, zero the best uberblock out */
1630 if (best_drive == 0 && best_part == 0 && find_best_root) {
1631 grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
1632 pool_guid = 0;
1633 }
1634
1635 stackbase = ZFS_SCRATCH;
1636 stack = stackbase;
1637 ub_array = stack;
1638 stack += VDEV_UBERBLOCK_RING;
1639
1640 osp = (objset_phys_t *)stack;
1641 stack += sizeof (objset_phys_t);
1642 adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));
1643
1644 for (label = 0; label < VDEV_LABELS; label++) {
1645
1646 /*
1647 * some eltorito stacks don't give us a size, and
1648 * we end up setting the size to MAXUINT; further,
1649 * some of these devices stop working once a single
1650 * read past the end has been issued. Checking
1651 * for a maximum part_length and skipping the backup
1652 * labels at the end of the slice/partition/device
1653 * avoids breaking down on such devices.
1654 */
1655 if (part_length == MAXUINT && label == 2)
1656 break;
1657
1658 uint64_t sector = vdev_label_start(adjpl,
1659 label) >> SPA_MINBLOCKSHIFT;
1660
1661 /* Read in the uberblock ring (128K). */
1662 if (devread(sector +
1663 ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
1664 0, VDEV_UBERBLOCK_RING, ub_array) == 0)
1665 continue;
1666
1667 if (check_pool_label(sector, stack, tmp_devid,
1668 tmp_bootpath, &tmp_guid, &ashift, &version))
1669 continue;
1670
1671 if (pool_guid == 0)
1672 pool_guid = tmp_guid;
1673
1674 if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
1675 zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
1676 continue;
1677
1678 VERIFY_OS_TYPE(osp, DMU_OST_META);
1679
1680 if (version >= SPA_VERSION_FEATURES &&
1681 check_mos_features(&osp->os_meta_dnode, stack) != 0)
1682 continue;
1683
1684 if (find_best_root && ((pool_guid != tmp_guid) ||
1685 vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
1686 continue;
1687
1688 /* Got the MOS. Save it at the memory addr MOS. */
1689 grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
1690 grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
1691 grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
1692 grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
1693 is_zfs_mount = 1;
1694 return (1);
1695 }
1696
1697 /*
1698 * While some fs impls. (tftp) rely on setting and keeping
1699 * global errnums set, others won't reset it and will break
1700 * when issuing rawreads. The goal here is to simply not
1701 * have zfs mount attempts impact the previous state.
1702 */
1703 errnum = err;
1704 return (0);
1705 }
1706
1707 /*
1708 * zfs_open() locates a file in the rootpool by following the
1709 * MOS and places the dnode of the file in the memory address DNODE.
1710 *
1711 * Return:
1712 * 1 - success
1713 * 0 - failure
1714 */
1715 int
1716 zfs_open(char *filename)
1717 {
1718 char *stack;
1719 dnode_phys_t *mdn;
1720
1721 file_buf = NULL;
1722 stackbase = ZFS_SCRATCH;
1723 stack = stackbase;
1724
1725 mdn = (dnode_phys_t *)stack;
1726 stack += sizeof (dnode_phys_t);
1727
1728 dnode_mdn = NULL;
1729 dnode_buf = (dnode_phys_t *)stack;
1730 stack += 1<<DNODE_BLOCK_SHIFT;
1731
1732 /*
1733 * menu.lst is placed at the root pool filesystem level,
1734 * do not go to 'current_bootfs'.
1735 */
1736 if (is_top_dataset_file(filename)) {
1737 if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
1738 return (0);
1739
1740 current_bootfs_obj = 0;
1741 } else {
1742 if (current_bootfs[0] == '\0') {
1743 /* Get the default root filesystem object number */
1744 if (errnum = get_default_bootfsobj(MOS,
1745 &current_bootfs_obj, stack))
1746 return (0);
1747
1748 if (errnum = get_objset_mdn(MOS, NULL,
1749 &current_bootfs_obj, mdn, stack))
1750 return (0);
1751 } else {
1752 if (errnum = get_objset_mdn(MOS, current_bootfs,
1753 &current_bootfs_obj, mdn, stack)) {
1754 grub_memset(current_bootfs, 0, MAXNAMELEN);
1755 return (0);
1756 }
1757 }
1758 }
1759
1760 if (dnode_get_path(mdn, filename, DNODE, stack)) {
1761 errnum = ERR_FILE_NOT_FOUND;
1762 return (0);
1763 }
1764
1765 /* get the file size and set the file position to 0 */
1766
1767 /*
1768 * For DMU_OT_SA we will need to locate the SIZE
1769 * attribute, which could be either in the bonus buffer
1770 * or the "spill" block.
1771 */
1772 if (DNODE->dn_bonustype == DMU_OT_SA) {
1773 sa_hdr_phys_t *sahdrp;
1774 int hdrsize;
1775
1776 if (DNODE->dn_bonuslen != 0) {
1777 sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
1778 } else {
1779 if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1780 blkptr_t *bp = &DNODE->dn_spill;
1781 void *buf;
1782
1783 buf = (void *)stack;
1784 stack += BP_GET_LSIZE(bp);
1785
1786 /* reset errnum to rawread() failure */
1787 errnum = 0;
1788 if (zio_read(bp, buf, stack) != 0) {
1789 return (0);
1790 }
1791 sahdrp = buf;
1792 } else {
1793 errnum = ERR_FSYS_CORRUPT;
1794 return (0);
1795 }
1796 }
1797 hdrsize = SA_HDR_SIZE(sahdrp);
1798 filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
1799 SA_SIZE_OFFSET);
1800 } else {
1801 filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
1802 }
1803 filepos = 0;
1804
1805 dnode_buf = NULL;
1806 return (1);
1807 }
1808
1809 /*
1810 * zfs_read() reads in the data blocks pointed to by the DNODE.
1811 *
1812 * Return:
1813 * len - the length successfully read in to the buffer
1814 * 0 - failure
1815 */
1816 int
1817 zfs_read(char *buf, int len)
1818 {
1819 char *stack;
1820 int blksz, length, movesize;
1821
1822 if (file_buf == NULL) {
1823 file_buf = stackbase;
1824 stackbase += SPA_MAXBLOCKSIZE;
1825 file_start = file_end = 0;
1826 }
1827 stack = stackbase;
1828
1829 /*
1830 * If offset is in memory, move it into the buffer provided and return.
1831 */
1832 if (filepos >= file_start && filepos+len <= file_end) {
1833 grub_memmove(buf, file_buf + filepos - file_start, len);
1834 filepos += len;
1835 return (len);
1836 }
1837
1838 blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1839
1840 /*
1841 * Note: for GRUB, SPA_MAXBLOCKSIZE is 128KB. There is not enough
1842 * memory to allocate the new max blocksize (16MB), so while
1843 * GRUB understands the large_blocks on-disk feature, it can't
1844 * actually read large blocks.
1845 */
1846 if (blksz > SPA_MAXBLOCKSIZE) {
1847 grub_printf("blocks larger than 128K are not supported\n");
1848 return (0);
1849 }
1850
1851 /*
1852 * Entire Dnode is too big to fit into the space available. We
1853 * will need to read it in chunks. This could be optimized to
1854 * read in as large a chunk as there is space available, but for
1855 * now, this only reads in one data block at a time.
1856 */
1857 length = len;
1858 while (length) {
1859 /*
1860 * Find requested blkid and the offset within that block.
1861 */
1862 uint64_t blkid = filepos / blksz;
1863
1864 if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
1865 return (0);
1866
1867 file_start = blkid * blksz;
1868 file_end = file_start + blksz;
1869
1870 movesize = MIN(length, file_end - filepos);
1871
1872 grub_memmove(buf, file_buf + filepos - file_start,
1873 movesize);
1874 buf += movesize;
1875 length -= movesize;
1876 filepos += movesize;
1877 }
1878
1879 return (len);
1880 }
1881
1882 /*
1883 * No-Op
1884 */
1885 int
1886 zfs_embed(int *start_sector, int needed_sectors)
1887 {
1888 return (1);
1889 }
1890
1891 #endif /* FSYS_ZFS */
1892