# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154

# Source the common functions
. /etc/zfs/zfs-functions

# Start interactive shell.
# Use debian's panic() if defined, because it allows to prevent shell access
# by setting panic in cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell()
{
    if ! command -v panic > /dev/null 2>&1; then
        /bin/sh
    else
        panic
    fi
}

# Run any scripts that should execute before we start importing
# pools and mounting any filesystems (initramfs-tools' local-top
# and local-premount hook directories, in that order).
pre_mountroot()
{
    # run_scripts() is provided by initramfs-tools; without it
    # there is nothing we can do here.
    command -v run_scripts > /dev/null 2>&1 || return 0

    for stage in /scripts/local-top /scripts/local-premount
    do
        if [ -f "$stage" ] || [ -d "$stage" ]
        then
            [ "$quiet" != "y" ] && \
                zfs_log_begin_msg "Running $stage"
            run_scripts "$stage"
            [ "$quiet" != "y" ] && zfs_log_end_msg
        fi
    done
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
    # Only talk to plymouth if the binary exists and the daemon answers.
    [ -x /bin/plymouth ] || return 0

    if /bin/plymouth --ping
    then
        /bin/plymouth hide-splash >/dev/null 2>&1
    fi
}

# Get a ZFS filesystem property value.
# $1 = dataset, $2 = property name; prints the value on stdout,
# nothing on error (stderr is discarded).
get_fs_value()
{
    fs="$1"
    value="$2"

    "${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}

# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
72find_rootfs() 73{ 74 pool="$1" 75 76 # If 'POOL_IMPORTED' isn't set, no pool imported and therefore 77 # we won't be able to find a root fs. 78 [ -z "${POOL_IMPORTED}" ] && return 1 79 80 # If it's already specified, just keep it mounted and exit 81 # User (kernel command line) must be correct. 82 [ -n "${ZFS_BOOTFS}" ] && return 0 83 84 # Not set, try to find it in the 'bootfs' property of the pool. 85 # NOTE: zpool does not support 'get -H -ovalue bootfs'... 86 ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool") 87 88 # Make sure it's not '-' and that it starts with /. 89 if [ "${ZFS_BOOTFS}" != "-" ] && \ 90 get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$' 91 then 92 # Keep it mounted 93 POOL_IMPORTED=1 94 return 0 95 fi 96 97 # Not boot fs here, export it and later try again.. 98 "${ZPOOL}" export "$pool" 99 POOL_IMPORTED= 100 ZFS_BOOTFS= 101 return 1 102} 103 104# Support function to get a list of all pools, separated with ';' 105find_pools() 106{ 107 pools=$("$@" 2> /dev/null | \ 108 grep -E "pool:|^[a-zA-Z0-9]" | \ 109 sed 's@.*: @@' | \ 110 tr '\n' ';') 111 112 echo "${pools%%;}" # Return without the last ';'. 113} 114 115# Get a list of all available pools 116get_pools() 117{ 118 if [ -n "${ZFS_POOL_IMPORT}" ]; then 119 echo "$ZFS_POOL_IMPORT" 120 return 0 121 fi 122 123 # Get the base list of available pools. 124 available_pools=$(find_pools "$ZPOOL" import) 125 126 # Just in case - seen it happen (that a pool isn't visible/found 127 # with a simple "zpool import" but only when using the "-d" 128 # option or setting ZPOOL_IMPORT_PATH). 129 if [ -d "/dev/disk/by-id" ] 130 then 131 npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id) 132 if [ -n "$npools" ] 133 then 134 # Because we have found extra pool(s) here, which wasn't 135 # found 'normally', we need to force USE_DISK_BY_ID to 136 # make sure we're able to actually import it/them later. 
137 USE_DISK_BY_ID='yes' 138 139 if [ -n "$available_pools" ] 140 then 141 # Filter out duplicates (pools found with the simple 142 # "zpool import" but which is also found with the 143 # "zpool import -d ..."). 144 npools=$(echo "$npools" | sed "s,$available_pools,,") 145 146 # Add the list to the existing list of 147 # available pools 148 available_pools="$available_pools;$npools" 149 else 150 available_pools="$npools" 151 fi 152 fi 153 fi 154 155 # Filter out any exceptions... 156 if [ -n "$ZFS_POOL_EXCEPTIONS" ] 157 then 158 found="" 159 apools="" 160 OLD_IFS="$IFS" ; IFS=";" 161 162 for pool in $available_pools 163 do 164 for exception in $ZFS_POOL_EXCEPTIONS 165 do 166 [ "$pool" = "$exception" ] && continue 2 167 found="$pool" 168 done 169 170 if [ -n "$found" ] 171 then 172 if [ -n "$apools" ] 173 then 174 apools="$apools;$pool" 175 else 176 apools="$pool" 177 fi 178 fi 179 done 180 181 IFS="$OLD_IFS" 182 available_pools="$apools" 183 fi 184 185 # Return list of available pools. 186 echo "$available_pools" 187} 188 189# Import given pool $1 190import_pool() 191{ 192 pool="$1" 193 194 # Verify that the pool isn't already imported 195 # Make as sure as we can to not require '-f' to import. 196 "${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0 197 198 # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set 199 # to something we can use later with the real import(s). We want to 200 # make sure we find all by* dirs, BUT by-vdev should be first (if it 201 # exists). 202 if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ] 203 then 204 dirs="$(for dir in /dev/disk/by-* 205 do 206 # Ignore by-vdev here - we want it first! 207 echo "$dir" | grep -q /by-vdev && continue 208 [ ! -d "$dir" ] && continue 209 210 printf "%s" "$dir:" 211 done | sed 's,:$,,g')" 212 213 if [ -d "/dev/disk/by-vdev" ] 214 then 215 # Add by-vdev at the beginning. 216 ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:" 217 fi 218 219 # ... 
and /dev at the very end, just for good measure. 220 ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev" 221 fi 222 223 # Needs to be exported for "zpool" to catch it. 224 [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH 225 226 227 [ "$quiet" != "y" ] && zfs_log_begin_msg \ 228 "Importing pool '${pool}' using defaults" 229 230 ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}" 231 ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)" 232 ZFS_ERROR="$?" 233 if [ "${ZFS_ERROR}" != 0 ] 234 then 235 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 236 237 if [ -f "${ZPOOL_CACHE}" ] 238 then 239 [ "$quiet" != "y" ] && zfs_log_begin_msg \ 240 "Importing pool '${pool}' using cachefile." 241 242 ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}" 243 ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)" 244 ZFS_ERROR="$?" 245 fi 246 247 if [ "${ZFS_ERROR}" != 0 ] 248 then 249 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 250 251 disable_plymouth 252 echo "" 253 echo "Command: ${ZFS_CMD} '$pool'" 254 echo "Message: $ZFS_STDERR" 255 echo "Error: $ZFS_ERROR" 256 echo "" 257 echo "Failed to import pool '$pool'." 258 echo "Manually import the pool and exit." 259 shell 260 fi 261 fi 262 263 [ "$quiet" != "y" ] && zfs_log_end_msg 264 265 POOL_IMPORTED=1 266 return 0 267} 268 269# Load ZFS modules 270# Loading a module in a initrd require a slightly different approach, 271# with more logging etc. 272load_module_initrd() 273{ 274 [ -n "$ROOTDELAY" ] && ZFS_INITRD_PRE_MOUNTROOT_SLEEP="$ROOTDELAY" 275 276 if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null 277 then 278 if [ "$quiet" != "y" ]; then 279 zfs_log_begin_msg "Sleeping for" \ 280 "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..." 281 fi 282 sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" 283 [ "$quiet" != "y" ] && zfs_log_end_msg 284 fi 285 286 # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear. 
287 if command -v wait_for_udev > /dev/null 2>&1 ; then 288 wait_for_udev 10 289 elif command -v wait_for_dev > /dev/null 2>&1 ; then 290 wait_for_dev 291 fi 292 293 # zpool import refuse to import without a valid /proc/self/mounts 294 [ ! -f /proc/self/mounts ] && mount proc /proc 295 296 # Load the module 297 load_module "zfs" || return 1 298 299 if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null 300 then 301 if [ "$quiet" != "y" ]; then 302 zfs_log_begin_msg "Sleeping for" \ 303 "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..." 304 fi 305 sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP" 306 [ "$quiet" != "y" ] && zfs_log_end_msg 307 fi 308 309 return 0 310} 311 312# Mount a given filesystem 313mount_fs() 314{ 315 fs="$1" 316 317 # Check that the filesystem exists 318 "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1 319 320 # Skip filesystems with canmount=off. The root fs should not have 321 # canmount=off, but ignore it for backwards compatibility just in case. 322 if [ "$fs" != "${ZFS_BOOTFS}" ] 323 then 324 canmount=$(get_fs_value "$fs" canmount) 325 [ "$canmount" = "off" ] && return 0 326 fi 327 328 # Need the _original_ datasets mountpoint! 329 mountpoint=$(get_fs_value "$fs" mountpoint) 330 ZFS_CMD="mount -o zfsutil -t zfs" 331 if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then 332 # Can't use the mountpoint property. Might be one of our 333 # clones. Check the 'org.zol:mountpoint' property set in 334 # clone_snap() if that's usable. 335 mountpoint=$(get_fs_value "$fs" org.zol:mountpoint) 336 if [ "$mountpoint" = "legacy" ] || 337 [ "$mountpoint" = "none" ] || 338 [ "$mountpoint" = "-" ] 339 then 340 if [ "$fs" != "${ZFS_BOOTFS}" ]; then 341 # We don't have a proper mountpoint and this 342 # isn't the root fs. 343 return 0 344 else 345 # Last hail-mary: Hope 'rootmnt' is set! 346 mountpoint="" 347 fi 348 fi 349 350 # If it's not a legacy filesystem, it can only be a 351 # native one... 
352 if [ "$mountpoint" = "legacy" ]; then 353 ZFS_CMD="mount -t zfs" 354 fi 355 fi 356 357 # Possibly decrypt a filesystem using native encryption. 358 decrypt_fs "$fs" 359 360 [ "$quiet" != "y" ] && \ 361 zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'" 362 [ -n "${ZFS_DEBUG}" ] && \ 363 zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'" 364 365 ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1) 366 ZFS_ERROR=$? 367 if [ "${ZFS_ERROR}" != 0 ] 368 then 369 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 370 371 disable_plymouth 372 echo "" 373 echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}" 374 echo "Message: $ZFS_STDERR" 375 echo "Error: $ZFS_ERROR" 376 echo "" 377 echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}." 378 echo "Manually mount the filesystem and exit." 379 shell 380 else 381 [ "$quiet" != "y" ] && zfs_log_end_msg 382 fi 383 384 return 0 385} 386 387# Unlock a ZFS native encrypted filesystem. 388decrypt_fs() 389{ 390 fs="$1" 391 392 # If pool encryption is active and the zfs command understands '-o encryption' 393 if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then 394 395 # Determine dataset that holds key for root dataset 396 ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)" 397 KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)" 398 399 echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name 400 401 # If root dataset is encrypted... 402 if ! [ "${ENCRYPTIONROOT}" = "-" ]; then 403 KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)" 404 # Continue only if the key needs to be loaded 405 [ "$KEYSTATUS" = "unavailable" ] || return 0 406 407 # Do not prompt if key is stored noninteractively, 408 if ! 
[ "${KEYLOCATION}" = "prompt" ]; then 409 $ZFS load-key "${ENCRYPTIONROOT}" 410 411 # Prompt with plymouth, if active 412 elif /bin/plymouth --ping 2>/dev/null; then 413 echo "plymouth" > /run/zfs_console_askpwd_cmd 414 for _ in 1 2 3; do 415 plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \ 416 $ZFS load-key "${ENCRYPTIONROOT}" && break 417 done 418 419 # Prompt with systemd, if active 420 elif [ -e /run/systemd/system ]; then 421 echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd 422 for _ in 1 2 3; do 423 systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \ 424 $ZFS load-key "${ENCRYPTIONROOT}" && break 425 done 426 427 # Prompt with ZFS tty, otherwise 428 else 429 # Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used 430 echo "load-key" > /run/zfs_console_askpwd_cmd 431 storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)" 432 echo 7 > /proc/sys/kernel/printk 433 $ZFS load-key "${ENCRYPTIONROOT}" 434 echo "$storeprintk" > /proc/sys/kernel/printk 435 fi 436 fi 437 fi 438 439 return 0 440} 441 442# Destroy a given filesystem. 443destroy_fs() 444{ 445 fs="$1" 446 447 [ "$quiet" != "y" ] && \ 448 zfs_log_begin_msg "Destroying '$fs'" 449 450 ZFS_CMD="${ZFS} destroy $fs" 451 ZFS_STDERR="$(${ZFS_CMD} 2>&1)" 452 ZFS_ERROR="$?" 453 if [ "${ZFS_ERROR}" != 0 ] 454 then 455 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 456 457 disable_plymouth 458 echo "" 459 echo "Command: $ZFS_CMD" 460 echo "Message: $ZFS_STDERR" 461 echo "Error: $ZFS_ERROR" 462 echo "" 463 echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available." 464 echo "Hint: Try: zfs destroy -Rfn $fs" 465 echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again." 
466 shell 467 else 468 [ "$quiet" != "y" ] && zfs_log_end_msg 469 fi 470 471 return 0 472} 473 474# Clone snapshot $1 to destination filesystem $2 475# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep 476# manual control over it's mounting (i.e., make sure it's not automatically 477# mounted with a 'zfs mount -a' in the init/systemd scripts). 478clone_snap() 479{ 480 snap="$1" 481 destfs="$2" 482 mountpoint="$3" 483 484 [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'" 485 486 # Clone the snapshot into a dataset we can boot from 487 # + We don't want this filesystem to be automatically mounted, we 488 # want control over this here and nowhere else. 489 # + We don't need any mountpoint set for the same reason. 490 # We use the 'org.zol:mountpoint' property to remember the mountpoint. 491 ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none" 492 ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}" 493 ZFS_CMD="${ZFS_CMD} $snap $destfs" 494 ZFS_STDERR="$(${ZFS_CMD} 2>&1)" 495 ZFS_ERROR="$?" 496 if [ "${ZFS_ERROR}" != 0 ] 497 then 498 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 499 500 disable_plymouth 501 echo "" 502 echo "Command: $ZFS_CMD" 503 echo "Message: $ZFS_STDERR" 504 echo "Error: $ZFS_ERROR" 505 echo "" 506 echo "Failed to clone snapshot." 507 echo "Make sure that the any problems are corrected and then make sure" 508 echo "that the dataset '$destfs' exists and is bootable." 509 shell 510 else 511 [ "$quiet" != "y" ] && zfs_log_end_msg 512 fi 513 514 return 0 515} 516 517# Rollback a given snapshot. 518rollback_snap() 519{ 520 snap="$1" 521 522 [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap" 523 524 ZFS_CMD="${ZFS} rollback -Rf $snap" 525 ZFS_STDERR="$(${ZFS_CMD} 2>&1)" 526 ZFS_ERROR="$?" 
527 if [ "${ZFS_ERROR}" != 0 ] 528 then 529 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" 530 531 disable_plymouth 532 echo "" 533 echo "Command: $ZFS_CMD" 534 echo "Message: $ZFS_STDERR" 535 echo "Error: $ZFS_ERROR" 536 echo "" 537 echo "Failed to rollback snapshot." 538 shell 539 else 540 [ "$quiet" != "y" ] && zfs_log_end_msg 541 fi 542 543 return 0 544} 545 546# Get a list of snapshots, give them as a numbered list 547# to the user to choose from. 548ask_user_snap() 549{ 550 fs="$1" 551 552 # We need to temporarily disable debugging. Set 'debug' so we 553 # remember to enabled it again. 554 if [ -n "${ZFS_DEBUG}" ]; then 555 unset ZFS_DEBUG 556 set +x 557 debug=1 558 fi 559 560 # Because we need the resulting snapshot, which is sent on 561 # stdout to the caller, we use stderr for our questions. 562 echo "What snapshot do you want to boot from?" > /dev/stderr 563 # shellcheck disable=SC2046 564 IFS=" 565" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}") 566 567 i=1 568 for snap in "$@"; do 569 echo " $i: $snap" 570 i=$((i + 1)) 571 done > /dev/stderr 572 573 # expr instead of test here because [ a -lt 0 ] errors out, 574 # but expr falls back to lexicographical, which works out right 575 snapnr=0 576 while expr "$snapnr" "<" 1 > /dev/null || 577 expr "$snapnr" ">" "$#" > /dev/null 578 do 579 printf "%s" "Snap nr [1-$#]? " > /dev/stderr 580 read -r snapnr 581 done 582 583 # Re-enable debugging. 584 if [ -n "${debug}" ]; then 585 ZFS_DEBUG=1 586 set -x 587 fi 588 589 eval echo '$'"$snapnr" 590} 591 592setup_snapshot_booting() 593{ 594 snap="$1" 595 retval=0 596 597 # Make sure that the snapshot specified actually exists. 598 if [ ! "$(get_fs_value "${snap}" type)" ] 599 then 600 # Snapshot does not exist (...@<null> ?) 601 # ask the user for a snapshot to use. 602 snap="$(ask_user_snap "${snap%%@*}")" 603 fi 604 605 # Separate the full snapshot ('$snap') into it's filesystem and 606 # snapshot names. 
Would have been nice with a split() function.. 607 rootfs="${snap%%@*}" 608 snapname="${snap##*@}" 609 ZFS_BOOTFS="${rootfs}_${snapname}" 610 611 if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline 612 then 613 # If the destination dataset for the clone 614 # already exists, destroy it. Recursively 615 if [ "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then 616 filesystems=$("${ZFS}" list -oname -tfilesystem -H \ 617 -r -Sname "${ZFS_BOOTFS}") 618 for fs in $filesystems; do 619 destroy_fs "${fs}" 620 done 621 fi 622 fi 623 624 # Get all snapshots, recursively (might need to clone /usr, /var etc 625 # as well). 626 for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \ 627 grep "${snapname}") 628 do 629 if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline 630 then 631 # Rollback snapshot 632 rollback_snap "$s" || retval=$((retval + 1)) 633 ZFS_BOOTFS="${rootfs}" 634 else 635 # Setup a destination filesystem name. 636 # Ex: Called with 'rpool/ROOT/debian@snap2' 637 # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2 638 # rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot 639 # rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr 640 # rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var 641 subfs="${s##$rootfs}" 642 subfs="${subfs%%@$snapname}" 643 644 destfs="${rootfs}_${snapname}" # base fs. 645 [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs. 646 647 # Get the mountpoint of the filesystem, to be used 648 # with clone_snap(). If legacy or none, then use 649 # the sub fs value. 
650 mountpoint=$(get_fs_value "${s%%@*}" mountpoint) 651 if [ "$mountpoint" = "legacy" ] || \ 652 [ "$mountpoint" = "none" ] 653 then 654 if [ -n "${subfs}" ]; then 655 mountpoint="${subfs}" 656 else 657 mountpoint="/" 658 fi 659 fi 660 661 # Clone the snapshot into its own 662 # filesystem 663 clone_snap "$s" "${destfs}" "${mountpoint}" || \ 664 retval=$((retval + 1)) 665 fi 666 done 667 668 # If we haven't return yet, we have a problem... 669 return "${retval}" 670} 671 672# ================================================================ 673 674# This is the main function. 675mountroot() 676{ 677 # ---------------------------------------------------------------- 678 # I N I T I A L S E T U P 679 680 # ------------ 681 # Run the pre-mount scripts from /scripts/local-top. 682 pre_mountroot 683 684 # ------------ 685 # Source the default setup variables. 686 [ -r '/etc/default/zfs' ] && . /etc/default/zfs 687 688 # ------------ 689 # Support debug option 690 if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline 691 then 692 ZFS_DEBUG=1 693 mkdir /var/log 694 #exec 2> /var/log/boot.debug 695 set -x 696 fi 697 698 # ------------ 699 # Load ZFS module etc. 700 if ! load_module_initrd; then 701 disable_plymouth 702 echo "" 703 echo "Failed to load ZFS modules." 704 echo "Manually load the modules and exit." 705 shell 706 fi 707 708 # ------------ 709 # Look for the cache file (if any). 
710 [ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE 711 [ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE 712 713 # ------------ 714 # Compatibility: 'ROOT' is for Debian GNU/Linux (etc), 715 # 'root' is for Redhat/Fedora (etc), 716 # 'REAL_ROOT' is for Gentoo 717 if [ -z "$ROOT" ] 718 then 719 [ -n "$root" ] && ROOT=${root} 720 721 [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT} 722 fi 723 724 # ------------ 725 # Where to mount the root fs in the initrd - set outside this script 726 # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc), 727 # 'NEWROOT' is for RedHat/Fedora (etc), 728 # 'NEW_ROOT' is for Gentoo 729 if [ -z "$rootmnt" ] 730 then 731 [ -n "$NEWROOT" ] && rootmnt=${NEWROOT} 732 733 [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT} 734 fi 735 736 # ------------ 737 # No longer set in the defaults file, but it could have been set in 738 # get_pools() in some circumstances. If it's something, but not 'yes', 739 # it's no good to us. 740 [ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \ 741 unset USE_DISK_BY_ID 742 743 # ---------------------------------------------------------------- 744 # P A R S E C O M M A N D L I N E O P T I O N S 745 746 # This part is the really ugly part - there's so many options and permutations 747 # 'out there', and if we should make this the 'primary' source for ZFS initrd 748 # scripting, we need/should support them all. 
749 # 750 # Supports the following kernel command line argument combinations 751 # (in this order - first match win): 752 # 753 # rpool=<pool> (tries to finds bootfs automatically) 754 # bootfs=<pool>/<dataset> (uses this for rpool - first part) 755 # rpool=<pool> bootfs=<pool>/<dataset> 756 # -B zfs-bootfs=<pool>/<fs> (uses this for rpool - first part) 757 # rpool=rpool (default if none of the above is used) 758 # root=<pool>/<dataset> (uses this for rpool - first part) 759 # root=ZFS=<pool>/<dataset> (uses this for rpool - first part, without 'ZFS=') 760 # root=zfs:AUTO (tries to detect both pool and rootfs 761 # root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:') 762 # 763 # Option <dataset> could also be <snapshot> 764 # Option <pool> could also be <guid> 765 766 # ------------ 767 # Support force option 768 # In addition, setting one of zfs_force, zfs.force or zfsforce to 769 # 'yes', 'on' or '1' will make sure we force import the pool. 770 # This should (almost) never be needed, but it's here for 771 # completeness. 772 ZPOOL_FORCE="" 773 if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline 774 then 775 ZPOOL_FORCE="-f" 776 fi 777 778 # ------------ 779 # Look for 'rpool' and 'bootfs' parameter 780 [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}" 781 [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}" 782 783 # ------------ 784 # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use 785 # 'ROOT' 786 [ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT" 787 788 # ------------ 789 # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter. 790 # NOTE: Only use the pool name and dataset. The rest is not 791 # supported by OpenZFS (whatever it's for). 792 if [ -z "$ZFS_RPOOL" ] 793 then 794 # The ${zfs-bootfs} variable is set at the kernel command 795 # line, usually by GRUB, but it cannot be referenced here 796 # directly because bourne variable names cannot contain a 797 # hyphen. 
798 # 799 # Reassign the variable by dumping the environment and 800 # stripping the zfs-bootfs= prefix. Let the shell handle 801 # quoting through the eval command: 802 # shellcheck disable=SC2046 803 eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p') 804 fi 805 806 # ------------ 807 # No root fs or pool specified - do auto detect. 808 if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ] 809 then 810 # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO' 811 # which will be caught later 812 ROOT='zfs:AUTO' 813 fi 814 815 # ---------------------------------------------------------------- 816 # F I N D A N D I M P O R T C O R R E C T P O O L 817 818 # ------------ 819 if [ "$ROOT" = "zfs:AUTO" ] 820 then 821 # Try to detect both pool and root fs. 822 823 # If we got here, that means we don't have a hint so as to 824 # the root dataset, but with root=zfs:AUTO on cmdline, 825 # this says "zfs:AUTO" here and interferes with checks later 826 ZFS_BOOTFS= 827 828 [ "$quiet" != "y" ] && \ 829 zfs_log_begin_msg "Attempting to import additional pools." 830 831 # Get a list of pools available for import 832 if [ -n "$ZFS_RPOOL" ] 833 then 834 # We've specified a pool - check only that 835 POOLS=$ZFS_RPOOL 836 else 837 POOLS=$(get_pools) 838 fi 839 840 OLD_IFS="$IFS" ; IFS=";" 841 for pool in $POOLS 842 do 843 [ -z "$pool" ] && continue 844 845 IFS="$OLD_IFS" import_pool "$pool" 846 IFS="$OLD_IFS" find_rootfs "$pool" && break 847 done 848 IFS="$OLD_IFS" 849 850 [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR 851 else 852 # No auto - use value from the command line option. 853 854 # Strip 'zfs:' and 'ZFS='. 855 ZFS_BOOTFS="${ROOT#*[:=]}" 856 857 # Strip everything after the first slash. 858 ZFS_RPOOL="${ZFS_BOOTFS%%/*}" 859 fi 860 861 # Import the pool (if not already done so in the AUTO check above). 
862 if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ] 863 then 864 [ "$quiet" != "y" ] && \ 865 zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'" 866 867 import_pool "${ZFS_RPOOL}" 868 find_rootfs "${ZFS_RPOOL}" 869 870 [ "$quiet" != "y" ] && zfs_log_end_msg 871 fi 872 873 if [ -z "${POOL_IMPORTED}" ] 874 then 875 # No pool imported, this is serious! 876 disable_plymouth 877 echo "" 878 echo "Command: $ZFS_CMD" 879 echo "Message: $ZFS_STDERR" 880 echo "Error: $ZFS_ERROR" 881 echo "" 882 echo "No pool imported. Manually import the root pool" 883 echo "at the command prompt and then exit." 884 echo "Hint: Try: zpool import -N ${ZFS_RPOOL}" 885 shell 886 fi 887 888 # In case the pool was specified as guid, resolve guid to name 889 pool="$("${ZPOOL}" get name,guid -o name,value -H | \ 890 awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')" 891 if [ -n "$pool" ]; then 892 # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool 893 ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \ 894 sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g") 895 ZFS_RPOOL="${pool}" 896 fi 897 898 899 # ---------------------------------------------------------------- 900 # P R E P A R E R O O T F I L E S Y S T E M 901 902 if [ -n "${ZFS_BOOTFS}" ] 903 then 904 # Booting from a snapshot? 905 # Will overwrite the ZFS_BOOTFS variable like so: 906 # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2 907 echo "${ZFS_BOOTFS}" | grep -q '@' && \ 908 setup_snapshot_booting "${ZFS_BOOTFS}" 909 fi 910 911 if [ -z "${ZFS_BOOTFS}" ] 912 then 913 # Still nothing! Let the user sort this out. 914 disable_plymouth 915 echo "" 916 echo "Error: Unknown root filesystem - no 'bootfs' pool property and" 917 echo " not specified on the kernel command line." 918 echo "" 919 echo "Manually mount the root filesystem on $rootmnt and then exit." 
920 echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt" 921 shell 922 fi 923 924 # ---------------------------------------------------------------- 925 # M O U N T F I L E S Y S T E M S 926 927 # * Ideally, the root filesystem would be mounted like this: 928 # 929 # zpool import -R "$rootmnt" -N "$ZFS_RPOOL" 930 # zfs mount -o mountpoint=/ "${ZFS_BOOTFS}" 931 # 932 # but the MOUNTPOINT prefix is preserved on descendent filesystem 933 # after the pivot into the regular root, which later breaks things 934 # like `zfs mount -a` and the /proc/self/mounts refresh. 935 # 936 # * Mount additional filesystems required 937 # Such as /usr, /var, /usr/local etc. 938 # NOTE: Mounted in the order specified in the 939 # ZFS_INITRD_ADDITIONAL_DATASETS variable so take care! 940 941 # Go through the complete list (recursively) of all filesystems below 942 # the real root dataset 943 filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")" 944 OLD_IFS="$IFS" ; IFS=" 945" 946 for fs in $filesystems; do 947 IFS="$OLD_IFS" mount_fs "$fs" 948 done 949 IFS="$OLD_IFS" 950 for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do 951 mount_fs "$fs" 952 done 953 954 touch /run/zfs_unlock_complete 955 if [ -e /run/zfs_unlock_complete_notify ]; then 956 read -r < /run/zfs_unlock_complete_notify 957 fi 958 959 # ------------ 960 # Debugging information 961 if [ -n "${ZFS_DEBUG}" ] 962 then 963 #exec 2>&1- 964 965 echo "DEBUG: imported pools:" 966 "${ZPOOL}" list -H 967 echo 968 969 echo "DEBUG: mounted ZFS filesystems:" 970 mount | grep zfs 971 echo 972 973 echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. " 974 printf "%s" " 'c' for shell, 'r' for reboot, 'ENTER' to continue. 
" 975 read -r b 976 977 [ "$b" = "c" ] && /bin/sh 978 [ "$b" = "r" ] && reboot -f 979 980 set +x 981 fi 982 983 # ------------ 984 # Run local bottom script 985 if command -v run_scripts > /dev/null 2>&1 986 then 987 if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ] 988 then 989 [ "$quiet" != "y" ] && \ 990 zfs_log_begin_msg "Running /scripts/local-bottom" 991 run_scripts /scripts/local-bottom 992 [ "$quiet" != "y" ] && zfs_log_end_msg 993 fi 994 fi 995} 996