# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
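# For instance (illustrative values), a full kernel command line might be:
#   linux /vmlinuz boot=zfs root=ZFS=rpool/ROOT/debian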
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154

# Source the common functions
. /etc/zfs/zfs-functions

# Start interactive shell.
# Use Debian's panic() if defined, because it allows preventing shell access
# by setting panic on the cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of the Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if command -v panic > /dev/null 2>&1; then
		panic
	else
		/bin/sh
	fi
}

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-top"
			run_scripts /scripts/local-top
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi

		if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-premount"
			run_scripts /scripts/local-premount
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
	fs="$1"
	value=$2

	"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
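
# Example (illustrative dataset): get_fs_value rpool/ROOT/debian mountpoint
# prints that dataset's mountpoint property, e.g. "/".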

# Find the 'bootfs' property on pool $1.
# If the property is unset or the dataset's mountpoint isn't '/',
# ignore this pool by exporting it again.
find_rootfs()
{
	pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported and
	# therefore we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit
	# User (kernel command line) must be correct.
	[ -n "${ZFS_BOOTFS}" ] && return 0

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# No boot fs here; export the pool and try again later.
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=
	ZFS_BOOTFS=
	return 1
}

# Support function to get a list of all pools, separated by ';'
find_pools()
{
	pools=$("$@" 2> /dev/null | \
		grep -E "pool:|^[a-zA-Z0-9]" | \
		sed 's@.*: @@' | \
		tr '\n' ';')

	echo "${pools%%;}" # Return without the last ';'.
}
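
# Example (illustrative): with two exported pools, 'zpool import' prints a
# "pool: ..." line for each, so 'find_pools "$ZPOOL" import' returns
# "rpool;tank".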

# Get a list of all available pools
get_pools()
{
	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case - it has been seen to happen that a pool isn't
	# visible/found with a simple "zpool import", but only when using
	# the "-d" option or setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the simple
				# "zpool import" which are also found with the
				# "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		found=""
		apools=""
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return list of available pools.
	echo "$available_pools"
}

# Import given pool $1
import_pool()
{
	pool="$1"

	# Verify that the pool isn't already imported
	# Be as sure as we can that '-f' won't be required to import.
	"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all the by-* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in /dev/disk/by-*
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			printf "%s" "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi
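
	# Illustratively, ZPOOL_IMPORT_PATH might end up as
	#   "/dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-uuid:/dev"
	# (by-vdev first, /dev last; the middle entries depend on the system).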

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH

	[ "$quiet" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules
# Loading a module in an initrd requires a slightly different approach,
# with more logging, etc.
load_module_initrd()
{
	[ -n "$ROOTDELAY" ] && ZFS_INITRD_PRE_MOUNTROOT_SLEEP="$ROOTDELAY"

	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
	if command -v wait_for_udev > /dev/null 2>&1 ; then
		wait_for_udev 10
	elif command -v wait_for_dev > /dev/null 2>&1 ; then
		wait_for_dev
	fi

	# zpool import refuses to import without a valid /proc/self/mounts
	[ ! -f /proc/self/mounts ] && mount proc /proc

	# Load the module
	load_module "zfs" || return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Mount a given filesystem
mount_fs()
{
	fs="$1"

	# Check that the filesystem exists
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1

	# Skip filesystems with canmount=off.  The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "$fs" != "${ZFS_BOOTFS}" ]
	then
		canmount=$(get_fs_value "$fs" canmount)
		[ "$canmount" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	ZFS_CMD="mount -o zfsutil -t zfs"
	if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check the 'org.zol:mountpoint' property set in
		# clone_snap() to see if that's usable.
		mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint" = "legacy" ] ||
		   [ "$mountpoint" = "none" ] ||
		   [ "$mountpoint" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			else
				# Last hail-mary: Hope 'rootmnt' is set!
				mountpoint=""
			fi
		fi

		# If it's a legacy filesystem, mount it without the
		# 'zfsutil' option; otherwise it can only be a native one.
		if [ "$mountpoint" = "legacy" ]; then
			ZFS_CMD="mount -t zfs"
		fi
	fi
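
	# ZFS_CMD is now either "mount -o zfsutil -t zfs" (native mountpoint)
	# or "mount -t zfs" (legacy mountpoint); the dataset and the
	# "${rootmnt}/${mountpoint}" target are appended below.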

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
	fs="$1"

	# If pool encryption is active and the zfs command understands '-o encryption'
	if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then

		# Determine the dataset that holds the key for the root dataset
		ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If root dataset is encrypted...
		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded
			[ "$KEYSTATUS" = "unavailable" ] || return 0

			# Do not prompt if the key is stored noninteractively.
			if ! [ "${KEYLOCATION}" = "prompt" ]; then
				$ZFS load-key "${ENCRYPTIONROOT}"

			# Prompt with plymouth, if active
			elif /bin/plymouth --ping 2>/dev/null; then
				echo "plymouth" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3; do
					plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt with systemd, if active
			elif [ -e /run/systemd/system ]; then
				echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3; do
					systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt on the ZFS tty, otherwise
			else
				# Temporarily setting "printk" to "7" allows the
				# prompt to appear even when the "quiet" kernel
				# option has been used
				echo "load-key" > /run/zfs_console_askpwd_cmd
				storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
				echo 7 > /proc/sys/kernel/printk
				$ZFS load-key "${ENCRYPTIONROOT}"
				echo "$storeprintk" > /proc/sys/kernel/printk
			fi
		fi
	fi

	return 0
}
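
# If the prompts above fail, the key can still be loaded manually from the
# rescue shell spawned on mount failure, e.g. (illustrative dataset):
#   zfs load-key rpool/ROOT/debian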

# Destroy a given filesystem.
destroy_fs()
{
	fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
		echo "Hint: Try:  zfs destroy -Rfn $fs"
		echo "If this dry run looks good, then remove the 'n' from '-Rfn' and try again."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	snap="$1"
	destfs="$2"
	mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from
	# + We don't want this filesystem to be automatically mounted, we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
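
# Illustratively, clone_snap rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2 /
# builds and runs:
#   zfs clone -o canmount=noauto -o mountpoint=none \
#       -o org.zol:mountpoint=/ rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2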

# Rollback a given snapshot.
rollback_snap()
{
	snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to rollback snapshot."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots, give them as a numbered list
# to the user to choose from.
ask_user_snap()
{
	fs="$1"

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	# shellcheck disable=SC2046
	IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")

	i=1
	for snap in "$@"; do
		echo "  $i: $snap"
		i=$((i + 1))
	done > /dev/stderr

	# expr instead of test here because [ a -lt 0 ] errors out,
	# but expr falls back to lexicographical comparison, which
	# works out right
	snapnr=0
	while expr "$snapnr" "<" 1 > /dev/null ||
	    expr "$snapnr" ">" "$#" > /dev/null
	do
		printf "%s" "Snap nr [1-$#]? " > /dev/stderr
		read -r snapnr
	done

	# Re-enable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

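	# Expand the selected positional parameter: with snapnr=2, the eval
	# below effectively runs 'echo "$2"' and prints the second snapshot.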
	eval echo '$'"$snapnr"
}

setup_snapshot_booting()
{
	snap="$1"
	retval=0

	# Make sure that the snapshot specified actually exists.
	if [ ! "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?)
		# ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. A split() function would have been nice here...
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it. Recursively
		if [ "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (might need to clone /usr, /var etc
	# as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Rollback snapshot
			rollback_snap "$s" || retval=$((retval + 1))
		else
			# Setup a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#       rpool/ROOT/debian@snap2		=> rpool/ROOT/debian_snap2
			#       rpool/ROOT/debian/boot@snap2	=> rpool/ROOT/debian_snap2/boot
			#       rpool/ROOT/debian/usr@snap2	=> rpool/ROOT/debian_snap2/usr
			#       rpool/ROOT/debian/var@snap2	=> rpool/ROOT/debian_snap2/var
			subfs="${s##$rootfs}"
			subfs="${subfs%%@$snapname}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" ] || \
			   [ "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# Propagate any rollback/clone failures counted above.
	return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support debug option
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
	[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#		 'root' is for RedHat/Fedora (etc),
	#		 'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#		 'NEWROOT' is for RedHat/Fedora (etc),
	#		 'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This is the really ugly part - there are so many options and
	# permutations 'out there', and if this is to be the 'primary' source
	# for ZFS initrd scripting, we need/should support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#	rpool=<pool>			(tries to find bootfs automatically)
	#	bootfs=<pool>/<dataset>		(uses this for rpool - first part)
	#	rpool=<pool> bootfs=<pool>/<dataset>
	#	-B zfs-bootfs=<pool>/<fs>	(uses this for rpool - first part)
	#	rpool=rpool			(default if none of the above is used)
	#	root=<pool>/<dataset>		(uses this for rpool - first part)
	#	root=ZFS=<pool>/<dataset>	(uses this for rpool - first part, without 'ZFS=')
	#	root=zfs:AUTO			(tries to detect both pool and rootfs)
	#	root=zfs:<pool>/<dataset>	(uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
	# Option <pool> could also be <guid>

	# ------------
	# Support force option
	# In addition, setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for 'rpool' and 'bootfs' parameter
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'
	[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by OpenZFS (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set at the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command:
		# shellcheck disable=SC2046
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi
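
	# Illustratively, a 'zfs-bootfs=rpool/ROOT/debian' entry on the
	# command line would leave ZFS_RPOOL="rpool/ROOT/debian" here.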

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
		# which will be caught later
		ROOT='zfs:AUTO'
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		# If we got here, that means we don't have a hint as to
		# the root dataset, but with root=zfs:AUTO on the cmdline,
		# ZFS_BOOTFS says "zfs:AUTO" here and interferes with
		# checks later, so clear it.
		ZFS_BOOTFS=

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			IFS="$OLD_IFS" import_pool "$pool"
			IFS="$OLD_IFS" find_rootfs "$pool" && break
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
	else
		# No auto - use the value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if not already done so in the AUTO check above).
	if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported, this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as guid, resolve guid to name
	pool="$("${ZPOOL}" get name,guid -o name,value -H | \
	    awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
	if [ -n "$pool" ]; then
		# If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
		ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
			sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
		ZFS_RPOOL="${pool}"
	fi
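
	# Illustratively, rpool=1344172568376082551 together with
	# bootfs=1344172568376082551/ROOT/debian would be rewritten here to
	# ZFS_RPOOL="rpool" and ZFS_BOOTFS="rpool/ROOT/debian".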

	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /proc/self/mounts refresh.
	#
	# * Mount additional filesystems required.
	#   Such as /usr, /var, /usr/local etc.
	#   NOTE: Mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset
	filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
	OLD_IFS="$IFS" ; IFS="
"
	for fs in $filesystems; do
		IFS="$OLD_IFS" mount_fs "$fs"
	done
	IFS="$OLD_IFS"
	for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
		mount_fs "$fs"
	done
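
	# Illustratively, setting ZFS_INITRD_ADDITIONAL_DATASETS="rpool/opt
	# rpool/srv" in /etc/default/zfs (sourced above) would mount those
	# two datasets here, in that order.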

	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]; then
		read -r < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		printf "%s" "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run local bottom script
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-bottom"
			run_scripts /scripts/local-bottom
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}