xref: /freebsd/sys/contrib/openzfs/contrib/initramfs/scripts/zfs (revision 8833aad7befe90716c7526ce6858344ba635582f)
# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
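# For example (illustrative dataset names), a Debian-style boot entry
# might pass:
#
#	boot=zfs root=ZFS=rpool/ROOT/debian
#
# See the kernel command line summary in mountroot() below for all the
# accepted variants.
#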

# Source the common functions
. /etc/zfs/zfs-functions

# Start an interactive shell.
# Use Debian's panic() if defined, because it allows shell access to be
# prevented by setting panic on the kernel command line (e.g. panic=0 or
# panic=15).
# See "4.5 Disable root prompt on the initramfs" of the Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if type panic > /dev/null 2>&1; then
		panic "$@"
	else
		/bin/sh
	fi
}

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-top"
		run_scripts /scripts/local-top
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-premount"
		run_scripts /scripts/local-premount
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
	local fs="$1"
	local value="$2"

	"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
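
# Example usage (illustrative dataset name):
#
#	get_fs_value rpool/ROOT/debian mountpoint   # prints e.g. "/"
#	get_fs_value rpool/ROOT/debian canmount     # prints e.g. "on"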

# Find the 'bootfs' property on pool $1.
# If the property is unset, or the filesystem it names does not have
# its mountpoint set to '/', ignore this pool by exporting it again.
find_rootfs()
{
	local pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported and
	# therefore we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit.
	# The user (kernel command line) must be correct.
	[ -n "${ZFS_BOOTFS}" ] && return 0

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# No usable boot fs here; export the pool and try the next one later.
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=""

	return 1
}
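
# The 'bootfs' pool property consulted above is normally set once by the
# installer or the administrator, e.g. (illustrative names):
#
#	zpool set bootfs=rpool/ROOT/debian rpool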

# Support function to get a list of all pools, separated with ';'.
find_pools()
{
	local CMD="$*"
	local pools pool

	pools=$($CMD 2> /dev/null | \
		grep -E "pool:|^[a-zA-Z0-9]" | \
		sed 's@.*: @@' | \
		while read pool; do \
		    echo -n "$pool;"
		done)

	echo "${pools%%;}" # Return without the last ';'.
}
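
# Example (illustrative): with two exportable pools named 'rpool' and
# 'tank',
#
#	find_pools "$ZPOOL" import
#
# prints "rpool;tank".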

# Get a list of all available pools.
get_pools()
{
	local available_pools npools

	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case: a pool is sometimes not visible to a plain
	# "zpool import", but only when using the "-d" option or setting
	# ZPOOL_IMPORT_PATH, so scan /dev/disk/by-id as well.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the
				# simple "zpool import" which are also found
				# with "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools.
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		local found=""
		local apools=""
		local pool exception
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return list of available pools.
	echo "$available_pools"
}
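
# Both overrides honoured above are semicolon-separated lists, normally
# set in /etc/default/zfs (illustrative values):
#
#	ZFS_POOL_IMPORT="rpool"        # only consider these pools
#	ZFS_POOL_EXCEPTIONS="tank"     # never import these pools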

# Import the pool given as $1.
import_pool()
{
	local pool="$1"
	local dirs dir

	# Verify that the pool isn't already imported.
	# Make as sure as we can to not require '-f' to import.
	"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by-* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in /dev/disk/by-*
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			echo -n "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi
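
	# On a typical system the loop above yields something like
	# (illustrative):
	#
	#	ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-label:/dev/disk/by-uuid:/dev"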

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH

	[ "$quiet" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
	if type wait_for_udev > /dev/null 2>&1 ; then
		wait_for_udev 10
	elif type wait_for_dev > /dev/null 2>&1 ; then
		wait_for_dev
	fi

	# zpool import refuses to import without a valid /proc/self/mounts.
	[ ! -f /proc/self/mounts ] && mount proc /proc

	# Load the module.
	load_module "zfs" || return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
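
# The two sleep knobs above are meant to be set in /etc/default/zfs when
# slow-to-settle devices need extra time before and after module load,
# e.g. (illustrative values):
#
#	ZFS_INITRD_PRE_MOUNTROOT_SLEEP='5'
#	ZFS_INITRD_POST_MODPROBE_SLEEP='2'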

# Mount a given filesystem.
mount_fs()
{
	local fs="$1"
	local mountpoint

	# Check that the filesystem exists.
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1

	# Skip filesystems with canmount=off.  The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "$fs" != "${ZFS_BOOTFS}" ]
	then
		canmount=$(get_fs_value "$fs" canmount)
		[ "$canmount" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check the 'org.zol:mountpoint' property set in
		# clone_snap() to see if that's usable.
		mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint" = "legacy" -o \
		    "$mountpoint" = "none" -o \
		    "$mountpoint" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			else
				# Last hail-mary: hope 'rootmnt' is set!
				mountpoint=""
			fi
		fi

		if [ "$mountpoint" = "legacy" ]; then
			ZFS_CMD="mount -t zfs"
		else
			# If it's not a legacy filesystem, it can only be a
			# native one...
			ZFS_CMD="mount -o zfsutil -t zfs"
		fi
	else
		ZFS_CMD="mount -o zfsutil -t zfs"
	fi
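
	# For example (illustrative names), a dataset with mountpoint=/var
	# ends up being mounted as:
	#
	#	mount -o zfsutil -t zfs rpool/var "${rootmnt}/var"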

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
	local fs="$1"

	# If the encryption feature is active on the pool holding $fs...
	if [ "$("${ZPOOL}" list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then

		# Determine the dataset that holds the key for the root dataset.
		ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If the root dataset is encrypted...
		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded.
			[ "$KEYSTATUS" = "unavailable" ] || return 0
			TRY_COUNT=3

			# If the key is stored in a file, do not prompt.
			if ! [ "${KEYLOCATION}" = "prompt" ]; then
				$ZFS load-key "${ENCRYPTIONROOT}"

			# Prompt with plymouth, if active.
			elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
				echo "plymouth" > /run/zfs_console_askpwd_cmd
				while [ $TRY_COUNT -gt 0 ]; do
					plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
					TRY_COUNT=$((TRY_COUNT - 1))
				done

			# Prompt with systemd, if active.
			elif [ -e /run/systemd/system ]; then
				echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
				while [ $TRY_COUNT -gt 0 ]; do
					systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
					TRY_COUNT=$((TRY_COUNT - 1))
				done

			# Prompt on the console, otherwise.
			else
				# Temporarily setting "printk" to "7" allows the prompt to appear
				# even when the "quiet" kernel option has been used.
				echo "load-key" > /run/zfs_console_askpwd_cmd
				storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
				echo 7 > /proc/sys/kernel/printk
				$ZFS load-key "${ENCRYPTIONROOT}"
				echo "$storeprintk" > /proc/sys/kernel/printk
			fi
		fi
	fi

	return 0
}
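
# A file-based key is found via the 'keylocation' property consulted
# above, e.g. (illustrative path):
#
#	zfs set keylocation=file:///etc/zfs/rpool.key rpool/ROOT
#
# while 'keylocation=prompt' triggers one of the interactive paths.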

# Destroy a given filesystem.
destroy_fs()
{
	local fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not in use."
		echo "Hint: Try:  zfs destroy -Rfn $fs"
		echo "If this dry run looks good, then remove the 'n' from '-Rfn' and try again."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2, remembering the
# original mountpoint passed as $3.
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	local snap="$1"
	local destfs="$2"
	local mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from:
	# + We don't want this filesystem to be automatically mounted, we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Roll back to a given snapshot.
rollback_snap()
{
	local snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to roll back snapshot."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots and present them as a numbered list
# for the user to choose from.
ask_user_snap()
{
	local fs="$1"
	local i=1
	local SNAP snapnr snap debug

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" >&2
	while read -r snap; do
	    echo "  $i: ${snap}" >&2
	    eval "SNAP_$i=\"\$snap\""
	    i=$((i + 1))
	done <<EOT
$("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
EOT

	echo -n "  Snap nr [1-$((i-1))]? " >&2
	read -r snapnr

	# Re-enable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

	eval echo "\"\$SNAP_$snapnr\""
}
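
# A session with this helper looks roughly like (illustrative):
#
#	What snapshot do you want to boot from?
#	  1: rpool/ROOT/debian@snap1
#	  2: rpool/ROOT/debian@snap2
#	  Snap nr [1-2]? 2
#
# and "rpool/ROOT/debian@snap2" is then printed on stdout for the caller.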

setup_snapshot_booting()
{
	local snap="$1"
	local s destfs subfs mountpoint retval=0 filesystems fs

	# Make sure that the snapshot specified actually exists.
	if [ -z "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?);
		# ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. Would have been nice with a split() function..
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it. Recursively.
		if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (might need to clone /usr, /var etc
	# as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Rollback snapshot
			rollback_snap "$s" || retval=$((retval + 1))
		else
			# Set up a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#       rpool/ROOT/debian@snap2		=> rpool/ROOT/debian_snap2
			#       rpool/ROOT/debian/boot@snap2	=> rpool/ROOT/debian_snap2/boot
			#       rpool/ROOT/debian/usr@snap2	=> rpool/ROOT/debian_snap2/usr
			#       rpool/ROOT/debian/var@snap2	=> rpool/ROOT/debian_snap2/var
			subfs="${s##$rootfs}"
			subfs="${subfs%%@$snapname}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" -o \
			    "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem.
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# retval is the number of failures; zero means everything succeeded.
	return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	local snaporig snapsub destfs pool POOLS

	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support debug option
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir -p /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ ! -f "${ZPOOL_CACHE}" ] && unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#		 'root' is for RedHat/Fedora (etc),
	#		 'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script.
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#		 'NEWROOT' is for RedHat/Fedora (etc),
	#		 'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This is the really ugly part - there are so many options and
	# permutations 'out there', and if this is to be the 'primary'
	# source for ZFS initrd scripting, we need to support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#	rpool=<pool>			(tries to find bootfs automatically)
	#	bootfs=<pool>/<dataset>		(uses this for rpool - first part)
	#	rpool=<pool> bootfs=<pool>/<dataset>
	#	-B zfs-bootfs=<pool>/<fs>	(uses this for rpool - first part)
	#	rpool=rpool			(default if none of the above is used)
	#	root=<pool>/<dataset>		(uses this for rpool - first part)
	#	root=ZFS=<pool>/<dataset>	(uses this for rpool - first part, without 'ZFS=')
	#	root=zfs:AUTO			(tries to detect both pool and rootfs)
	#	root=zfs:<pool>/<dataset>	(uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>.
	# Option <pool> could also be <guid>.
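	#
	# For example (illustrative), booting a clone of a snapshot:
	#
	#	root=zfs:rpool/ROOT/debian@snap2
	#
	# or rolling back to that snapshot instead of cloning it
	# (see setup_snapshot_booting() above):
	#
	#	root=zfs:rpool/ROOT/debian@snap2 rollback=1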

	# ------------
	# Support force option.
	# In addition, setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for the 'rpool' and 'bootfs' parameters.
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'.
	[ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by ZoL (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set at the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command.
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
		# which will be caught later.
		ROOT=zfs:AUTO
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import.
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that one.
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			import_pool "$pool"
			find_rootfs "$pool"
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
	else
		# No auto - use the value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if that wasn't already done in the AUTO check above).
	if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported, this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as a guid, resolve the guid to a name.
	pool="$("${ZPOOL}" get name,guid -o name,value -H | \
	    awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
	if [ -n "$pool" ]; then
		# If $ZFS_BOOTFS contains the guid, replace the guid portion with $pool.
		ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
			sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
		ZFS_RPOOL="${pool}"
	fi
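
	# For example (illustrative guid), this turns
	# 'root=13068955035706554762/ROOT/debian' into ZFS_RPOOL=rpool and
	# ZFS_BOOTFS=rpool/ROOT/debian once the guid has been matched.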

	# Set the no-op scheduler on the disks containing the vdevs of
	# the root pool. For single-queue devices, this scheduler is
	# "noop", for multi-queue devices, it is "none".
	# ZFS already does this for wholedisk vdevs (for all pools), so this
	# is only important for partitions.
	"${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
	    awk '/^\t / && !/(mirror|raidz)/ {
	        dev=$1;
	        sub(/[0-9]+$/, "", dev);
	        print dev
	    }' |
	while read -r i
	do
		SCHEDULER=/sys/block/$i/queue/scheduler
		if [ -e "${SCHEDULER}" ]
		then
			# Query to see what schedulers are available
			case "$(cat "${SCHEDULER}")" in
				*noop*) echo noop > "${SCHEDULER}" ;;
				*none*) echo none > "${SCHEDULER}" ;;
			esac
		fi
	done
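
	# E.g. (illustrative): if the pool lives on /dev/sda2, the loop above
	# strips the partition number and writes "noop" or "none" to
	# /sys/block/sda/queue/scheduler.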

	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /proc/self/mounts refresh.
	#
	# * Mount additional filesystems required,
	#   such as /usr, /var, /usr/local etc.
	#   NOTE: Mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset.
	filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
	for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
	do
		mount_fs "$fs"
	done

	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]; then
		read -r zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run the local-bottom scripts.
	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-bottom"
		run_scripts /scripts/local-bottom
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi
}