# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154

# Source the common functions
. /etc/zfs/zfs-functions

# Start interactive shell.
# Use Debian's panic() if defined, because it allows shell access to be
# prevented by setting panic on the cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of the Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if command -v panic > /dev/null 2>&1
	then
		panic
	else
		/bin/sh
	fi
}

get_cmdline()
{
	_option="${1}"

	# shellcheck disable=SC2089
	grep -qiE "(^|[^\\](\\\\)* )(${_option})=(on|yes|1)( |$)" /proc/cmdline
}
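
# Illustrative sketch (not part of the boot logic): get_cmdline matches a
# boolean option switched on via the kernel command line, so with a
# hypothetical /proc/cmdline of
#   BOOT_IMAGE=/vmlinuz boot=zfs zfsdebug=on root=ZFS=rpool/ROOT/debian
# the call `get_cmdline 'zfs_debug|zfs\.debug|zfsdebug'` succeeds (returns 0),
# while `get_cmdline 'zfs_force|zfs\.force|zfsforce'` fails.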

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
		then
			[ "${quiet}" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-top"
			run_scripts /scripts/local-top
			[ "${quiet}" != "y" ] && zfs_log_end_msg
		fi

		if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
		then
			[ "${quiet}" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-premount"
			run_scripts /scripts/local-premount
			[ "${quiet}" != "y" ] && zfs_log_end_msg
		fi
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
	_get_fs="${1}"
	_get_value="${2}"

	"${ZFS}" get -H -ovalue "${_get_value}" "${_get_fs}" 2> /dev/null
}
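
# For example (illustrative values only), `get_fs_value rpool/ROOT/debian mountpoint`
# runs `zfs get -H -o value mountpoint rpool/ROOT/debian` and prints just the
# value column, e.g. "/".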

# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
find_rootfs()
{
	_find_rootfs_pool="${1}"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported, and therefore
	# we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit.
	# The user (kernel command line) must be correct.
	if [ -n "${ZFS_BOOTFS}" ] && [ "${ZFS_BOOTFS}" != "zfs:AUTO" ]
	then
		return 0
	fi

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "${_find_rootfs_pool}")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# Not a boot fs here; export it and try again later..
	"${ZPOOL}" export "${_find_rootfs_pool}"
	POOL_IMPORTED=
	ZFS_BOOTFS=
	return 1
}
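
# As an illustration (hypothetical pool and dataset names): if the
# administrator has run `zpool set bootfs=rpool/ROOT/debian rpool`, then
# `zpool list -H -o bootfs rpool` prints "rpool/ROOT/debian", and
# find_rootfs accepts it as long as that dataset's mountpoint is '/'.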

# Support function to get a list of all pools, separated with ';'
find_pools()
{
	_find_pools=$("${@}" 2> /dev/null | \
		sed -Ee '/ pool: | id: /!d' -e 's@.*: @@' | \
		tr '\n' ';')

	echo "${_find_pools%%;}" # Return without the last ';'.
}
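
# Rough sketch of what this filters (example output, names hypothetical):
# `zpool import` prints blocks like
#    pool: rpool
#      id: 12345678901234567890
#   state: ONLINE
# and find_pools keeps the values of both the "pool:" and "id:" lines,
# yielding e.g. "rpool;12345678901234567890" (names and ids can both be
# handed to a later "zpool import").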

# Get a list of all available pools
get_pools()
{
	if [ -n "${ZFS_POOL_IMPORT}" ]
	then
		echo "${ZFS_POOL_IMPORT}"
		return 0
	fi

	# Get the base list of available pools.
	_available_pools="$(find_pools "${ZPOOL}" import)"

	# Just in case - it has been seen that a pool isn't visible/found
	# with a simple "zpool import", but only when using the "-d"
	# option or setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		_npools="$(find_pools "${ZPOOL}" import -d /dev/disk/by-id)"
		if [ -n "${_npools}" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "${_available_pools}" ]
			then
				# Filter out duplicates (pools found with the simple
				# "zpool import" but which are also found with the
				# "zpool import -d ...").
				_npools="$(echo "${_npools}" | sed "s,${_available_pools},,")"

				# Add the list to the existing list of
				# available pools
				_available_pools="${_available_pools};${_npools}"
			else
				_available_pools="${_npools}"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "${ZFS_POOL_EXCEPTIONS}" ]
	then
		_found=""
		_apools=""
		OLD_IFS="${IFS}" ; IFS=";"

		for pool in ${_available_pools}
		do
			for exception in ${ZFS_POOL_EXCEPTIONS}
			do
				[ "${pool}" = "${exception}" ] && continue 2
				_found="${pool}"
			done

			if [ -n "${_found}" ]
			then
				if [ -n "${_apools}" ]
				then
					_apools="${_apools};${pool}"
				else
					_apools="${pool}"
				fi
			fi
		done

		IFS="${OLD_IFS}"
		_available_pools="${_apools}"
	fi

	# Return list of available pools.
	echo "${_available_pools}"
}

# Import given pool $1
import_pool()
{
	_import_pool="${1}"

	# Verify that the pool isn't already imported
	# Make as sure as we can to not require '-f' to import.
	"${ZPOOL}" get -H -o value name,guid 2>/dev/null | \
		grep -Fxq "${_import_pool}" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "${USE_DISK_BY_ID}" ] && [ -z "${ZPOOL_IMPORT_PATH}" ]
	then
		dirs="$(for dir in /dev/disk/by-*
		do
			# Ignore by-vdev here - we want it first!
			echo "${dir}" | grep -q /by-vdev && continue
			[ ! -d "${dir}" ] && continue

			printf "%s" "${dir}:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="${ZPOOL_IMPORT_PATH}${dirs}:/dev"
	fi
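
	# For instance (the exact paths depend on the system's udev rules), on a
	# machine with by-vdev, by-id and by-path directories the loop above
	# would leave something like:
	#   ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-path:/dev"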

	# Needs to be exported for "zpool" to catch it.
	[ -n "${ZPOOL_IMPORT_PATH}" ] && export ZPOOL_IMPORT_PATH

	[ "${quiet}" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${_import_pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$(${ZFS_CMD} "${_import_pool}" 2>&1)"
	ZFS_ERROR="${?}"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "${quiet}" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${_import_pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE}"
			ZFS_CMD="${ZFS_CMD} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$(${ZFS_CMD} "${_import_pool}" 2>&1)"
			ZFS_ERROR="${?}"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '${_import_pool}'"
			echo "Message: ${ZFS_STDERR}"
			echo "Error: ${ZFS_ERROR}"
			echo ""
			echo "Failed to import pool '${_import_pool}'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "${quiet}" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	_retval=0

	ZFS_INITRD_PRE_MOUNTROOT_SLEEP=${ROOTDELAY:-0}

	if [ "${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}" -gt 0 ]
	then
		[ "${quiet}" != "y" ] && \
			zfs_log_begin_msg "Delaying for up to '${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}' seconds."
	fi

	START="$(/bin/date -u +%s)"
	END="$((START+ZFS_INITRD_PRE_MOUNTROOT_SLEEP))"
	while true
	do
		# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
		if command -v wait_for_udev > /dev/null 2>&1
		then
			wait_for_udev 10
		elif command -v wait_for_dev > /dev/null 2>&1
		then
			wait_for_dev
		fi

		# zpool import refuses to import without a valid /proc/self/mounts
		[ ! -f /proc/self/mounts ] && mount proc /proc

		# Load the module
		if load_module "zfs"
		then
			_retval=0
			break
		else
			_retval=1
		fi

		[ "$(/bin/date -u +%s)" -gt "${END}" ] && break
		sleep 1
	done

	if [ "${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}" -gt 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	[ "${_retval}" -ne 0 ] && return "${_retval}"

	if [ "${ZFS_INITRD_POST_MODPROBE_SLEEP}" -gt 0 ] 2>/dev/null
	then
		if [ "${quiet}" != "y" ]
		then
			zfs_log_begin_msg "Sleeping for" \
				"${ZFS_INITRD_POST_MODPROBE_SLEEP} seconds..."
		fi
		sleep "${ZFS_INITRD_POST_MODPROBE_SLEEP}"
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	return "${_retval}"
}

# Mount a given filesystem
# Note: The filesystem must have either `canmount=on` or `canmount=noauto`.
#       This script only deals with "important" filesystems, such as those
#       that are relevant to the operation of the operating system.
#       Therefore we need to mount even those that have `canmount=noauto`.
#       However, if the user has specifically set `canmount=off`, then
#       we will assume that the user knows what they're doing and ignore it.
mount_fs()
{
	_mount_fs="${1}"

	# Check that the filesystem exists
	"${ZFS}" list -oname -tfilesystem -H "${_mount_fs}" \
			 > /dev/null 2>&1 ||  return 1

	# Skip filesystems with canmount=off.  The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "${_mount_fs}" != "${ZFS_BOOTFS}" ]
	then
		_canmount="$(get_fs_value "${_mount_fs}" canmount)"
		[ "${_canmount}" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	_mountpoint="$(get_fs_value "${_mount_fs}" mountpoint)"
	ZFS_CMD="mount.zfs -o zfsutil"
	if [ "${_mountpoint}" = "legacy" ] || [ "${_mountpoint}" = "none" ]
	then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check the 'org.zol:mountpoint' property set in
		# clone_snap() if that's usable.
		_mountpoint1="$(get_fs_value "${_mount_fs}" org.zol:mountpoint)"
		if [ "${_mountpoint1}" = "legacy" ] ||
		   [ "${_mountpoint1}" = "none" ] ||
		   [ "${_mountpoint1}" = "-" ]
		then
			if [ "${_mount_fs}" != "${ZFS_BOOTFS}" ]
			then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			fi
			if [ "${_mountpoint}" = "legacy" ]
			then
				ZFS_CMD="mount.zfs"
			fi
			# Last hail-mary: Hope 'rootmnt' is set!
			_mountpoint=""
		else
			_mountpoint="${_mountpoint1}"
		fi
	fi

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "${_mount_fs}"

	[ "${quiet}" != "y" ] && \
	    zfs_log_begin_msg \
			"Mounting '${_mount_fs}' on '${rootmnt}/${_mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg \
			"CMD: '${ZFS_CMD} ${_mount_fs} ${rootmnt}/${_mountpoint}'"

	ZFS_STDERR="$(${ZFS_CMD} "${_mount_fs}" "${rootmnt}/${_mountpoint}" 2>&1)"
	ZFS_ERROR="${?}"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${_mount_fs} ${rootmnt}/${_mountpoint}"
		echo "Message: ${ZFS_STDERR}"
		echo "Error: ${ZFS_ERROR}"
		echo ""
		echo "Failed to mount ${_mount_fs} on ${rootmnt}/${_mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
	_decrypt_fs="${1}"

	# If pool encryption is active and the zfs command understands
	# '-o encryption'.
	_enc="$("${ZPOOL}" list -H -o feature@encryption "${_decrypt_fs%%/*}")"
	if [ "${_enc}" = 'active' ]
	then
		# Determine dataset that holds key for root dataset
		ENCRYPTIONROOT="$(get_fs_value "${_decrypt_fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If root dataset is encrypted...
		if [ "${ENCRYPTIONROOT}" != "-" ]
		then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded
			[ "${KEYSTATUS}" = "unavailable" ] || return 0

			# Try extensions first
			for key in "/etc/zfs/initramfs-tools-load-key" \
						   "/etc/zfs/initramfs-tools-load-key.d/"*
			do
				[ -r "${key}" ] || continue
				(. "${key}") && {
					# Successful return and actually-loaded key: we're done
					KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
					[ "${KEYSTATUS}" = "unavailable" ] || return 0
				}
			done
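
			# A minimal sketch of such an extension (hypothetical path and
			# keyfile; each extension is sourced and should succeed only
			# after it has actually loaded the key), e.g. a file
			# /etc/zfs/initramfs-tools-load-key.d/usbkey containing:
			#   zfs load-key -L file:///mnt/usb/rpool.key "${ENCRYPTIONROOT}"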

			# Do not prompt if the key is stored noninteractively.
			if [ "${KEYLOCATION}" != "prompt" ]
			then
				"${ZFS}" load-key "${ENCRYPTIONROOT}"

			# Prompt with plymouth, if active
			elif /bin/plymouth --ping 2>/dev/null
			then
				echo "plymouth" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3
				do
					plymouth ask-for-password --prompt \
							 "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						"${ZFS}" load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt with systemd, if active
			elif [ -e /run/systemd/system ]
			then
				echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3
				do
					systemd-ask-password \
						--no-tty \
						"Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						"${ZFS}" load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt with ZFS tty, otherwise
			else
				# Temporarily setting "printk" to "7" allows the prompt to
				# appear even when the "quiet" kernel option has been used.
				echo "load-key" > /run/zfs_console_askpwd_cmd
				read -r storeprintk _ < /proc/sys/kernel/printk
				echo 7 > /proc/sys/kernel/printk
				"${ZFS}" load-key "${ENCRYPTIONROOT}"
				echo "${storeprintk}" > /proc/sys/kernel/printk
			fi
		fi
	fi

	return 0
}

# Destroy a given filesystem.
destroy_fs()
{
	_destroy_fs="${1}"

	[ "${quiet}" != "y" ] && \
	    zfs_log_begin_msg "Destroying '${_destroy_fs}'"

	ZFS_CMD="${ZFS} destroy ${_destroy_fs}"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="${?}"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD}"
		echo "Message: ${ZFS_STDERR}"
		echo "Error: ${ZFS_ERROR}"
		echo ""
		echo "Failed to destroy '${_destroy_fs}'."
		echo "Please make sure that it is not available."
		echo "Hint: Try:  zfs destroy -Rfn ${_destroy_fs}"
		echo "If this dryrun looks good, then remove the 'n' from '-Rfn'" \
			 "and try again."
		shell
	else
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	_clone_snap="${1}"
	_clone_destfs="${2}"
	_clone_mountpoint="${3}"

	[ "${quiet}" != "y" ] && \
		zfs_log_begin_msg "Cloning '${_clone_snap}' to '${_clone_destfs}'"

	# Clone the snapshot into a dataset we can boot from
	# + We don't want this filesystem to be automatically mounted, we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${_clone_mountpoint}"
	ZFS_CMD="${ZFS_CMD} ${_clone_snap} ${_clone_destfs}"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="${?}"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD}"
		echo "Message: ${ZFS_STDERR}"
		echo "Error: ${ZFS_ERROR}"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '${_clone_destfs}' exists and is bootable."
		shell
	else
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
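
# By way of example (dataset names are hypothetical), booting from
# 'rpool/ROOT/debian@snap2' leads setup_snapshot_booting() to call
# clone_snap so that the effective command becomes:
#   zfs clone -o canmount=noauto -o mountpoint=none \
#       -o org.zol:mountpoint=/ rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2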

# Rollback a given snapshot.
rollback_snap()
{
	_rollback_snap="${1}"

	[ "${quiet}" != "y" ] && zfs_log_begin_msg "Rollback ${_rollback_snap}"

	ZFS_CMD="${ZFS} rollback -Rf ${_rollback_snap}"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="${?}"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD}"
		echo "Message: ${ZFS_STDERR}"
		echo "Error: ${ZFS_ERROR}"
		echo ""
		echo "Failed to rollback snapshot."
		shell
	else
		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots, give them as a numbered list
# to the user to choose from.
ask_user_snap()
{
	_ask_snap="${1}"

	# We need to temporarily disable debugging. Set '_debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]
	then
		unset ZFS_DEBUG
		set +x
		_debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	# shellcheck disable=SC2046
	IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${_ask_snap}")

	i=1
	for snap in "${@}"
	do
		echo "  ${i}: ${snap}"
		i="$((i + 1))"
	done > /dev/stderr

	# expr instead of test here because [ a -lt 0 ] errors out,
	# but expr falls back to lexicographical, which works out right
	_snapnr=0
	while expr "${_snapnr}" "<" 1 > /dev/null ||
	    expr "${_snapnr}" ">" "${#}" > /dev/null
	do
		printf "%s" "Snap nr [1-${#}]? " > /dev/stderr
		read -r _snapnr
	done

	# Re-enable debugging.
	if [ -n "${_debug}" ]
	then
		ZFS_DEBUG=1
		set -x
	fi

	eval echo '$'"${_snapnr}"
}

setup_snapshot_booting()
{
	_boot_snap="${1}"
	_retval=0

	# Make sure that the snapshot specified actually exists.
	if [ -z "$(get_fs_value "${_boot_snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?);
		# ask the user for a snapshot to use.
		_boot_snap="$(ask_user_snap "${_boot_snap%%@*}")"
	fi

	# Separate the full snapshot ('${_boot_snap}') into its filesystem and
	# snapshot names. Would have been nice with a split() function..
	_rootfs="${_boot_snap%%@*}"
	_snapname="${_boot_snap##*@}"
	ZFS_BOOTFS="${_rootfs}_${_snapname}"

	if ! get_cmdline 'rollback'
	then
		# If the destination dataset for the clone
		# already exists, destroy it, recursively.
		if [ -n "$(get_fs_value "${_rootfs}_${_snapname}" type)" ]
		then
			_filesystems="$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")"
			for fs in ${_filesystems}
			do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (might need to clone /usr, /var etc
	# as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${_rootfs}" | \
	    grep "${_snapname}")
	do
		if get_cmdline 'rollback'
		then
			# Rollback snapshot
			rollback_snap "${s}" || _retval="$((_retval + 1))"
			ZFS_BOOTFS="${_rootfs}"
		else
			# Setup a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#       rpool/ROOT/debian@snap2      => rpool/ROOT/debian_snap2
			#       rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
			#       rpool/ROOT/debian/usr@snap2  => rpool/ROOT/debian_snap2/usr
			#       rpool/ROOT/debian/var@snap2  => rpool/ROOT/debian_snap2/var
			_subfs="${s##"${_rootfs}"}"
			_subfs="${_subfs%%@"${_snapname}"}"

			_destfs="${_rootfs}_${_snapname}" # base fs.
			[ -n "${_subfs}" ] && _destfs="${_destfs}${_subfs}" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			_mountpoint="$(get_fs_value "${s%%@*}" mountpoint)"
			if [ "${_mountpoint}" = "legacy" ] || \
			   [ "${_mountpoint}" = "none" ]
			then
				if [ -n "${_subfs}" ]
				then
					_mountpoint="${_subfs}"
				else
					_mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem
			clone_snap "${s}" "${_destfs}" "${_mountpoint}" || \
			    _retval="$((_retval + 1))"
		fi
	done

	# If we haven't returned yet, we have a problem...
	return "${_retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Support debug option
	if get_cmdline 'zfs_debug|zfs\.debug|zfsdebug'
	then
		ZFS_DEBUG=1
		mkdir /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load ZFS module etc.
	if ! load_module_initrd
	then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
	[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#		 'root' is for Redhat/Fedora (etc),
	#		 'REAL_ROOT' is for Gentoo
	if [ -z "${ROOT}" ]
	then
		[ -n "${root}" ] && ROOT="${root}"

		[ -n "${REAL_ROOT}" ] && ROOT="${REAL_ROOT}"
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#		 'NEWROOT' is for RedHat/Fedora (etc),
	#		 'NEW_ROOT' is for Gentoo
	if [ -z "${rootmnt}" ]
	then
		[ -n "${NEWROOT}" ] && rootmnt="${NEWROOT}"

		[ -n "${NEW_ROOT}" ] && rootmnt="${NEW_ROOT}"
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "${USE_DISK_BY_ID}" ] && [ "${USE_DISK_BY_ID}" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This part is the really ugly part - there are so many options and
	# permutations 'out there', and if this is to be the 'primary'
	# source for ZFS initrd scripting, it needs to support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#   rpool=<pool>                (tries to find bootfs automatically)
	#   bootfs=<pool>/<dataset>     (uses this for rpool - first part)
	#   rpool=<pool> bootfs=<pool>/<dataset>
	#   -B zfs-bootfs=<pool>/<fs>   (uses this for rpool - first part)
	#   rpool=rpool                 (default if none of the above is used)
	#   root=<pool>/<dataset>       (uses this for rpool - first part)
	#   root=ZFS=<pool>/<dataset>   (uses this for rpool - first part, without 'ZFS=')
	#   root=zfs:AUTO               (tries to detect both pool and rootfs)
	#   root=zfs:<pool>/<dataset>   (uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
	# Option <pool> could also be <guid>
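	#
	# For illustration (pool/dataset names hypothetical), any of these
	# kernel command lines would be accepted:
	#   boot=zfs root=ZFS=rpool/ROOT/debian
	#   boot=zfs rpool=tank bootfs=tank/ROOT/ubuntu
	#   boot=zfs root=zfs:AUTO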

	# ------------
	# Support force option
	# In addition, setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if get_cmdline 'zfs_force|zfs\.force|zfsforce'
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for 'rpool' and 'bootfs' parameter
	[ -n "${rpool}" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "${bootfs}" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'
	[ -n "${ROOT}" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="${ROOT}"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by OpenZFS (whatever it's for).
	if [ -z "${ZFS_RPOOL}" ]
	then
		# The ${zfs-bootfs} variable is set at the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command:
		# shellcheck disable=SC2046
		eval ZFS_RPOOL="$(set | sed -n -e 's,^zfs-bootfs=,,p')"
	fi
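
	# Sketch of the effect (values are hypothetical): if the kernel command
	# line carried "-B zfs-bootfs=rpool/ROOT/debian", the sed expression
	# strips the "zfs-bootfs=" prefix from the environment dump and the
	# eval leaves ZFS_RPOOL="rpool/ROOT/debian".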

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "${ZFS_RPOOL}" ] && [ -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
		# which will be caught later
		ROOT='zfs:AUTO'
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "${ROOT}" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		# If we got here, that means we don't have a hint as to
		# the root dataset, but with root=zfs:AUTO on the cmdline,
		# ZFS_BOOTFS says "zfs:AUTO" here and interferes with checks later
		ZFS_BOOTFS=

		[ "${quiet}" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import
		if [ -n "${ZFS_RPOOL}" ]
		then
			# We've specified a pool - check only that
			POOLS="${ZFS_RPOOL}"
		else
			POOLS="$(get_pools)"
		fi

		OLD_IFS="${IFS}" ; IFS=";"
		for pool in ${POOLS}
		do
			[ -z "${pool}" ] && continue

			IFS="${OLD_IFS}" import_pool "${pool}"
			IFS="${OLD_IFS}" find_rootfs "${pool}" && break
		done
		IFS="${OLD_IFS}"

		[ "${quiet}" != "y" ] && zfs_log_end_msg "${ZFS_ERROR}"
	else
		# No auto - use value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if not already done so in the AUTO check above).
	if [ -n "${ZFS_RPOOL}" ] && [ -z "${POOL_IMPORTED}" ]
	then
		[ "${quiet}" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '${ZFS_RPOOL}'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "${quiet}" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported, this is serious!
		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD}"
		echo "Message: ${ZFS_STDERR}"
		echo "Error: ${ZFS_ERROR}"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as guid, resolve guid to name
	pool="$("${ZPOOL}" get -H -o name,value name,guid | \
	    awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
	if [ -n "${pool}" ]
	then
		# If ${ZFS_BOOTFS} contains guid, replace the guid portion with ${pool}.
		ZFS_BOOTFS="$(echo "${ZFS_BOOTFS}" | \
			sed -e "s/$("${ZPOOL}" get -H -o value guid "${pool}")/${pool}/g")"
		ZFS_RPOOL="${pool}"
	fi
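
	# Illustration (guid and names made up): with root=9112847093075411398/ROOT/debian
	# on the cmdline, ZFS_RPOOL holds the guid; the awk lookup above maps it
	# to the pool name (say "rpool"), and the sed rewrites ZFS_BOOTFS to
	# "rpool/ROOT/debian".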


	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on ${rootmnt} and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs " \
			 "${ZFS_RPOOL:-rpool}/ROOT/system ${rootmnt}"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "${rootmnt}" -N "${ZFS_RPOOL}"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /proc/self/mounts refresh.
	#
	# * Mount additional filesystems required
	#   Such as /usr, /var, /usr/local etc.
	#   NOTE: Mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset
	# As in, nested filesystems:
	#    pool/root
	#    pool/root/usr
	#    pool/root/var
	#    [etc]
	filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
	OLD_IFS="${IFS}" ; IFS="
"
	for fs in ${filesystems}
	do
		IFS="${OLD_IFS}" mount_fs "${fs}"
	done
	IFS="${OLD_IFS}"

	# Mount any extra filesystems specified in `/etc/default/zfs`.
	# This should only be datasets *not* nested below the root
	# filesystem, *and* that are part of the operating system.
	# For example, /home isn't part of the OS; it will be mounted
	# automatically later in the boot process.
	for fs in ${ZFS_INITRD_ADDITIONAL_DATASETS}
	do
		mount_fs "${fs}"
	done

	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]
	then
		# shellcheck disable=SC2034
		read -r zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		printf "%s" "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "${b}" = "c" ] && /bin/sh
		[ "${b}" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run local bottom script
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
		then
			[ "${quiet}" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-bottom"
			run_scripts /scripts/local-bottom
			[ "${quiet}" != "y" ] && zfs_log_end_msg
		fi
	fi
}