# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#

# Source the common functions
. /etc/zfs/zfs-functions

# Start interactive shell.
# Use Debian's panic() if defined, because it makes it possible to prevent
# shell access by setting panic on the command line (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if command -v panic > /dev/null 2>&1; then
		panic
	else
		/bin/sh
	fi
}

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-top"
			run_scripts /scripts/local-top
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi

		if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-premount"
			run_scripts /scripts/local-premount
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
	fs="$1"
	value=$2

	"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
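
# Illustration (hypothetical pool/dataset names): querying properties of a
# root dataset might look like this:
#
#	mountpoint=$(get_fs_value rpool/ROOT/debian mountpoint)   # e.g. "/"
#	canmount=$(get_fs_value rpool/ROOT/debian canmount)       # e.g. "on"
#
# The '-H -ovalue' flags strip the header and all columns except the value,
# so the result is safe to use directly in tests.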

# Find the 'bootfs' property on pool $1.
# If the property is unset, or the dataset's mountpoint isn't '/', ignore
# this pool by exporting it again.
find_rootfs()
{
	pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported, and
	# therefore we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit.
	# The user (kernel command line) must be correct.
	[ -n "${ZFS_BOOTFS}" ] && return 0

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# No boot fs here; export the pool and try again later.
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=""

	return 1
}

# Support function to get a list of all pools, separated with ';'
find_pools()
{
	CMD="$*"

	pools=$($CMD 2> /dev/null | \
		grep -E "pool:|^[a-zA-Z0-9]" | \
		sed 's@.*: @@' | \
		while read -r pool; do \
		    printf "%s" "$pool;"
		done)

	echo "${pools%%;}" # Return without the last ';'.
}
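
# Illustration (hypothetical pool names): given two importable pools,
# 'zpool import' prints blocks containing lines such as "   pool: rpool"
# and "   pool: tank"; find_pools reduces that output to roughly the
# single line "rpool;tank", ready for splitting on ';' with IFS.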

# Get a list of all available pools
get_pools()
{
	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case - we have seen it happen that a pool isn't
	# visible/found with a plain "zpool import", but only when using
	# the "-d" option or setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the plain
				# "zpool import" that are also found with
				# "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		found=""
		apools=""
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return list of available pools.
	echo "$available_pools"
}
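
# Illustration (hypothetical values): with ZFS_POOL_EXCEPTIONS="tank" and
# the pools "rpool;tank;backup" discovered, get_pools returns
# "rpool;backup". Setting ZFS_POOL_IMPORT="rpool" bypasses discovery
# entirely and returns that value as-is.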

# Import given pool $1
import_pool()
{
	pool="$1"

	# Verify that the pool isn't already imported.
	# Be as sure as we can, so that '-f' isn't needed to import.
	"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in $(echo /dev/disk/by-*)
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			printf "%s" "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi
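
	# Illustration: on a system with by-vdev, by-id and by-uuid
	# directories (names hypothetical), the resulting search path
	# might look like this - note that by-vdev comes first:
	#
	#	ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-uuid:/dev"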

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH

	[ "$quiet" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
	if command -v wait_for_udev > /dev/null 2>&1 ; then
		wait_for_udev 10
	elif command -v wait_for_dev > /dev/null 2>&1 ; then
		wait_for_dev
	fi

	# zpool import refuses to import without a valid /proc/self/mounts
	[ ! -f /proc/self/mounts ] && mount proc /proc

	# Load the module
	load_module "zfs" || return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Mount a given filesystem
mount_fs()
{
	fs="$1"

	# Check that the filesystem exists
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1

	# Skip filesystems with canmount=off.  The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "$fs" != "${ZFS_BOOTFS}" ]
	then
		canmount=$(get_fs_value "$fs" canmount)
		[ "$canmount" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check the 'org.zol:mountpoint' property set in
		# clone_snap() to see if that's usable.
		mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint" = "legacy" ] ||
		   [ "$mountpoint" = "none" ] ||
		   [ "$mountpoint" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			else
				# Last hail-mary: Hope 'rootmnt' is set!
				mountpoint=""
			fi
		fi

		if [ "$mountpoint" = "legacy" ]; then
			ZFS_CMD="mount -t zfs"
		else
			# If it's not a legacy filesystem, it can only be a
			# native one...
			ZFS_CMD="mount -o zfsutil -t zfs"
		fi
	else
		ZFS_CMD="mount -o zfsutil -t zfs"
	fi

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
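
# Illustration (hypothetical dataset names): a dataset whose (org.zol:)
# mountpoint resolves to something usable such as "/var" is mounted
# roughly as
#
#	mount -o zfsutil -t zfs rpool/ROOT/debian/var "${rootmnt}/var"
#
# while a dataset whose mountpoint resolves to "legacy" is mounted with a
# plain "mount -t zfs" instead, leaving mount options to /etc/fstab.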

# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
	fs="$1"

	# If pool encryption is active and the zfs command understands '-o encryption'
	if [ "$(zpool list -H -o feature@encryption "$(echo "${fs}" | awk -F/ '{print $1}')")" = 'active' ]; then

		# Determine dataset that holds key for root dataset
		ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If root dataset is encrypted...
		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded
			[ "$KEYSTATUS" = "unavailable" ] || return 0
			TRY_COUNT=3

			# If key is stored in a file, do not prompt
			if ! [ "${KEYLOCATION}" = "prompt" ]; then
				$ZFS load-key "${ENCRYPTIONROOT}"

			# Prompt with plymouth, if active
			elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
				echo "plymouth" > /run/zfs_console_askpwd_cmd
				while [ $TRY_COUNT -gt 0 ]; do
					plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
					TRY_COUNT=$((TRY_COUNT - 1))
				done

			# Prompt with systemd, if active
			elif [ -e /run/systemd/system ]; then
				echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
				while [ $TRY_COUNT -gt 0 ]; do
					systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
					TRY_COUNT=$((TRY_COUNT - 1))
				done

			# Prompt with ZFS tty, otherwise
			else
				# Temporarily setting "printk" to "7" allows the
				# prompt to appear even when the "quiet" kernel
				# option has been used.
				echo "load-key" > /run/zfs_console_askpwd_cmd
				storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
				echo 7 > /proc/sys/kernel/printk
				$ZFS load-key "${ENCRYPTIONROOT}"
				echo "$storeprintk" > /proc/sys/kernel/printk
			fi
		fi
	fi

	return 0
}
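
# Illustration of the property chain decrypt_fs walks (values hypothetical):
#
#	encryptionroot  rpool/ROOT        # dataset holding the wrapping key
#	keylocation     prompt            # or e.g. file:///etc/zfs/key
#	keystatus       unavailable       # only then is load-key attempted
#
# With keylocation=prompt the password is requested at most three times via
# plymouth or systemd-ask-password, or once on the console; any other
# keylocation is loaded non-interactively with 'zfs load-key'.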

# Destroy a given filesystem.
destroy_fs()
{
	fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
		echo "Hint: Try:  zfs destroy -Rfn $fs"
		echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	snap="$1"
	destfs="$2"
	mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from
	# + We don't want this filesystem to be automatically mounted, we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
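
# Illustration (hypothetical names): booting from rpool/ROOT/debian@snap2
# leads setup_snapshot_booting() to call
#
#	clone_snap rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2 /
#
# which runs roughly:
#
#	zfs clone -o canmount=noauto -o mountpoint=none \
#	    -o org.zol:mountpoint=/ rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2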

# Rollback a given snapshot.
rollback_snap()
{
	snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to rollback snapshot."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots and present them as a numbered list
# for the user to choose from.
ask_user_snap()
{
	fs="$1"
	i=1

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	while read -r snap; do
	    echo "  $i: ${snap}" > /dev/stderr
	    eval "$(echo SNAP_$i=$snap)"
	    i=$((i + 1))
	done <<EOT
$("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
EOT

	printf "%s" "  Snap nr [1-$((i-1))]? " > /dev/stderr
	read -r snapnr

	# Re-enable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

	echo "$(eval echo '$SNAP_'$snapnr)"
}
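
# Illustration of a session (snapshot names hypothetical); the menu goes
# to stderr so that only the chosen snapshot name lands on stdout:
#
#	What snapshot do you want to boot from?
#	  1: rpool/ROOT/debian@snap1
#	  2: rpool/ROOT/debian@snap2
#	  Snap nr [1-2]? 2
#
# => prints "rpool/ROOT/debian@snap2" on stdout.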

setup_snapshot_booting()
{
	snap="$1"
	retval=0

	# Make sure that the snapshot specified actually exists.
	if [ ! "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?)
		# ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. A split() function would have been nice...
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it recursively.
		if [ "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (might need to clone /usr, /var etc
	# as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Rollback snapshot
			rollback_snap "$s" || retval=$((retval + 1))
		else
			# Setup a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#       rpool/ROOT/debian@snap2		=> rpool/ROOT/debian_snap2
			#       rpool/ROOT/debian/boot@snap2	=> rpool/ROOT/debian_snap2/boot
			#       rpool/ROOT/debian/usr@snap2	=> rpool/ROOT/debian_snap2/usr
			#       rpool/ROOT/debian/var@snap2	=> rpool/ROOT/debian_snap2/var
			subfs="${s##$rootfs}"
			subfs="${subfs%%@$snapname}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" ] || \
			   [ "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# If we haven't returned yet, report how many operations failed.
	return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support debug option
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ ! -f ${ZPOOL_CACHE} ] && unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#		 'root' is for Redhat/Fedora (etc),
	#		 'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#		 'NEWROOT' is for RedHat/Fedora (etc),
	#		 'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This part is the really ugly part - there are so many options and
	# permutations 'out there', and if this is to be the 'primary' source
	# for ZFS initrd scripting, we need to support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#	rpool=<pool>			(tries to find the bootfs automatically)
	#	bootfs=<pool>/<dataset>		(uses this for rpool - first part)
	#	rpool=<pool> bootfs=<pool>/<dataset>
	#	-B zfs-bootfs=<pool>/<fs>	(uses this for rpool - first part)
	#	rpool=rpool			(default if none of the above is used)
	#	root=<pool>/<dataset>		(uses this for rpool - first part)
	#	root=ZFS=<pool>/<dataset>	(uses this for rpool - first part, without 'ZFS=')
	#	root=zfs:AUTO			(tries to detect both pool and rootfs)
	#	root=zfs:<pool>/<dataset>	(uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
	# Option <pool> could also be <guid>

	# ------------
	# Support force option
	# In addition, setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for 'rpool' and 'bootfs' parameter
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'
	[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by OpenZFS (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set at the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command.
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi
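
	# Illustration (pool/dataset names hypothetical): per the comment
	# above, a GRUB entry passing "-B zfs-bootfs=rpool/ROOT/debian"
	# surfaces in the 'set' dump as the literal line
	#
	#	zfs-bootfs=rpool/ROOT/debian
	#
	# which the sed expression reduces to "rpool/ROOT/debian".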

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
		# which will be caught later
		ROOT='zfs:AUTO'
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			import_pool "$pool"
			find_rootfs "$pool"
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
	else
		# No auto - use the value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if it wasn't already imported in the AUTO
	# check above).
	if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported, this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as a guid, resolve the guid to a name.
	pool="$("${ZPOOL}" get name,guid -o name,value -H | \
	    awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
	if [ -n "$pool" ]; then
		# If $ZFS_BOOTFS contains the guid, replace the guid portion
		# with $pool.
		ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
			sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
		ZFS_RPOOL="${pool}"
	fi

	# Set the no-op scheduler on the disks containing the vdevs of
	# the root pool. For single-queue devices, this scheduler is
	# "noop"; for multi-queue devices, it is "none".
	# ZFS already does this for wholedisk vdevs (for all pools), so this
	# is only important for partitions.
	"${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
	    awk '/^\t / && !/(mirror|raidz)/ {
	        dev=$1;
	        sub(/[0-9]+$/, "", dev);
	        print dev
	    }' |
	while read -r i
	do
		SCHEDULER=/sys/block/$i/queue/scheduler
		if [ -e "${SCHEDULER}" ]
		then
			# Query to see what schedulers are available
			case "$(cat "${SCHEDULER}")" in
				*noop*) echo noop > "${SCHEDULER}" ;;
				*none*) echo none > "${SCHEDULER}" ;;
			esac
		fi
	done
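
	# Illustration (device names hypothetical): for a root pool on
	# /dev/sda2, the awk script strips the trailing partition digits
	# to get "sda"; given a scheduler file reading e.g.
	# "[mq-deadline] none", the loop then writes "none" to
	# /sys/block/sda/queue/scheduler.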

	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /proc/self/mounts refresh.
	#
	# * Mount additional filesystems required,
	#   such as /usr, /var, /usr/local etc.
	#   NOTE: Mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset
	filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
	for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
	do
		mount_fs "$fs"
	done

	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]; then
		read -r zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		printf "%s" "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run local bottom script
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-bottom"
			run_scripts /scripts/local-bottom
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}