#!/sbin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
# Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
#

# Start/stop processes required for server NFS

. /lib/svc/share/smf_include.sh
. /lib/svc/share/ipf_include.sh
zone=`smf_zonename`

#
# Handling a corner case here. If we were in offline state due to an
# unsatisfied dependency, the ipf_method process wouldn't have generated
# the ipfilter configuration. When we transition to online because the
# dependency is satisfied, the start method will have to generate the
# ipfilter configuration. To avoid all possible deadlock scenarios,
# we restart ipfilter which will regenerate the ipfilter configuration
# for the entire system.
#
# The ipf_method process signals that it didn't generate ipf rules by
# removing the service's ipf file. Thus we only restart network/ipfilter
# when the file is missing.
#
configure_ipfilter()
{
	ipfile=`fmri_to_file $SMF_FMRI $IPF_SUFFIX`
	ip6file=`fmri_to_file $SMF_FMRI $IPF6_SUFFIX`
	[ -f "$ipfile" -a -f "$ip6file" ] && return 0

	#
	# Nothing to do if:
	# - ipfilter isn't online
	# - global policy is 'custom'
	# - service's policy is 'use_global'
	#
	service_check_state $IPF_FMRI $SMF_ONLINE || return 0
	[ "`get_global_def_policy`" = "custom" ] && return 0
	[ "`get_policy $SMF_FMRI`" = "use_global" ] && return 0

	svcadm restart $IPF_FMRI
}
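
#
# For reference only: the policies consulted above are read through
# get_policy() and get_global_def_policy() from ipf_include.sh. Assuming
# the property names used by that framework, they can be inspected with
# svcprop, e.g.:
#
#	svcprop -p firewall_config/policy svc:/network/nfs/server:default
#	svcprop -p firewall_config_default/policy svc:/network/ipfilter:default
#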

case "$1" in
'start')
	# The NFS server is not supported in a local zone
	if smf_is_nonglobalzone; then
		/usr/sbin/svcadm disable -t svc:/network/nfs/server
		echo "The NFS server is not supported in a local zone"
		sleep 5 &
		exit $SMF_EXIT_OK
	fi

	# Share all file systems enabled for sharing. sharemgr understands
	# regular shares and ZFS shares and will handle both. Technically,
	# the shares would have been started long before getting here since
	# nfsd has a dependency on them.

	# restart stopped shares from the repository
	/usr/sbin/sharemgr start -P nfs -a
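
	# Illustration only (hypothetical group and dataset names): the
	# shares restarted here are typically created ahead of time either
	# with sharemgr(1M) or via the ZFS sharenfs property, e.g.:
	#
	#	sharemgr create -P nfs mygroup
	#	sharemgr add-share -s /export/home mygroup
	#	zfs set sharenfs=on tank/export/home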

	# Options for nfsd are now set in SMF
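	#
	# For example (illustrative values): nfsd options such as the server
	# thread count and NFS protocol versions are stored as SMF properties
	# of this service and are normally administered through sharectl(1M):
	#
	#	sharectl set -p servers=256 nfs
	#	sharectl set -p server_versmax=4 nfs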

	/usr/lib/nfs/mountd
	rc=$?
	if [ $rc != 0 ]; then
		/usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
		echo "$0: mountd failed with $rc"
		sleep 5 &
		exit $SMF_EXIT_ERR_FATAL
	fi

	/usr/lib/nfs/nfsd
	rc=$?
	if [ $rc != 0 ]; then
		/usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
		echo "$0: nfsd failed with $rc"
		sleep 5 &
		exit $SMF_EXIT_ERR_FATAL
	fi

	configure_ipfilter
	;;

'refresh')
	/usr/sbin/sharemgr start -P nfs -a
	;;

'stop')
	/usr/bin/pkill -x -u 0,1 -z $zone '(nfsd|mountd)'

	# Unshare all shared file systems using NFS

	/usr/sbin/sharemgr stop -P nfs -a

	# Kill any processes left in service contract
	smf_kill_contract $2 TERM 1
	[ $? -ne 0 ] && exit 1
	;;

'ipfilter')
	#
	# NFS-related services are RPC services. nfs/server includes nfsd,
	# which listens on a well-known port, and mountd, which is an RPC
	# daemon with a dynamically assigned port.
	#
	# Essentially, we generate rules for the following "services":
	#  - nfs/server, which covers nfsd and mountd
	#  - nfs/rquota
	#
	# The following services are enabled for both the nfs client and
	# the server; if nfs/client is enabled we'll treat them as client
	# services and simply allow incoming traffic.
	#  - nfs/status
	#  - nfs/nlockmgr
	#  - nfs/cbd
	#
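	#
	# For illustration: nfsd listens on the well-known NFS port, 2049,
	# so servinfo typically reports 2049 for nfs/server, while mountd's
	# ports are whatever rpcbind assigned when it registered. The rule
	# text written below is produced by generate_rules() in
	# ipf_include.sh and depends on the configured policy; a permissive
	# rule would have roughly this form (sketch, not the exact output):
	#
	#	pass in log quick proto tcp from any to any port = 2049
	#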
	NFS_FMRI="svc:/network/nfs/server:default"
	NFSCLI_FMRI="svc:/network/nfs/client:default"
	RQUOTA_FMRI="svc:/network/nfs/rquota:default"
	FMRI=$2

	file=`fmri_to_file $FMRI $IPF_SUFFIX`
	file6=`fmri_to_file $FMRI $IPF6_SUFFIX`
	echo "# $FMRI" >$file
	echo "# $FMRI" >$file6
	policy=`get_policy $NFS_FMRI`

	#
	# nfs/server configuration is processed in the start method.
	#
	if [ "$FMRI" = "$NFS_FMRI" ]; then
		service_check_state $FMRI $SMF_ONLINE
		if [ $? -ne 0 ]; then
			rm $file
			exit $SMF_EXIT_OK
		fi

		nfs_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI 2>/dev/null`
		tport=`$SERVINFO -p -t -s $nfs_name 2>/dev/null`
		if [ -n "$tport" ]; then
			generate_rules $FMRI $policy "tcp" $tport $file
		fi

		tport6=`$SERVINFO -p -t6 -s $nfs_name 2>/dev/null`
		if [ -n "$tport6" ]; then
			generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
		fi

		uport=`$SERVINFO -p -u -s $nfs_name 2>/dev/null`
		if [ -n "$uport" ]; then
			generate_rules $FMRI $policy "udp" $uport $file
		fi

		uport6=`$SERVINFO -p -u6 -s $nfs_name 2>/dev/null`
		if [ -n "$uport6" ]; then
			generate_rules $FMRI $policy "udp" $uport6 $file6 _6
		fi

		# mountd IPv6 ports are also reachable through IPv4, so include
		# them when generating IPv4 rules.
		tports=`$SERVINFO -R -p -t -s "mountd" 2>/dev/null`
		tports6=`$SERVINFO -R -p -t6 -s "mountd" 2>/dev/null`
		if [ -n "$tports" -o -n "$tports6" ]; then
			tports=`unique_ports $tports $tports6`
			for tport in $tports; do
				generate_rules $FMRI $policy "tcp" \
				    $tport $file
			done
		fi

		if [ -n "$tports6" ]; then
			for tport6 in $tports6; do
				generate_rules $FMRI $policy "tcp" \
				    $tport6 $file6 _6
			done
		fi

		uports=`$SERVINFO -R -p -u -s "mountd" 2>/dev/null`
		uports6=`$SERVINFO -R -p -u6 -s "mountd" 2>/dev/null`
		if [ -n "$uports" -o -n "$uports6" ]; then
			uports=`unique_ports $uports $uports6`
			for uport in $uports; do
				generate_rules $FMRI $policy "udp" \
				    $uport $file
			done
		fi

		if [ -n "$uports6" ]; then
			for uport6 in $uports6; do
				generate_rules $FMRI $policy "udp" \
				    $uport6 $file6 _6
			done
		fi

	elif [ "$FMRI" = "$RQUOTA_FMRI" ]; then
		iana_name=`svcprop -p inetd/name $FMRI`

		# rquota IPv6 ports are also reachable through IPv4, so include
		# them when generating IPv4 rules.
		tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
		tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
		if [ -n "$tports" -o -n "$tports6" ]; then
			tports=`unique_ports $tports $tports6`
			for tport in $tports; do
				generate_rules $NFS_FMRI $policy "tcp" \
				    $tport $file
			done
		fi

		if [ -n "$tports6" ]; then
			for tport6 in $tports6; do
				generate_rules $NFS_FMRI $policy "tcp" \
				    $tport6 $file6 _6
			done
		fi

		uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
		uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
		if [ -n "$uports" -o -n "$uports6" ]; then
			uports=`unique_ports $uports $uports6`
			for uport in $uports; do
				generate_rules $NFS_FMRI $policy "udp" \
				    $uport $file
			done
		fi

		if [ -n "$uports6" ]; then
			for uport6 in $uports6; do
				generate_rules $NFS_FMRI $policy "udp" \
				    $uport6 $file6 _6
			done
		fi
	else
		#
		# Handle the client services here
		#
		if service_check_state $NFSCLI_FMRI $SMF_ONLINE; then
			policy=none
			ip=any
		fi

		restarter=`svcprop -p general/restarter $FMRI 2>/dev/null`
		if [ "$restarter" = "$INETDFMRI" ]; then
			iana_name=`svcprop -p inetd/name $FMRI`
			isrpc=`svcprop -p inetd/isrpc $FMRI`
		else
			iana_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI`
			isrpc=`svcprop -p $FW_CONTEXT_PG/isrpc $FMRI`
		fi

		if [ "$isrpc" = "true" ]; then
			tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
			tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
			uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
			uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
		else
			tports=`$SERVINFO -p -t -s $iana_name 2>/dev/null`
			tports6=`$SERVINFO -p -t6 -s $iana_name 2>/dev/null`
			uports=`$SERVINFO -p -u -s $iana_name 2>/dev/null`
			uports6=`$SERVINFO -p -u6 -s $iana_name 2>/dev/null`
		fi

		# IPv6 ports are also reachable through IPv4, so include
		# them when generating IPv4 rules.
		if [ -n "$tports" -o -n "$tports6" ]; then
			tports=`unique_ports $tports $tports6`
			for tport in $tports; do
				generate_rules $FMRI $policy "tcp" $tport $file
			done
		fi

		if [ -n "$tports6" ]; then
			for tport6 in $tports6; do
				generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
			done
		fi

		if [ -n "$uports" -o -n "$uports6" ]; then
			uports=`unique_ports $uports $uports6`
			for uport in $uports; do
				generate_rules $FMRI $policy "udp" $uport $file
			done
		fi

		if [ -n "$uports6" ]; then
			for uport6 in $uports6; do
				generate_rules $FMRI $policy "udp" $uport6 $file6 _6
			done
		fi
	fi

	;;

*)
	echo "Usage: $0 { start | stop | refresh }"
	exit 1
	;;
esac
exit $SMF_EXIT_OK