#!/sbin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
#

# Start/stop processes required for server NFS

. /lib/svc/share/smf_include.sh
. /lib/svc/share/ipf_include.sh
zone=`smf_zonename`

#
# Handling a corner case here. If we were in offline state due to an
# unsatisfied dependency, the ipf_method process wouldn't have generated
# the ipfilter configuration. When we transition to online because the
# dependency is satisfied, the start method will have to generate the
# ipfilter configuration. To avoid all possible deadlock scenarios,
# we restart ipfilter which will regenerate the ipfilter configuration
# for the entire system.
#
# The ipf_method process signals that it didn't generate ipf rules by
# removing the service's ipf file. Thus we only restart network/ipfilter
# when the file is missing.
#
configure_ipfilter()
{
    ipfile=`fmri_to_file $SMF_FMRI $IPF_SUFFIX`
    [ -f "$ipfile" ] && return 0

    #
    # Nothing to do if:
    # - ipfilter isn't online
    # - global policy is 'custom'
    # - service's policy is 'use_global'
    #
    service_check_state $IPF_FMRI $SMF_ONLINE || return 0
    [ "`get_global_def_policy`" = "custom" ] && return 0
    [ "`get_policy $SMF_FMRI`" = "use_global" ] && return 0

    svcadm restart $IPF_FMRI
}

case "$1" in
'start')
    # The NFS server is not supported in a local zone
    if smf_is_nonglobalzone; then
        /usr/sbin/svcadm disable -t svc:/network/nfs/server
        echo "The NFS server is not supported in a local zone"
        sleep 5 &
        exit $SMF_EXIT_OK
    fi

    # Share all file systems enabled for sharing. sharemgr understands
    # regular shares and ZFS shares and will handle both. Technically,
    # the shares would have been started long before getting here since
    # nfsd has a dependency on them.

    startnfsd=0

    # restart stopped shares from the repository
    /usr/sbin/sharemgr start -P nfs -a

    # Start up mountd and nfsd if anything is exported.

    if /usr/bin/grep -s nfs /etc/dfs/sharetab >/dev/null; then
        startnfsd=1
    fi

    # If auto-enable behavior is disabled, always start nfsd

    if [ `svcprop -p application/auto_enable nfs/server` = "false" ]; then
        startnfsd=1
    fi
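
    #
    # For reference only (illustrative commands, not executed by this
    # method): the application/auto_enable property checked above could be
    # changed administratively with something like
    #     svccfg -s svc:/network/nfs/server setprop application/auto_enable = false
    #     svcadm refresh svc:/network/nfs/server
    #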

    # Options for nfsd are now set in SMF
    if [ $startnfsd -ne 0 ]; then
        /usr/lib/nfs/mountd
        rc=$?
        if [ $rc != 0 ]; then
            /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
            echo "$0: mountd failed with $rc"
            sleep 5 &
            exit $SMF_EXIT_ERR_FATAL
        fi

        /usr/lib/nfs/nfsd
        rc=$?
        if [ $rc != 0 ]; then
            /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
            echo "$0: nfsd failed with $rc"
            sleep 5 &
            exit $SMF_EXIT_ERR_FATAL
        fi

        configure_ipfilter
    else
        /usr/sbin/svcadm disable -t svc:/network/nfs/server
        echo "No NFS filesystems are shared"
        sleep 5 &
    fi

    ;;

'refresh')
    /usr/sbin/sharemgr start -P nfs -a
    ;;

'stop')
    /usr/bin/pkill -x -u 0,1 -z $zone '(nfsd|mountd)'

    # Unshare all shared file systems using NFS

    /usr/sbin/sharemgr stop -P nfs -a

    #
    # Wait up to 10 seconds for nfslogd to gracefully handle SIGHUP
    #
    /usr/bin/pkill -HUP -x -u 0 -z $zone nfslogd
    wtime=10

    while [ $wtime -gt 0 ]; do
        /usr/bin/pgrep -x -u 0 -z $zone nfslogd >/dev/null || break
        wtime=`expr $wtime - 1`
        sleep 1
    done

    #
    # Kill nfslogd more forcefully if it did not shut down during
    # the grace period
    #
    if [ $wtime -eq 0 ]; then
        /usr/bin/pkill -TERM -x -u 0 -z $zone nfslogd
    fi

    # Kill any processes left in service contract
    smf_kill_contract $2 TERM 1
    [ $? -ne 0 ] && exit 1
    ;;

'ipfilter')
    #
    # NFS-related services are RPC based. nfs/server has nfsd, which has a
    # well-defined port number, but mountd is an RPC daemon whose port is
    # assigned dynamically.
    #
    # Essentially, we generate rules for the following "services":
    # - nfs/server, which has nfsd and mountd
    # - nfs/rquota
    #
    # The following services are enabled for both the nfs client and
    # server, so we'll treat them as client services and simply
    # allow incoming traffic:
    # - nfs/status
    # - nfs/nlockmgr
    # - nfs/cbd
    #
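    # For illustration only: the client-service branch below writes ipfilter
    # rules of the following form (nfsd's well-known port, 2049, shown as an
    # example); for nfs/server and nfs/rquota the equivalent rules are built
    # by generate_rules from ipf_include.sh using the ports discovered at
    # run time.
    #
    #     pass in log quick proto tcp from any to any port = 2049 flags S keep state
    #     pass in log quick proto udp from any to any port = 2049
    #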
    NFS_FMRI="svc:/network/nfs/server:default"
    RQUOTA_FMRI="svc:/network/nfs/rquota:default"
    FMRI=$2

    file=`fmri_to_file $FMRI $IPF_SUFFIX`
    echo "# $FMRI" >$file
    policy=`get_policy $NFS_FMRI`
    ip="any"

    #
    # nfs/server configuration is processed in the start method.
    #
    if [ "$FMRI" = "$NFS_FMRI" ]; then
        service_check_state $FMRI $SMF_ONLINE
        if [ $? -ne 0 ]; then
            rm $file
            exit $SMF_EXIT_OK
        fi

        nfs_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI 2>/dev/null`
        tport=`$SERVINFO -p -t -s $nfs_name 2>/dev/null`
        if [ -n "$tport" ]; then
            generate_rules $FMRI $policy "tcp" $ip $tport $file
        fi

        uport=`$SERVINFO -p -u -s $nfs_name 2>/dev/null`
        if [ -n "$uport" ]; then
            generate_rules $FMRI $policy "udp" $ip $uport $file
        fi

        tports=`$SERVINFO -R -p -t -s "mountd" 2>/dev/null`
        if [ -n "$tports" ]; then
            for tport in $tports; do
                generate_rules $FMRI $policy "tcp" $ip \
                    $tport $file
            done
        fi

        uports=`$SERVINFO -R -p -u -s "mountd" 2>/dev/null`
        if [ -n "$uports" ]; then
            for uport in $uports; do
                generate_rules $FMRI $policy "udp" $ip \
                    $uport $file
            done
        fi

    elif [ "$FMRI" = "$RQUOTA_FMRI" ]; then
        iana_name=`svcprop -p inetd/name $FMRI`

        tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
        if [ -n "$tports" ]; then
            for tport in $tports; do
                generate_rules $NFS_FMRI $policy "tcp" \
                    $ip $tport $file
            done
        fi

        uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
        if [ -n "$uports" ]; then
            for uport in $uports; do
                generate_rules $NFS_FMRI $policy "udp" \
                    $ip $uport $file
            done
        fi
    else
        #
        # Handle the client services here
        #
        restarter=`svcprop -p general/restarter $FMRI 2>/dev/null`
        if [ "$restarter" = "$INETDFMRI" ]; then
            iana_name=`svcprop -p inetd/name $FMRI`
            isrpc=`svcprop -p inetd/isrpc $FMRI`
        else
            iana_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI`
            isrpc=`svcprop -p $FW_CONTEXT_PG/isrpc $FMRI`
        fi

        if [ "$isrpc" = "true" ]; then
            tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
            uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
        else
            tports=`$SERVINFO -p -t -s $iana_name 2>/dev/null`
            uports=`$SERVINFO -p -u -s $iana_name 2>/dev/null`
        fi

        if [ -n "$tports" ]; then
            for tport in $tports; do
                echo "pass in log quick proto tcp from any" \
                    "to any port = ${tport} flags S " \
                    "keep state" >>${file}
            done
        fi

        if [ -n "$uports" ]; then
            for uport in $uports; do
                echo "pass in log quick proto udp from any" \
                    "to any port = ${uport}" >>${file}
            done
        fi
    fi

    ;;

*)
    echo "Usage: $0 { start | stop | refresh }"
    exit 1
    ;;
esac
exit $SMF_EXIT_OK