/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <fs/fs_subr.h>

#include <sys/errno.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/kobj.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>

#include <sharefs/sharefs.h>

/*
 * sharefs_snap_create: create a large character buffer with
 * the shares enumerated.
 */
static int
sharefs_snap_create(shnode_t *sft)
{
	sharetab_t		*sht;
	share_t			*sh;
	size_t			sWritten = 0;
	int			iCount = 0;
	char			*buf;

	rw_enter(&sharefs_lock, RW_WRITER);
	rw_enter(&sharetab_lock, RW_READER);

	if (sft->sharefs_snap) {
		/*
		 * Nothing has changed, so no need to grab a new copy!
		 */
		if (sft->sharefs_generation == sharetab_generation) {
			rw_exit(&sharetab_lock);
			rw_exit(&sharefs_lock);
			return (0);
		}

		ASSERT(sft->sharefs_size != 0);
		kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		sft->sharefs_snap = NULL;
	}

	sft->sharefs_size = sharetab_size;
	sft->sharefs_count = sharetab_count;

	if (sft->sharefs_size == 0) {
		rw_exit(&sharetab_lock);
		rw_exit(&sharefs_lock);
		return (0);
	}

	sft->sharefs_snap = kmem_zalloc(sft->sharefs_size + 1, KM_SLEEP);

	buf = sft->sharefs_snap;

	/*
	 * Walk the Sharetab, dumping each entry.
	 */
	for (sht = sharefs_sharetab; sht != NULL; sht = sht->s_next) {
		int	i;

		for (i = 0; i < SHARETAB_HASHES; i++) {
			for (sh = sht->s_buckets[i].ssh_sh;
			    sh != NULL;
			    sh = sh->sh_next) {
				int	n;

				if ((sWritten + sh->sh_size) >
				    sft->sharefs_size) {
					goto error_fault;
				}
				/*
				 * Note that sh->sh_size accounts
				 * for the field separators. We
				 * need to add one for the EOL
				 * marker, whose space is already
				 * accounted for in each share by
				 * the EOS marker.
				 */
				n = snprintf(&buf[sWritten],
				    sh->sh_size + 1,
				    "%s\t%s\t%s\t%s\t%s\n",
				    sh->sh_path,
				    sh->sh_res,
				    sh->sh_fstype,
				    sh->sh_opts,
				    sh->sh_descr);

				if (n != sh->sh_size) {
					goto error_fault;
				}

				sWritten += n;
				iCount++;
			}
		}
	}

	/*
	 * We want to record the generation number and
	 * mtime inside this snapshot.
	 */
	gethrestime(&sharetab_snap_time);
	sft->sharefs_snap_time = sharetab_snap_time;
	sft->sharefs_generation = sharetab_generation;

	ASSERT(iCount == sft->sharefs_count);

	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);
	return (0);

error_fault:

	kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
	sft->sharefs_size = 0;
	sft->sharefs_count = 0;
	sft->sharefs_snap = NULL;
	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);

	return (EFAULT);
}

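/*
 * sharefs_getattr: return the attributes of the sharetab file; the
 * size and mtime come either from the live sharetab (for the real
 * root vnode) or from this vnode's snapshot.
 */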
/* ARGSUSED */
static int
sharefs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	timestruc_t	now;
	shnode_t	*sft = VTOSH(vp);

	vap->va_type = VREG;
	vap->va_mode = S_IRUSR | S_IRGRP | S_IROTH;
	vap->va_nodeid = SHAREFS_INO_FILE;
	vap->va_nlink = 1;

	rw_enter(&sharefs_lock, RW_READER);

	/*
	 * If we are asked about the real (root) vnode, report
	 * what is currently in the sharetab.
	 *
	 * Otherwise report the data held in this vnode's snapshot.
	 */
	if (sft->sharefs_real_vp) {
		rw_enter(&sharetab_lock, RW_READER);
		vap->va_size = sharetab_size;
		vap->va_mtime = sharetab_mtime;
		rw_exit(&sharetab_lock);
	} else {
		vap->va_size = sft->sharefs_size;
		vap->va_mtime = sft->sharefs_snap_time;
	}
	rw_exit(&sharefs_lock);

	gethrestime(&now);
	vap->va_atime = vap->va_ctime = now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	vap->va_blksize = DEV_BSIZE;
	vap->va_nblocks = howmany(vap->va_size, vap->va_blksize);
	vap->va_seq = 0;
	vap->va_fsid = vp->v_vfsp->vfs_dev;

	return (0);
}

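/*
 * sharefs_access: the sharetab file is read-only; deny write and
 * execute access.
 */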
/* ARGSUSED */
static int
sharefs_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ct)
{
	if (mode & (VWRITE|VEXEC))
		return (EROFS);

	return (0);
}

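/*
 * sharefs_open: hand back a new vnode holding a private snapshot of
 * the sharetab, so that subsequent reads see a consistent view.
 */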
/* ARGSUSED */
int
sharefs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	vnode_t		*vp;
	vnode_t		*ovp = *vpp;
	shnode_t	*sft;
	int		error = 0;

	if (flag & FWRITE)
		return (EINVAL);

	/*
	 * Create a new sharefs vnode for each operation. In order to
	 * avoid locks, we create a snapshot which cannot change during
	 * reads.
	 */
	vp = gfs_file_create(sizeof (shnode_t), NULL, sharefs_ops_data);

	((gfs_file_t *)vp->v_data)->gfs_ino = SHAREFS_INO_FILE;

	/*
	 * Hold the parent!
	 */
	VFS_HOLD(ovp->v_vfsp);

	VN_SET_VFS_TYPE_DEV(vp, ovp->v_vfsp, VREG, 0);

	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	*vpp = vp;
	VN_RELE(ovp);

	sft = VTOSH(vp);

	/*
	 * No need for the lock, no other thread can be accessing
	 * this data structure.
	 */
	atomic_inc_32(&sft->sharefs_refs);
	sft->sharefs_real_vp = 0;

	/*
	 * Since the sharetab could change underneath us while we
	 * are dumping a very large sharetab, we make a copy of it
	 * here and dump that instead.
	 */
	error = sharefs_snap_create(sft);

	return (error);
}

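/*
 * sharefs_close: on last close, discard the snapshot held by this
 * vnode.
 */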
/* ARGSUSED */
int
sharefs_close(vnode_t *vp, int flag, int count,
    offset_t off, cred_t *cr, caller_context_t *ct)
{
	shnode_t	*sft = VTOSH(vp);

	if (count > 1)
		return (0);

	rw_enter(&sharefs_lock, RW_WRITER);
	if (vp->v_count == 1) {
		if (sft->sharefs_snap != NULL) {
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
			sft->sharefs_size = 0;
			sft->sharefs_snap = NULL;
			sft->sharefs_generation = 0;
		}
	}
	atomic_dec_32(&sft->sharefs_refs);
	rw_exit(&sharefs_lock);

	return (0);
}

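/*
 * sharefs_read: copy data out of the snapshot; a read starting at
 * offset zero refreshes the snapshot first.
 */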
/* ARGSUSED */
static int
sharefs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	shnode_t	*sft = VTOSH(vp);
	off_t		off = uio->uio_offset;
	size_t		len = uio->uio_resid;
	int		error = 0;

	rw_enter(&sharefs_lock, RW_READER);

	/*
	 * First check to see if we need to grab a new snapshot.
	 */
	if (off == (off_t)0) {
		rw_exit(&sharefs_lock);
		error = sharefs_snap_create(sft);
		if (error) {
			return (EFAULT);
		}
		rw_enter(&sharefs_lock, RW_READER);
	}

	/* LINTED */
	if (len <= 0 || off >= sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (error);
	}

	if ((size_t)(off + len) > sft->sharefs_size)
		len = sft->sharefs_size - off;

	if (off < 0 || len > sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (EFAULT);
	}

	if (len != 0) {
		error = uiomove(sft->sharefs_snap + off,
		    len, UIO_READ, uio);
	}

	rw_exit(&sharefs_lock);
	return (error);
}

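/*
 * sharefs_inactive: tear down a sharefs vnode, freeing its snapshot
 * and the shnode itself once the gfs layer releases it.
 */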
/* ARGSUSED */
static void
sharefs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *tx)
{
	gfs_file_t	*fp = vp->v_data;
	shnode_t	*sft;

	sft = (shnode_t *)gfs_file_inactive(vp);
	if (sft) {
		rw_enter(&sharefs_lock, RW_WRITER);
		if (sft->sharefs_snap != NULL) {
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		}

		kmem_free(sft, fp->gfs_size);
		rw_exit(&sharefs_lock);
	}
}

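/*
 * sharefs_create_root_file: create the root vnode of the sharefs
 * mount; it is marked as the "real" vnode so getattr reports the
 * live sharetab rather than a snapshot.
 */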
vnode_t *
sharefs_create_root_file(vfs_t *vfsp)
{
	vnode_t		*vp;
	shnode_t	*sft;

	vp = gfs_root_create_file(sizeof (shnode_t),
	    vfsp, sharefs_ops_data, SHAREFS_INO_FILE);

	sft = VTOSH(vp);

	sft->sharefs_real_vp = 1;

	return (vp);
}

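/*
 * Vnode operations template for sharefs files.
 */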
const fs_operation_def_t sharefs_tops_data[] = {
	{ VOPNAME_OPEN,		{ .vop_open = sharefs_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = sharefs_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = sharefs_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = sharefs_access } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = sharefs_inactive } },
	{ VOPNAME_READ,		{ .vop_read = sharefs_read } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ NULL }
};