xref: /titanic_51/usr/src/uts/common/fs/sharefs/sharefs_vnops.c (revision 5af4ae460e4b20d1119c788db199a9821b2d19b2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <fs/fs_subr.h>
30 
31 #include <sys/errno.h>
32 #include <sys/file.h>
33 #include <sys/kmem.h>
34 #include <sys/kobj.h>
35 #include <sys/cmn_err.h>
36 #include <sys/stat.h>
37 #include <sys/systm.h>
38 #include <sys/sysmacros.h>
39 #include <sys/atomic.h>
40 #include <sys/vfs.h>
41 #include <sys/vfs_opreg.h>
42 
43 #include <sharefs/sharefs.h>
44 
45 /*
46  * sharefs_snap_create: create a large character buffer with
47  * the shares enumerated.
48  */
/*
 * sharefs_snap_create: create a large character buffer with
 * the shares enumerated.
 *
 * Takes sharefs_lock as WRITER and then sharetab_lock as READER
 * (that lock order must match the rest of sharefs).  On success the
 * snapshot text lives in sft->sharefs_snap (sharefs_size bytes plus a
 * terminating NUL) and the generation number is recorded so a later
 * call can detect that nothing changed.
 *
 * Returns 0 on success, EFAULT if the sharetab contents do not match
 * the sizes it advertised (in which case the snapshot is discarded).
 */
static int
sharefs_snap_create(shnode_t *sft)
{
	sharetab_t		*sht;
	share_t			*sh;
	size_t			sWritten = 0;
	int			iCount = 0;
	char			*buf;

	rw_enter(&sharefs_lock, RW_WRITER);
	rw_enter(&sharetab_lock, RW_READER);

	if (sft->sharefs_snap) {
		/*
		 * Nothing has changed, so no need to grab a new copy!
		 */
		if (sft->sharefs_generation == sharetab_generation) {
			rw_exit(&sharetab_lock);
			rw_exit(&sharefs_lock);
			return (0);
		}

		/* Stale snapshot: free it before rebuilding. */
		ASSERT(sft->sharefs_size != 0);
		kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		sft->sharefs_snap = NULL;
	}

	sft->sharefs_size = sharetab_size;
	sft->sharefs_count = sharetab_count;

	/* An empty sharetab yields an empty (NULL) snapshot. */
	if (sft->sharefs_size == 0) {
		rw_exit(&sharetab_lock);
		rw_exit(&sharefs_lock);
		return (0);
	}

	/* +1 for the terminating NUL; freed with the same size. */
	sft->sharefs_snap = kmem_zalloc(sft->sharefs_size + 1, KM_SLEEP);

	buf = sft->sharefs_snap;

	/*
	 * Walk the Sharetab, dumping each entry.
	 */
	for (sht = sharefs_sharetab; sht != NULL; sht = sht->s_next) {
		int	i;

		for (i = 0; i < SHARETAB_HASHES; i++) {
			for (sh = sht->s_buckets[i].ssh_sh;
			    sh != NULL;
			    sh = sh->sh_next) {
				int	n;

				/*
				 * Defend against the sharetab being
				 * bigger than the size it advertised.
				 */
				if ((sWritten + sh->sh_size) >
				    sft->sharefs_size) {
					goto error_fault;
				}

				/*
				 * Note that sh->sh_size accounts
				 * for the field separators.
				 * We need to add one for the EOL
				 * marker. And we should note that
				 * the space is accounted for in
				 * each share by the EOS marker.
				 */
				n = snprintf(&buf[sWritten],
				    sh->sh_size + 1,
				    "%s\t%s\t%s\t%s\t%s\n",
				    sh->sh_path,
				    sh->sh_res,
				    sh->sh_fstype,
				    sh->sh_opts,
				    sh->sh_descr);

				/*
				 * A short or truncated write means the
				 * entry disagrees with its own sh_size.
				 */
				if (n != sh->sh_size) {
					goto error_fault;
				}

				sWritten += n;
				iCount++;
			}
		}
	}

	/*
	 * We want to record the generation number and
	 * mtime inside this snapshot.
	 */
	gethrestime(&sharetab_snap_time);
	sft->sharefs_snap_time = sharetab_snap_time;
	sft->sharefs_generation = sharetab_generation;

	ASSERT(iCount == sft->sharefs_count);

	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);
	return (0);

error_fault:

	/* Throw away the partial snapshot and reset the node state. */
	kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
	sft->sharefs_size = 0;
	sft->sharefs_count = 0;
	sft->sharefs_snap = NULL;
	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);

	return (EFAULT);
}
158 
159 /* ARGSUSED */
160 static int
161 sharefs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
162 {
163 	timestruc_t	now;
164 	shnode_t	*sft = VTOSH(vp);
165 
166 	vap->va_type = VREG;
167 	vap->va_mode = S_IRUSR | S_IRGRP | S_IROTH;
168 	vap->va_nodeid = SHAREFS_INO_FILE;
169 	vap->va_nlink = 1;
170 
171 	rw_enter(&sharefs_lock, RW_READER);
172 
173 	/*
174 	 * If we get asked about a snapped vnode, then
175 	 * we must report the data in that vnode.
176 	 *
177 	 * Else we report what is currently in the
178 	 * sharetab.
179 	 */
180 	if (sft->sharefs_real_vp) {
181 		rw_enter(&sharetab_lock, RW_READER);
182 		vap->va_size = sharetab_size;
183 		vap->va_mtime = sharetab_mtime;
184 		rw_exit(&sharetab_lock);
185 	} else {
186 		vap->va_size = sft->sharefs_size;
187 		vap->va_mtime = sft->sharefs_snap_time;
188 	}
189 	rw_exit(&sharefs_lock);
190 
191 	gethrestime(&now);
192 	vap->va_atime = vap->va_ctime = now;
193 
194 	vap->va_uid = 0;
195 	vap->va_gid = 0;
196 	vap->va_rdev = 0;
197 	vap->va_blksize = DEV_BSIZE;
198 	vap->va_nblocks = howmany(vap->va_size, vap->va_blksize);
199 	vap->va_seq = 0;
200 	vap->va_fsid = vp->v_vfsp->vfs_dev;
201 
202 	return (0);
203 }
204 
205 /* ARGSUSED */
206 static int
207 sharefs_access(vnode_t *vp, int mode, int flags, cred_t *cr)
208 {
209 	if (mode & (VWRITE|VEXEC))
210 		return (EROFS);
211 
212 	return (0);
213 }
214 
215 /* ARGSUSED */
/*
 * VOP_OPEN for sharefs: refuse writes, then swap the caller's vnode
 * for a freshly created per-open GFS vnode holding a private snapshot
 * of the sharetab, so subsequent reads are immune to concurrent
 * sharetab changes.
 *
 * On return *vpp points at the new vnode (the original root vnode is
 * released, but its vfs is held on the new vnode's behalf).  Returns
 * 0 on success or the error from sharefs_snap_create().
 */
/* ARGSUSED */
int
sharefs_open(vnode_t **vpp, int flag, cred_t *cr)
{
	vnode_t		*vp;
	vnode_t		*ovp = *vpp;
	shnode_t	*sft;
	int		error = 0;

	/* Read-only filesystem: reject any open for writing. */
	if (flag & FWRITE)
		return (EINVAL);

	/*
	 * Create a new sharefs vnode for each operation. In order to
	 * avoid locks, we create a snapshot which can not change during
	 * reads.
	 */
	vp = gfs_file_create(sizeof (shnode_t), NULL, sharefs_ops_data);

	((gfs_file_t *)vp->v_data)->gfs_ino = SHAREFS_INO_FILE;

	/*
	 * Hold the parent!
	 */
	VFS_HOLD(ovp->v_vfsp);

	VN_SET_VFS_TYPE_DEV(vp, ovp->v_vfsp, VREG, 0);

	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	/* Hand the new vnode back and drop the original root vnode. */
	*vpp = vp;
	VN_RELE(ovp);

	sft = VTOSH(vp);

	/*
	 * No need for the lock, no other thread can be accessing
	 * this data structure.
	 */
	atomic_add_32(&sft->sharefs_refs, 1);
	sft->sharefs_real_vp = 0;	/* this is a snapshot, not the root */

	/*
	 * Since the sharetab could easily change on us whilst we
	 * are dumping an extremely huge sharetab, we make a copy
	 * of it here and use it to dump instead.
	 */
	error = sharefs_snap_create(sft);

	return (error);
}
266 
267 /* ARGSUSED */
/*
 * VOP_CLOSE for sharefs: on the last close of the last hold, release
 * the per-open snapshot buffer so an idle vnode does not pin a
 * potentially large copy of the sharetab in memory.
 *
 * Always returns 0.
 */
/* ARGSUSED */
int
sharefs_close(vnode_t *vp, int flag, int count,
			offset_t off, cred_t *cr)
{
	shnode_t	*sft = VTOSH(vp);

	/* Not the final close of this file descriptor: nothing to do. */
	if (count > 1)
		return (0);

	rw_enter(&sharefs_lock, RW_WRITER);
	/* Only tear down the snapshot when no one else holds the vnode. */
	if (vp->v_count == 1) {
		if (sft->sharefs_snap != NULL) {
			/* Size + 1 matches the kmem_zalloc in snap_create. */
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
			sft->sharefs_size = 0;
			sft->sharefs_snap = NULL;
			/* Force a fresh snapshot on any later open/read. */
			sft->sharefs_generation = 0;
		}
	}
	atomic_add_32(&sft->sharefs_refs, -1);
	rw_exit(&sharefs_lock);

	return (0);
}
291 
292 /* ARGSUSED */
/*
 * VOP_READ for sharefs: copy bytes out of the node's snapshot buffer.
 *
 * A read starting at offset 0 refreshes the snapshot first (the
 * sharefs_lock must be dropped around sharefs_snap_create, which
 * takes it as WRITER).  Reads are clamped to the snapshot size;
 * negative offsets are rejected with EFAULT.
 */
/* ARGSUSED */
static int
sharefs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
			caller_context_t *ct)
{
	shnode_t	*sft = VTOSH(vp);
	off_t		off = uio->uio_offset;
	size_t		len = uio->uio_resid;
	int		error = 0;

	rw_enter(&sharefs_lock, RW_READER);

	/*
	 * First check to see if we need to grab a new snapshot.
	 */
	if (off == (off_t)0) {
		/* snap_create takes sharefs_lock as WRITER; drop ours. */
		rw_exit(&sharefs_lock);
		error = sharefs_snap_create(sft);
		if (error) {
			return (EFAULT);
		}
		rw_enter(&sharefs_lock, RW_READER);
	}

	/* Nothing requested, or reading at/past EOF: return early. */
	/* LINTED */
	if (len <= 0 || off >= sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (error);
	}

	/* Clamp the request so it does not run past the snapshot. */
	if ((size_t)(off + len) > sft->sharefs_size)
		len = sft->sharefs_size - off;

	/*
	 * Negative offsets (and the huge len a negative off produces
	 * through the unsigned clamp above) are rejected here.
	 */
	if (off < 0 || len > sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (EFAULT);
	}

	if (len != 0) {
		error = uiomove(sft->sharefs_snap + off,
		    len, UIO_READ, uio);
	}

	rw_exit(&sharefs_lock);
	return (error);
}
338 
339 /* ARGSUSED */
/*
 * VOP_INACTIVE for sharefs: called when the last hold on the vnode is
 * released.  gfs_file_inactive() tears down the vnode and returns the
 * private data (our shnode_t) for us to free, along with any snapshot
 * buffer still attached to it.
 *
 * Note fp and sft alias the same allocation (the gfs_file_t is
 * embedded at the start of the shnode_t), so fp->gfs_size is the size
 * to free sft with.
 */
/* ARGSUSED */
static void
sharefs_inactive(vnode_t *vp, cred_t *cr)
{
	gfs_file_t	*fp = vp->v_data;
	shnode_t	*sft;

	sft = (shnode_t *)gfs_file_inactive(vp);
	if (sft) {
		rw_enter(&sharefs_lock, RW_WRITER);
		if (sft->sharefs_snap != NULL) {
			/* Size + 1 matches the kmem_zalloc in snap_create. */
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		}

		kmem_free(sft, fp->gfs_size);
		rw_exit(&sharefs_lock);
	}
}
357 
358 vnode_t *
359 sharefs_create_root_file(vfs_t *vfsp)
360 {
361 	vnode_t		*vp;
362 	shnode_t	*sft;
363 
364 	vp = gfs_root_create_file(sizeof (shnode_t),
365 	    vfsp, sharefs_ops_data, SHAREFS_INO_FILE);
366 
367 	sft = VTOSH(vp);
368 
369 	sft->sharefs_real_vp = 1;
370 
371 	return (vp);
372 }
373 
/*
 * Vnode operation template for sharefs file nodes.  All mutating
 * operations are absent (the VFS default rejects them); ioctl is
 * explicitly invalid, and seek uses the generic fs_seek.
 */
const fs_operation_def_t sharefs_tops_data[] = {
	{ VOPNAME_OPEN,		{ .vop_open = sharefs_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = sharefs_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = sharefs_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = sharefs_access } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = sharefs_inactive } },
	{ VOPNAME_READ,		{ .vop_read = sharefs_read } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ NULL }
};
385