/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <fs/fs_subr.h>

#include <sys/errno.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/kobj.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>

#include <sharefs/sharefs.h>

/*
 * sharefs_snap_create: create a large character buffer with
 * the shares enumerated.
 */
static int
sharefs_snap_create(shnode_t *sft)
{
	sharetab_t		*sht;
	share_t			*sh;
	size_t			sWritten = 0;
	int			iCount = 0;
	char			*buf;

	rw_enter(&sharefs_lock, RW_WRITER);
	rw_enter(&sharetab_lock, RW_READER);

	if (sft->sharefs_snap) {
		/*
		 * Nothing has changed, so no need to grab a new copy!
		 */
		if (sft->sharefs_generation == sharetab_generation) {
			rw_exit(&sharetab_lock);
			rw_exit(&sharefs_lock);
			return (0);
		}

		ASSERT(sft->sharefs_size != 0);
		kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		sft->sharefs_snap = NULL;
	}

	sft->sharefs_size = sharetab_size;
	sft->sharefs_count = sharetab_count;

	if (sft->sharefs_size == 0) {
		rw_exit(&sharetab_lock);
		rw_exit(&sharefs_lock);
		return (0);
	}

	sft->sharefs_snap = kmem_zalloc(sft->sharefs_size + 1, KM_SLEEP);

	buf = sft->sharefs_snap;

	/*
	 * Walk the Sharetab, dumping each entry.
	 */
	for (sht = sharefs_sharetab; sht != NULL; sht = sht->s_next) {
		int	i;

		for (i = 0; i < SHARETAB_HASHES; i++) {
			for (sh = sht->s_buckets[i].ssh_sh;
			    sh != NULL;
			    sh = sh->sh_next) {
				int	n;

				if ((sWritten + sh->sh_size) >
				    sft->sharefs_size) {
					goto error_fault;
				}

				/*
				 * Note that sh->sh_size accounts
				 * for the field separators. We
				 * need to add one for the EOL
				 * marker; the space for it is
				 * accounted for in each share
				 * by the EOS marker.
				 */
				n = snprintf(&buf[sWritten],
				    sh->sh_size + 1,
				    "%s\t%s\t%s\t%s\t%s\n",
				    sh->sh_path,
				    sh->sh_res,
				    sh->sh_fstype,
				    sh->sh_opts,
				    sh->sh_descr);

				if (n != sh->sh_size) {
					goto error_fault;
				}

				sWritten += n;
				iCount++;
			}
		}
	}

	/*
	 * We want to record the generation number and
	 * mtime inside this snapshot.
	 */
	gethrestime(&sharetab_snap_time);
	sft->sharefs_snap_time = sharetab_snap_time;
	sft->sharefs_generation = sharetab_generation;

	ASSERT(iCount == sft->sharefs_count);

	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);
	return (0);

error_fault:

	kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
	sft->sharefs_size = 0;
	sft->sharefs_count = 0;
	sft->sharefs_snap = NULL;
	rw_exit(&sharetab_lock);
	rw_exit(&sharefs_lock);

	return (EFAULT);
}

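/*
 * Report the attributes of the sharetab file. For the real (root)
 * vnode we report the live sharetab size and mtime; for a per-open
 * snapshot vnode we report the size and time of that snapshot.
 */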
/* ARGSUSED */
static int
sharefs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	timestruc_t	now;
	shnode_t	*sft = VTOSH(vp);

	vap->va_type = VREG;
	vap->va_mode = S_IRUSR | S_IRGRP | S_IROTH;
	vap->va_nodeid = SHAREFS_INO_FILE;
	vap->va_nlink = 1;

	rw_enter(&sharefs_lock, RW_READER);

	/*
	 * If this is the real (root) vnode, report what is
	 * currently in the sharetab.
	 *
	 * Otherwise this is a per-open snapshot vnode, so
	 * report the data held in its snapshot.
	 */
	if (sft->sharefs_real_vp) {
		rw_enter(&sharetab_lock, RW_READER);
		vap->va_size = sharetab_size;
		vap->va_mtime = sharetab_mtime;
		rw_exit(&sharetab_lock);
	} else {
		vap->va_size = sft->sharefs_size;
		vap->va_mtime = sft->sharefs_snap_time;
	}
	rw_exit(&sharefs_lock);

	gethrestime(&now);
	vap->va_atime = vap->va_ctime = now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	vap->va_blksize = DEV_BSIZE;
	vap->va_nblocks = howmany(vap->va_size, vap->va_blksize);
	vap->va_seq = 0;
	vap->va_fsid = vp->v_vfsp->vfs_dev;

	return (0);
}

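/*
 * The sharetab file is read-only; deny write and execute access.
 */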
/* ARGSUSED */
static int
sharefs_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ct)
{
	if (mode & (VWRITE|VEXEC))
		return (EROFS);

	return (0);
}

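/*
 * Open the sharetab file. Each open gets its own vnode backed by a
 * private snapshot of the sharetab, so the data being read cannot
 * change underneath the reader.
 */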
/* ARGSUSED */
int
sharefs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	vnode_t		*vp;
	vnode_t		*ovp = *vpp;
	shnode_t	*sft;
	int		error = 0;

	if (flag & FWRITE)
		return (EINVAL);

	/*
	 * Create a new sharefs vnode for each open. In order to
	 * avoid locks, we create a snapshot which cannot change
	 * during reads.
	 */
	vp = gfs_file_create(sizeof (shnode_t), NULL, sharefs_ops_data);

	((gfs_file_t *)vp->v_data)->gfs_ino = SHAREFS_INO_FILE;

	/*
	 * Hold the parent!
	 */
	VFS_HOLD(ovp->v_vfsp);

	VN_SET_VFS_TYPE_DEV(vp, ovp->v_vfsp, VREG, 0);

	vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	*vpp = vp;
	VN_RELE(ovp);

	sft = VTOSH(vp);

	/*
	 * No need for the lock, no other thread can be accessing
	 * this data structure.
	 */
	atomic_add_32(&sft->sharefs_refs, 1);
	sft->sharefs_real_vp = 0;

	/*
	 * Since the sharetab could change underneath us while we
	 * are dumping a very large sharetab, we make a copy of it
	 * here and dump that copy instead.
	 */
	error = sharefs_snap_create(sft);

	return (error);
}

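/*
 * On the last close of a snapshot vnode, free the snapshot buffer
 * and drop the reference taken at open time.
 */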
/* ARGSUSED */
int
sharefs_close(vnode_t *vp, int flag, int count,
    offset_t off, cred_t *cr, caller_context_t *ct)
{
	shnode_t	*sft = VTOSH(vp);

	if (count > 1)
		return (0);

	rw_enter(&sharefs_lock, RW_WRITER);
	if (vp->v_count == 1) {
		if (sft->sharefs_snap != NULL) {
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
			sft->sharefs_size = 0;
			sft->sharefs_snap = NULL;
			sft->sharefs_generation = 0;
		}
	}
	atomic_add_32(&sft->sharefs_refs, -1);
	rw_exit(&sharefs_lock);

	return (0);
}

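/*
 * Read from the snapshot. A read starting at offset zero re-validates
 * the snapshot first (and refreshes it if the sharetab generation has
 * changed), so a new pass over the file sees current data.
 */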
/* ARGSUSED */
static int
sharefs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	shnode_t	*sft = VTOSH(vp);
	off_t		off = uio->uio_offset;
	size_t		len = uio->uio_resid;
	int		error = 0;

	rw_enter(&sharefs_lock, RW_READER);

	/*
	 * First check to see if we need to grab a new snapshot.
	 */
	if (off == (off_t)0) {
		rw_exit(&sharefs_lock);
		error = sharefs_snap_create(sft);
		if (error) {
			return (EFAULT);
		}
		rw_enter(&sharefs_lock, RW_READER);
	}

	/* LINTED */
	if (len <= 0 || off >= sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (error);
	}

	if ((size_t)(off + len) > sft->sharefs_size)
		len = sft->sharefs_size - off;

	if (off < 0 || len > sft->sharefs_size) {
		rw_exit(&sharefs_lock);
		return (EFAULT);
	}

	if (len != 0) {
		error = uiomove(sft->sharefs_snap + off,
		    len, UIO_READ, uio);
	}

	rw_exit(&sharefs_lock);
	return (error);
}

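/*
 * Tear down a sharefs vnode once its last reference is gone,
 * releasing the snapshot buffer (if any) and the shnode itself.
 */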
/* ARGSUSED */
static void
sharefs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *tx)
{
	gfs_file_t	*fp = vp->v_data;
	shnode_t	*sft;

	sft = (shnode_t *)gfs_file_inactive(vp);
	if (sft) {
		rw_enter(&sharefs_lock, RW_WRITER);
		if (sft->sharefs_snap != NULL) {
			kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
		}

		kmem_free(sft, fp->gfs_size);
		rw_exit(&sharefs_lock);
	}
}

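/*
 * Create the root vnode for a sharefs mount. This is the "real"
 * vnode; per-open snapshot vnodes are created in sharefs_open().
 */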
vnode_t *
sharefs_create_root_file(vfs_t *vfsp)
{
	vnode_t		*vp;
	shnode_t	*sft;

	vp = gfs_root_create_file(sizeof (shnode_t),
	    vfsp, sharefs_ops_data, SHAREFS_INO_FILE);

	sft = VTOSH(vp);

	sft->sharefs_real_vp = 1;

	return (vp);
}

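/*
 * Vnode operation template for the sharetab file.
 */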
const fs_operation_def_t sharefs_tops_data[] = {
	{ VOPNAME_OPEN,		{ .vop_open = sharefs_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = sharefs_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = sharefs_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = sharefs_access } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = sharefs_inactive } },
	{ VOPNAME_READ,		{ .vop_read = sharefs_read } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ NULL }
};