xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_vnops.c (revision bdd0f017b5c28992671ccec0e5a239f9f850933c)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $
33  */
34 
35 /*
36  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37  * Copyright 2021 Tintri by DDN, Inc.  All rights reserved.
38  * Copyright 2025-2026 RackTop Systems, Inc.
39  */
40 
41 /*
42  * Vnode operations
43  *
44  * This file is similar to nfs3_vnops.c
45  */
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/cred.h>
50 #include <sys/vnode.h>
51 #include <sys/vfs.h>
52 #include <sys/filio.h>
53 #include <sys/uio.h>
54 #include <sys/dirent.h>
55 #include <sys/errno.h>
56 #include <sys/sunddi.h>
57 #include <sys/sysmacros.h>
58 #include <sys/kmem.h>
59 #include <sys/cmn_err.h>
60 #include <sys/vfs_opreg.h>
61 #include <sys/policy.h>
62 #include <sys/sdt.h>
63 #include <sys/taskq_impl.h>
64 #include <sys/zone.h>
65 
66 #ifdef	_KERNEL
67 #include <sys/vmsystm.h>	// for desfree
68 #include <vm/hat.h>
69 #include <vm/as.h>
70 #include <vm/page.h>
71 #include <vm/pvn.h>
72 #include <vm/seg.h>
73 #include <vm/seg_map.h>
74 #include <vm/seg_kpm.h>
75 #include <vm/seg_vn.h>
76 #endif	// _KERNEL
77 
78 #include <netsmb/smb_osdep.h>
79 #include <netsmb/smb.h>
80 #include <netsmb/smb_conn.h>
81 #include <netsmb/smb_subr.h>
82 
83 #include <smbfs/smbfs.h>
84 #include <smbfs/smbfs_node.h>
85 #include <smbfs/smbfs_subr.h>
86 
87 #include <sys/fs/smbfs_ioctl.h>
88 #include <fs/fs_subr.h>
89 
90 #ifndef	MAXOFF32_T
91 #define	MAXOFF32_T	0x7fffffff
92 #endif
93 
94 /*
95  * We assign directory offsets like the NFS client, where the
96  * offset increments by _one_ after each directory entry.
97  * Further, the entries "." and ".." are always at offsets
98  * zero and one (respectively) and the "real" entries from
99  * the server appear at offsets starting with two.  This
100  * macro is used to initialize the n_dirofs field after
101  * setting n_dirseq with a _findopen call.
102  */
103 #define	FIRST_DIROFS	2
104 
105 /*
106  * These characters are illegal in NTFS file names.
107  * ref: http://support.microsoft.com/kb/147438
108  *
109  * Careful!  The check in the XATTR case skips the
110  * first character to allow colon in XATTR names.
111  */
112 static const char illegal_chars[] = {
113 	':',	/* colon - keep this first! */
114 	'\\',	/* back slash */
115 	'/',	/* slash */
116 	'*',	/* asterisk */
117 	'?',	/* question mark */
118 	'"',	/* double quote */
119 	'<',	/* less than sign */
120 	'>',	/* greater than sign */
121 	'|',	/* vertical bar */
122 	0
123 };
124 
125 /*
126  * Turning this on causes nodes to be created in the cache
127  * during directory listings, normally avoiding a second
128  * OtW attribute fetch just after a readdir.
129  */
130 int smbfs_fastlookup = 1;
131 
132 struct vnodeops *smbfs_vnodeops = NULL;
133 
134 /* local static function defines */
135 
136 static int	smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
137 			cred_t *);
138 static int	smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
139 			int cache_ok, caller_context_t *);
140 static int	smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
141 			int flags);
142 static int	smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
143 			char *nnm, struct smb_cred *scred, int flags);
144 static int	smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
145 static int	smbfs_accessx(void *, int, cred_t *);
146 static int	smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
147 			caller_context_t *);
148 static int	smbfsflush(smbnode_t *, struct smb_cred *);
149 static void	smbfs_rele_fid(smbnode_t *, struct smb_cred *);
150 static uint32_t xvattr_to_dosattr(smbnode_t *, struct vattr *);
151 
152 static int	smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
153 
154 static int	smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
155 			caller_context_t *);
156 #ifdef	_KERNEL
157 static int	smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
158 			page_t *[], size_t, struct seg *, caddr_t,
159 			enum seg_rw, cred_t *);
160 static int	smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
161 			int, cred_t *);
162 static void	smbfs_delmap_async(void *);
163 
164 static int	smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
165 			cred_t *);
166 static int	smbfs_bio(struct buf *, int, cred_t *);
167 static int	smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
168 			struct uio *uiop, int pgcreated);
169 #endif	// _KERNEL
170 
171 /*
172  * Error flags used to pass information about certain special errors
173  * which need to be handled specially.
174  */
175 #define	SMBFS_EOF			-98
176 
177 /* When implementing OtW locks, make this a real function. */
178 #define	smbfs_lm_has_sleep(vp) 0
179 
180 /*
181  * These are the vnode ops routines which implement the vnode interface to
182  * the networked file system.  These routines just take their parameters,
183  * make them look networkish by putting the right info into interface structs,
184  * and then calling the appropriate remote routine(s) to do the work.
185  *
186  * Note on directory name lookup cacheing:  If we detect a stale fhandle,
187  * we purge the directory cache relative to that vnode.  This way, the
188  * user won't get burned by the cache repeatedly.  See <smbfs/smbnode.h> for
189  * more details on smbnode locking.
190  */
191 
192 
193 /*
194  * XXX
195  * When new and relevant functionality is enabled, we should be
196  * calling vfs_set_feature() to inform callers that pieces of
197  * functionality are available, per PSARC 2007/227.
198  */
/*
 * smbfs_open: VOP_OPEN for smbfs.
 *
 * Obtains (or reuses) the SMB-level open handle for this node.
 * For directories that is an SMB "find" handle (n_dirseq) used by
 * smbfs_readvdir(); for regular files it is an SMB FID (n_fid)
 * carrying access rights at least as strong as 'flag' requires.
 * Reference counts (n_dirrefs / n_fidrefs) track how many opens
 * share each handle; the matching decrements happen in
 * smbfs_close() -> smbfs_rele_fid().
 *
 * Returns 0 or an errno: EIO when the zone or mount is unusable,
 * EACCES for unsupported vnode types (or a type change while the
 * old type is still open), EINTR if the r_lkserlock wait was
 * interrupted, or an error from the OtW open.
 */
/* ARGSUSED */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	smbnode_t	*np;
	vnode_t		*vp;
	smbfattr_t	fa;
	smb_fh_t	*fid = NULL;
	smb_fh_t	*oldfid;
	uint32_t	rights;
	struct smb_cred scred;
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;
	cred_t		*oldcr;
	int		error = 0;

	vp = *vpp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	/* Cross-zone access is never allowed over the wire. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
		return (EACCES);
	}

	/*
	 * Get exclusive access to n_fid and related stuff.
	 * No returns after this until out.
	 */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * Keep track of the vnode type at first open.
	 * It may change later, and we need close to do
	 * cleanup for the type we opened.  Also deny
	 * open of new types until old type is closed.
	 */
	if (np->n_ovtype == VNON) {
		ASSERT(np->n_dirrefs == 0);
		ASSERT(np->n_fidrefs == 0);
	} else if (np->n_ovtype != vp->v_type) {
		SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
		    np->n_ovtype, vp->v_type);
		error = EACCES;
		goto out;
	}

	/*
	 * Directory open.  See smbfs_readvdir()
	 */
	if (vp->v_type == VDIR) {
		if (np->n_dirseq == NULL) {
			/* first open */
			error = smbfs_smb_findopen(np, "*", 1,
			    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
			    &scred, &np->n_dirseq);
			if (error != 0)
				goto out;
		}
		np->n_dirofs = FIRST_DIROFS;
		np->n_dirrefs++;
		goto have_fid;
	}

	/*
	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
	 * FWRITE (to drive successful setattr(size=0) after open)
	 */
	if (flag & FTRUNC)
		flag |= FWRITE;

	/*
	 * If we already have it open, and the FID is still valid,
	 * check whether the rights are sufficient for FID reuse.
	 */
	if (np->n_fidrefs > 0 &&
	    (fid = np->n_fid) != NULL &&
	    fid->fh_vcgenid == ssp->ss_vcgenid) {
		int upgrade = 0;

		if ((flag & FWRITE) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_WRITE_DATA))
			upgrade = 1;
		if ((flag & FREAD) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_READ_DATA))
			upgrade = 1;
		if (!upgrade) {
			/*
			 *  the existing open is good enough
			 */
			np->n_fidrefs++;
			goto have_fid;
		}
		fid = NULL;
	}
	/*
	 * After a VC reconnect (vcgenid mismatch) fid still points
	 * at the stale handle, so start from its old rights here;
	 * in the rights-upgrade case fid was set NULL above and we
	 * start from zero and build up from 'flag' below.
	 */
	rights = (fid != NULL) ? fid->fh_rights : 0;

	/*
	 * we always ask for READ_CONTROL so we can always get the
	 * owner/group IDs to satisfy a stat.  Ditto attributes.
	 */
	rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
	    SA_RIGHT_FILE_READ_ATTRIBUTES);
	if ((flag & FREAD))
		rights |= SA_RIGHT_FILE_READ_DATA;
	if ((flag & FWRITE))
		rights |= SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA |
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;

	bzero(&fa, sizeof (fa));
	error = smbfs_smb_open(np,
	    NULL, 0, 0, /* name nmlen xattr */
	    rights, &scred,
	    &fid, &fa);
	if (error)
		goto out;
	/* The open returned fresh attributes; cache them. */
	smbfs_attrcache_fa(vp, &fa);

	/*
	 * We have a new FID and access rights.
	 * Install the new FID before releasing the old one.
	 */
	VERIFY(fid != NULL);
	oldfid = np->n_fid;
	np->n_fid = fid;
	np->n_fidrefs++;
	if (oldfid != NULL)
		smb_fh_rele(oldfid);

	/*
	 * This thread did the open.
	 * Save our credentials too.
	 */
	mutex_enter(&np->r_statelock);
	oldcr = np->r_cred;
	np->r_cred = cr;
	crhold(cr);
	if (oldcr)
		crfree(oldcr);
	mutex_exit(&np->r_statelock);

have_fid:
	/*
	 * Keep track of the vnode type at first open.
	 * (see comments above)
	 */
	if (np->n_ovtype == VNON)
		np->n_ovtype = vp->v_type;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);
}
362 
/*
 * smbfs_close: VOP_CLOSE for smbfs.
 *
 * Runs at every close; the real teardown happens only on the last
 * close (count == 1): release local record locks, flush dirty pages
 * if the file was open for write, then drop one reference on the
 * SMB-level handle via smbfs_rele_fid(), which does the OtW close
 * when the last reference goes away.
 *
 * Returns 0 except in the cross-zone case (EIO); like NFS, errors
 * from the page flush are not propagated to the caller.
 */
/*ARGSUSED*/
static int
smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	struct smb_cred scred;
	int error = 0;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/*
	 * Don't "bail out" for VFS_UNMOUNTED here,
	 * as we want to do cleanup, etc.
	 */

	/*
	 * zone_enter(2) prevents processes from changing zones with SMBFS files
	 * open; if we happen to get here from the wrong zone we can't do
	 * anything over the wire.
	 */
	if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
		/*
		 * We could attempt to clean up locks, except we're sure
		 * that the current process didn't acquire any locks on
		 * the file: any attempt to lock a file belong to another zone
		 * will fail, and one can't lock an SMBFS file and then change
		 * zones, as that fails too.
		 *
		 * Returning an error here is the sane thing to do.  A
		 * subsequent call to VN_RELE() which translates to a
		 * smbfs_inactive() will clean up state: if the zone of the
		 * vnode's origin is still alive and kicking, an async worker
		 * thread will handle the request (from the correct zone), and
		 * everything (minus the final smbfs_getattr_otw() call) should
		 * be OK. If the zone is going away smbfs_async_inactive() will
		 * throw away cached pages inline.
		 */
		return (EIO);
	}

	/*
	 * If we are using local locking for this filesystem, then
	 * release all of the SYSV style record locks.  Otherwise,
	 * we are doing network locking and we need to release all
	 * of the network locks.  All of the locks held by this
	 * process on this file are released no matter what the
	 * incoming reference count is.
	 */
	if (smi->smi_flags & SMI_LLOCK) {
		pid_t pid = ddi_get_pid();
		cleanlocks(vp, pid, 0);
		cleanshares(vp, pid);
	}
	/*
	 * else doing OtW locking.  SMB servers drop all locks
	 * on the file ID we close here, so no _lockrelease()
	 */

	/*
	 * This (passed in) count is the ref. count from the
	 * user's file_t before the closef call (fio.c).
	 * The rest happens only on last close.
	 */
	if (count > 1)
		return (0);

	/* NFS has DNLC purge here. */

	/*
	 * If the file was open for write and there are pages,
	 * then make sure dirty pages written back.
	 *
	 * NFS does this async when "close-to-open" is off
	 * (MI_NOCTO flag is set) to avoid blocking the caller.
	 * For now, always do this synchronously (no B_ASYNC).
	 */
	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
		if (error == EAGAIN)
			error = 0;
	}
	/*
	 * Note: 'error' is not returned to the caller; it only
	 * decides whether it is safe to clear the stale state
	 * recorded on the node.
	 */
	if (error == 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags &= ~RSTALE;
		np->r_error = 0;
		mutex_exit(&np->r_statelock);
	}

	/*
	 * Decrement the reference count for the FID
	 * and possibly do the OtW close.
	 *
	 * Exclusive lock for modifying n_fid stuff.
	 * Don't want this one ever interruptible.
	 */
	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
	smb_credinit(&scred, cr);

	smbfs_rele_fid(np, &scred);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (0);
}
471 
472 /*
473  * Helper for smbfs_close.  Decrement the reference count
474  * for an SMB-level file or directory ID, and when the last
475  * reference for the fid goes away, do the OtW close.
476  */
477 static void
smbfs_rele_fid(smbnode_t * np,struct smb_cred * scred)478 smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
479 {
480 	cred_t		*oldcr;
481 	struct smbfs_fctx *fctx;
482 	int		error;
483 	smb_fh_t	*ofid;
484 
485 	error = 0;
486 
487 	/* Make sure we serialize for n_dirseq use. */
488 	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));
489 
490 	/*
491 	 * Note that vp->v_type may change if a remote node
492 	 * is deleted and recreated as a different type, and
493 	 * our getattr may change v_type accordingly.
494 	 * Now use n_ovtype to keep track of the v_type
495 	 * we had during open (see comments above).
496 	 */
497 	switch (np->n_ovtype) {
498 	case VDIR:
499 		ASSERT(np->n_dirrefs > 0);
500 		if (--np->n_dirrefs)
501 			return;
502 		if ((fctx = np->n_dirseq) != NULL) {
503 			np->n_dirseq = NULL;
504 			np->n_dirofs = 0;
505 			error = smbfs_smb_findclose(fctx, scred);
506 		}
507 		break;
508 
509 	case VREG:
510 		ASSERT(np->n_fidrefs > 0);
511 		if (--np->n_fidrefs)
512 			return;
513 		if ((ofid = np->n_fid) != NULL) {
514 			np->n_fid = NULL;
515 			smb_fh_rele(ofid);
516 		}
517 		break;
518 
519 	default:
520 		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
521 		break;
522 	}
523 	if (error) {
524 		SMBVDEBUG("error %d closing %s\n",
525 		    error, np->n_rpath);
526 	}
527 
528 	/* Allow next open to use any v_type. */
529 	np->n_ovtype = VNON;
530 
531 	/*
532 	 * Other "last close" stuff.
533 	 */
534 	mutex_enter(&np->r_statelock);
535 	if (np->n_flag & NATTRCHANGED)
536 		smbfs_attrcache_rm_locked(np);
537 	oldcr = np->r_cred;
538 	np->r_cred = NULL;
539 	mutex_exit(&np->r_statelock);
540 	if (oldcr != NULL)
541 		crfree(oldcr);
542 }
543 
/*
 * smbfs_read: VOP_READ for smbfs.
 *
 * Requires a valid open (n_fid != NULL); the caller holds the
 * shared r_rwlock (asserted below).  Gets current size/mtime via
 * smbfsgetattr(), clips the request to EOF by temporarily
 * shrinking uio_resid, then either reads directly over the wire
 * (caching disabled or direct I/O with no mappings/pages) or
 * through the page cache via vpm/segmap.
 */
/* ARGSUSED */
static int
smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	offset_t	endoff;
	ssize_t		past_eof;
	int		error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Sanity check: should have a valid open */
	if (np->n_fid == NULL)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 * Our SMB layer takes care to return EFBIG
	 * when it has to fallback to a 32-bit call.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/* get vnode attributes from server */
	va.va_mask = AT_SIZE | AT_MTIME;
	if (error = smbfsgetattr(vp, &va, cr))
		return (error);

	/* Update mtime with mtime from server here? */

	/* if offset is beyond EOF, read nothing */
	if (uiop->uio_loffset >= va.va_size)
		return (0);

	/*
	 * Limit the read to the remaining file size.
	 * Do this by temporarily reducing uio_resid
	 * by the amount that lies beyond the EOF.
	 */
	if (endoff > va.va_size) {
		past_eof = (ssize_t)(endoff - va.va_size);
		uiop->uio_resid -= past_eof;
	} else
		past_eof = 0;

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
		    SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_READ,
		    uiop, &scred, smb_timo_read);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_eof;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		error = smbfs_validate_caches(vp, cr);
		if (error)
			break;

		/* NFS waits for RINCACHEPURGE here. */

		if (vpm_enable) {
			/*
			 * Copy data.
			 */
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
			    S_READ);

			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			/*
			 * If read a whole block or read to eof,
			 * won't need this buffer again soon.
			 */
			mutex_enter(&np->r_statelock);
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == np->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&np->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	error = ENOSYS;
#endif	// _KERNEL

	/* undo adjustment of resid */
	uiop->uio_resid += past_eof;

	return (error);
}
703 
704 
/*
 * smbfs_write: VOP_WRITE for smbfs.
 *
 * Requires a valid open (n_fid != NULL); the caller holds the
 * exclusive r_rwlock (asserted below).  Handles FAPPEND by
 * re-fetching the file size, enforces the process file-size
 * rlimit by temporarily shrinking uio_resid, then writes either
 * directly over the wire (caching disabled or direct I/O with no
 * mappings/pages) or through the page cache via vpm/segmap.
 * On error the uio is rewound to the position recorded before
 * the failing transfer (see 'bottom').
 */
/* ARGSUSED */
static int
smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr    va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	offset_t	endoff, limit;
	ssize_t		past_limit;
	int		error, timo;
	u_offset_t	last_off;
	size_t		last_resid;
#ifdef	_KERNEL
	uint_t		bsize;
#endif

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Sanity check: should have a valid open */
	if (np->n_fid == NULL)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Handle ioflag bits: (FAPPEND|FSYNC|FDSYNC)
	 */
	if (ioflag & (FAPPEND | FSYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attrcache_remove(np);
		}
	}
	if (ioflag & FAPPEND) {
		/*
		 * File size can be changed by another client
		 *
		 * Todo: Consider redesigning this to use a
		 * handle opened for append instead.
		 */
		va.va_mask = AT_SIZE;
		if (error = smbfsgetattr(vp, &va, cr))
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Check to make sure that the process will not exceed
	 * its limit on file size.  It is okay to write up to
	 * the limit, but not beyond.  Thus, the write which
	 * reaches the limit will be short and the next write
	 * will return an error.
	 *
	 * So if we're starting at or beyond the limit, EFBIG.
	 * Otherwise, temporarily reduce resid to the amount
	 * that is after the limit.
	 */
	limit = uiop->uio_llimit;
	if (limit == RLIM64_INFINITY)
		limit = MAXOFFSET_T;
	if (uiop->uio_loffset >= limit) {
#ifdef	_KERNEL
		proc_t *p = ttoproc(curthread);

		/* Deliver SIGXFSZ (per rctl policy) before failing. */
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
		    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
#endif	// _KERNEL
		return (EFBIG);
	}
	if (endoff > limit) {
		past_limit = (ssize_t)(endoff - limit);
		uiop->uio_resid -= past_limit;
	} else
		past_limit = 0;

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

#ifdef	_KERNEL
	/* Also reached from the segmap path below on EACCES faults. */
smbfs_fwrite:
#endif	// _KERNEL
		if (np->r_flags & RSTALE) {
			last_resid = uiop->uio_resid;
			last_off = uiop->uio_loffset;
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}

		/* Timeout: longer for append. */
		timo = smb_timo_write;
		if (endoff > np->r_size)
			timo = smb_timo_append;

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
		    SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    uiop, &scred, timo);

		if (error == 0) {
			mutex_enter(&np->r_statelock);
			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
			if (uiop->uio_loffset > (offset_t)np->r_size)
				np->r_size = (len_t)uiop->uio_loffset;
			mutex_exit(&np->r_statelock);
			if (ioflag & (FSYNC | FDSYNC)) {
				/* Don't error the I/O if this fails. */
				(void) smbfsflush(np, &scred);
			}
		}

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_limit;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	bsize = vp->v_vfsp->vfs_bsize;

	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Remember position so 'bottom' can rewind on error. */
		last_resid = uiop->uio_resid;
		last_off = uiop->uio_loffset;

		if (np->r_flags & RSTALE) {
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * From NFS: Don't create dirty pages faster than they
		 * can be cleaned.
		 *
		 * Here NFS also checks for async writes (np->r_awcount)
		 */
		mutex_enter(&np->r_statelock);
		while (np->r_gcount > 0) {
			if (SMBINTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
					mutex_exit(&np->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&np->r_cv, &np->r_statelock);
		}
		mutex_exit(&np->r_statelock);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			/*
			 * It will use kpm mappings, so no need to
			 * pass an address.
			 */
			error = smbfs_writenp(np, NULL, n, uiop, 0);
		} else {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				mutex_enter(&np->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= np->r_size);
				mutex_exit(&np->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = smbfs_writenp(np, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = smbfs_writenp(np, base + on, n, uiop,
				    0);
			}
		}

		if (!error) {
			if (smi->smi_flags & SMI_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write
				 * and mark the buffer to
				 * indicate that it won't be
				 * needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (np->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * In the event that we got an access error while
			 * faulting in a page for a write-only file just
			 * force a write.
			 */
			if (error == EACCES)
				goto smbfs_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	last_resid = uiop->uio_resid;
	last_off = uiop->uio_loffset;
	error = ENOSYS;
#endif	// _KERNEL

bottom:
	/* undo adjustment of resid; on error also rewind the offset */
	if (error) {
		uiop->uio_resid = last_resid + past_limit;
		uiop->uio_loffset = last_off;
	} else {
		uiop->uio_resid += past_limit;
	}

	return (error);
}
1017 
1018 #ifdef	_KERNEL
1019 
1020 /*
1021  * Like nfs_client.c: writerp()
1022  *
1023  * Write by creating pages and uiomove data onto them.
1024  */
1025 
int
smbfs_writenp(smbnode_t *np, caddr_t base, int tcount, struct uio *uio,
    int pgcreated)
{
	int		pagecreate;
	int		n;
	int		saved_n;
	caddr_t		saved_base;
	u_offset_t	offset;
	int		error;
	int		sm_error;
	vnode_t		*vp = SMBTOV(np);

	/* Caller must hold r_rwlock as writer for the whole copy. */
	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		/* base must lie within a single MAXBSIZE segkmap window. */
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/*
	 * Move bytes in at most PAGESIZE chunks. We must avoid
	 * spanning pages in uiomove() because page faults may cause
	 * the cache to be invalidated out from under us. The r_size is not
	 * updated until after the uiomove. If we push the last page of a
	 * file before r_size is correct, we will lose the data written past
	 * the current (and invalid) r_size.
	 */
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/*
		 * n is the number of bytes required to satisfy the request
		 *   or the number of bytes to fill out the page.
		 */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Check to see if we can skip reading in the page
		 * and just allocate the memory.  We can do this
		 * if we are going to rewrite the entire mapping
		 * or if we are going to write to or beyond the current
		 * end of file from the beginning of the mapping.
		 *
		 * The read of r_size is now protected by r_statelock.
		 */
		mutex_enter(&np->r_statelock);
		/*
		 * When pgcreated is nonzero the caller has already done
		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
		 * segkpm this means we already have at least one page
		 * created and mapped at base.
		 */
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= np->r_size)));

		mutex_exit(&np->r_statelock);
		if (!vpm_enable && pagecreate) {
			/*
			 * The last argument tells segmap_pagecreate() to
			 * always lock the page, as opposed to sometimes
			 * returning with the page locked. This way we avoid a
			 * fault on the ensuing uiomove(), but also
			 * more importantly (to fix bug 1094402) we can
			 * call segmap_fault() to unlock the page in all
			 * cases. An alternative would be to modify
			 * segmap_pagecreate() to tell us when it is
			 * locking a page, but that's a fairly major
			 * interface change.
			 */
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
				    (uint_t)n, 1);
			/* Remember the mapping so we can unlock it below. */
			saved_base = base;
			saved_n = n;
		}

		/*
		 * The number of bytes of data in the last page can not
		 * be accurately be determined while page is being
		 * uiomove'd to and the size of the file being updated.
		 * Thus, inform threads which need to know accurately
		 * how much data is in the last page of the file.  They
		 * will not do the i/o immediately, but will arrange for
		 * the i/o to happen later when this modify operation
		 * will have finished.
		 */
		ASSERT(!(np->r_flags & RMODINPROGRESS));
		mutex_enter(&np->r_statelock);
		np->r_flags |= RMODINPROGRESS;
		np->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&np->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initizliazed
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * r_size is the maximum number of
		 * bytes known to be in the file.
		 * Make sure it is at least as high as the
		 * first unwritten byte pointed to by uio_loffset.
		 */
		mutex_enter(&np->r_statelock);
		if (np->r_size < uio->uio_loffset)
			np->r_size = uio->uio_loffset;
		np->r_flags &= ~RMODINPROGRESS;
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/* n = # of bytes written */
		n = (int)(uio->uio_loffset - offset);

		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;
		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens on a most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
		if (!vpm_enable && pagecreate) {
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller is responsible for this page,
				 * it was not created in this loop.
				 */
				pgcreated = 0;
			} else {
				/*
				 * For bug 1094402: segmap_pagecreate locks
				 * page. Unlock it. This also unlocks the
				 * pages allocated by page_create_va() in
				 * segmap_pagecreate().
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
1186 
1187 /*
1188  * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
1189  * Like nfs3_rdwrlbn()
1190  */
static int
smbfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
	int flags, cred_t *cr)
{
	smbmntinfo_t	*smi = VTOSMI(vp);
	struct buf *bp;
	int error;
	int sync;

	/* Refuse cross-zone and dead/unmounted filesystem access. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Build a buf describing page I/O over the page list pp. */
	bp = pageio_setup(pp, len, vp, flags);
	ASSERT(bp != NULL);

	/*
	 * pageio_setup should have set b_addr to 0.  This
	 * is correct since we want to do I/O on a page
	 * boundary.  bp_mapin will use this addr to calculate
	 * an offset, and then set b_addr to the kernel virtual
	 * address it allocated for us.
	 */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)off;
	bp_mapin(bp);

	/*
	 * Calculate the desired level of stability to write data.
	 * Async writes are allowed to be "unstable" (no flush)
	 * only while free memory is plentiful.
	 */
	if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
	    freemem > desfree) {
		sync = 0;
	} else {
		sync = 1;
	}

	error = smbfs_bio(bp, sync, cr);

	/* Tear down the kernel mapping and the pageio buf. */
	bp_mapout(bp);
	pageio_done(bp);

	return (error);
}
1242 
1243 
1244 /*
1245  * Corresponds to nfs3_vnopc.c : nfs3_bio(), though the NFS code
1246  * uses nfs3read()/nfs3write() where we use smb_rwuio().  Also,
1247  * NFS has this later in the file.  Move it up here closer to
1248  * the one call site just above.
1249  */
1250 
static int
smbfs_bio(struct buf *bp, int sync, cred_t *cr)
{
	struct iovec aiov[1];
	struct uio  auio;
	struct smb_cred scred;
	smbnode_t *np = VTOSMB(bp->b_vp);
	smbmntinfo_t *smi = np->n_mount;
	offset_t offset;
	offset_t endoff;
	size_t count;
	size_t past_eof;
	int error;

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	offset = ldbtob(bp->b_lblkno);
	count = bp->b_bcount;
	endoff = offset + count;
	if (offset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Limit file I/O to the remaining file size, but see
	 * the notes in smbfs_getpage about SMBFS_EOF.
	 */
	mutex_enter(&np->r_statelock);
	if (offset >= np->r_size) {
		/* Entirely beyond EOF: nothing to transfer. */
		mutex_exit(&np->r_statelock);
		if (bp->b_flags & B_READ) {
			return (SMBFS_EOF);
		} else {
			return (EINVAL);
		}
	}
	if (endoff > np->r_size) {
		/* Clip the transfer at EOF; remember how much we cut. */
		past_eof = (size_t)(endoff - np->r_size);
		count -= past_eof;
	} else
		past_eof = 0;
	mutex_exit(&np->r_statelock);
	ASSERT(count > 0);

	/* Caller did bpmapin().  Mapped address is... */
	aiov[0].iov_base = bp->b_un.b_addr;
	aiov[0].iov_len = count;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = offset;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_fmode = 0;
	auio.uio_resid = count;

	/* Shared lock for n_fid use in smb_rwuio */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
	    smi->smi_flags & SMI_INT))
		return (EINTR);
	smb_credinit(&scred, cr);

	DTRACE_IO1(start, struct buf *, bp);

	if (bp->b_flags & B_READ) {

		error = smb_rwuio(np->n_fid, UIO_READ,
		    &auio, &scred, smb_timo_read);

		/* Like NFS, only set b_error here. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;

		/* A short read with no error is still a failure here. */
		if (!error && auio.uio_resid != 0)
			error = EIO;
		if (!error && past_eof != 0) {
			/* Zero the memory beyond EOF. */
			bzero(bp->b_un.b_addr + count, past_eof);
		}
	} else {

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    &auio, &scred, smb_timo_write);

		/* Like NFS, only set b_error here. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;

		if (!error && auio.uio_resid != 0)
			error = EIO;
		if (!error && sync) {
			/* Stable write requested: flush on the server too. */
			(void) smbfsflush(np, &scred);
		}
	}

	/*
	 * This comes from nfs3_commit()
	 */
	if (error != 0) {
		mutex_enter(&np->r_statelock);
		if (error == ESTALE)
			np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = error;
		mutex_exit(&np->r_statelock);
		bp->b_flags |= B_ERROR;
	}

	DTRACE_IO1(done, struct buf *, bp);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	if (error == ESTALE)
		smbfs_attrcache_remove(np);

	return (error);
}
1366 #endif	// _KERNEL
1367 
1368 /*
1369  * Here NFS has: nfs3write, nfs3read
1370  * We use smb_rwuio instead.
1371  */
1372 
1373 /* ARGSUSED */
1374 static int
smbfs_ioctl(vnode_t * vp,int cmd,intptr_t arg,int flag,cred_t * cr,int * rvalp,caller_context_t * ct)1375 smbfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag,
1376 	cred_t *cr, int *rvalp,	caller_context_t *ct)
1377 {
1378 	int		error;
1379 	smbmntinfo_t	*smi;
1380 
1381 	smi = VTOSMI(vp);
1382 
1383 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1384 		return (EIO);
1385 
1386 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1387 		return (EIO);
1388 
1389 	switch (cmd) {
1390 
1391 	case _FIOFFS:
1392 		error = smbfs_fsync(vp, 0, cr, ct);
1393 		break;
1394 
1395 		/*
1396 		 * The following two ioctls are used by bfu.
1397 		 * Silently ignore to avoid bfu errors.
1398 		 */
1399 	case _FIOGDIO:
1400 	case _FIOSDIO:
1401 		error = 0;
1402 		break;
1403 
1404 #if 0	/* Todo - SMB ioctl query regions */
1405 	case _FIO_SEEK_DATA:
1406 	case _FIO_SEEK_HOLE:
1407 #endif
1408 
1409 	case _FIODIRECTIO:
1410 		error = smbfs_directio(vp, (int)arg, cr);
1411 		break;
1412 
1413 		/*
1414 		 * Allow get/set with "raw" security descriptor (SD) data.
1415 		 * Useful for testing, diagnosing idmap problems, etc.
1416 		 */
1417 	case SMBFSIO_GETSD:
1418 		error = smbfs_acl_iocget(vp, arg, flag, cr);
1419 		break;
1420 
1421 	case SMBFSIO_SETSD:
1422 		error = smbfs_acl_iocset(vp, arg, flag, cr);
1423 		break;
1424 
1425 	default:
1426 		error = ENOTTY;
1427 		break;
1428 	}
1429 
1430 	return (error);
1431 }
1432 
1433 
1434 /*
1435  * Return either cached or remote attributes. If get remote attr
1436  * use them to check and invalidate caches, then cache the new attributes.
1437  */
1438 /* ARGSUSED */
1439 static int
smbfs_getattr(vnode_t * vp,struct vattr * vap,int flags,cred_t * cr,caller_context_t * ct)1440 smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1441 	caller_context_t *ct)
1442 {
1443 	smbnode_t *np;
1444 	smbmntinfo_t *smi;
1445 	int error;
1446 
1447 	smi = VTOSMI(vp);
1448 
1449 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1450 		return (EIO);
1451 
1452 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1453 		return (EIO);
1454 
1455 	/*
1456 	 * If it has been specified that the return value will
1457 	 * just be used as a hint, and we are only being asked
1458 	 * for size, fsid or rdevid, then return the client's
1459 	 * notion of these values without checking to make sure
1460 	 * that the attribute cache is up to date.
1461 	 * The whole point is to avoid an over the wire GETATTR
1462 	 * call.
1463 	 */
1464 	np = VTOSMB(vp);
1465 	if (flags & ATTR_HINT) {
1466 		if (vap->va_mask ==
1467 		    (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
1468 			mutex_enter(&np->r_statelock);
1469 			if (vap->va_mask | AT_SIZE)
1470 				vap->va_size = np->r_size;
1471 			if (vap->va_mask | AT_FSID)
1472 				vap->va_fsid = vp->v_vfsp->vfs_dev;
1473 			if (vap->va_mask | AT_RDEV)
1474 				vap->va_rdev = vp->v_rdev;
1475 			mutex_exit(&np->r_statelock);
1476 			return (0);
1477 		}
1478 	}
1479 
1480 	/*
1481 	 * Only need to flush pages if asking for the mtime
1482 	 * and if there any dirty pages.
1483 	 *
1484 	 * Here NFS also checks for async writes (np->r_awcount)
1485 	 */
1486 	if (vap->va_mask & AT_MTIME) {
1487 		if (vn_has_cached_data(vp) &&
1488 		    ((np->r_flags & RDIRTY) != 0)) {
1489 			mutex_enter(&np->r_statelock);
1490 			np->r_gcount++;
1491 			mutex_exit(&np->r_statelock);
1492 			error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1493 			mutex_enter(&np->r_statelock);
1494 			if (error && (error == ENOSPC || error == EDQUOT)) {
1495 				if (!np->r_error)
1496 					np->r_error = error;
1497 			}
1498 			if (--np->r_gcount == 0)
1499 				cv_broadcast(&np->r_cv);
1500 			mutex_exit(&np->r_statelock);
1501 		}
1502 	}
1503 
1504 	return (smbfsgetattr(vp, vap, cr));
1505 }
1506 
1507 /* smbfsgetattr() in smbfs_client.c */
1508 
1509 /*ARGSUSED4*/
static int
smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
		caller_context_t *ct)
{
	vfs_t		*vfsp;
	smbmntinfo_t	*smi;
	int		error;
	uint_t		mask;
	struct vattr	oldva;

	vfsp = vp->v_vfsp;
	smi = VFTOSMI(vfsp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Refuse attributes that can never be set (e.g. AT_NOSET bits). */
	mask = vap->va_mask;
	if (mask & AT_NOSET)
		return (EINVAL);

	if (vfsp->vfs_flag & VFS_RDONLY)
		return (EROFS);

	/*
	 * This is a _local_ access check so that only the owner of
	 * this mount can set attributes.  With ACLs enabled, the
	 * file owner can be different from the mount owner, and we
	 * need to check the _mount_ owner here.  See _access_rwx
	 */
	bzero(&oldva, sizeof (oldva));
	oldva.va_mask = AT_TYPE | AT_MODE;
	error = smbfsgetattr(vp, &oldva, cr);
	if (error)
		return (error);
	/* Substitute mount owner for the file's uid/gid (see above). */
	oldva.va_mask |= AT_UID | AT_GID;
	oldva.va_uid = smi->smi_uid;
	oldva.va_gid = smi->smi_gid;

	error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
	    smbfs_accessx, vp);
	if (error)
		return (error);

	if (mask & (AT_UID | AT_GID)) {
		/* Owner changes need ACL support on the mount. */
		if (smi->smi_flags & SMI_ACL)
			error = smbfs_acl_setids(vp, vap, cr);
		else
			error = ENOSYS;
		if (error != 0) {
			SMBVDEBUG("error %d seting UID/GID on %s",
			    error, VTOSMB(vp)->n_rpath);
			/*
			 * It might be more correct to return the
			 * error here, but that causes complaints
			 * when root extracts a cpio archive, etc.
			 * So ignore this error, and go ahead with
			 * the rest of the setattr work.
			 */
		}
	}

	/* The real over-the-wire work happens in smbfssetattr(). */
	error = smbfssetattr(vp, vap, flags, cr);

#ifdef	SMBFS_VNEVENT
	if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
		vnevent_truncate(vp, ct);
#endif

	return (error);
}
1583 
1584 /*
1585  * Mostly from Darwin smbfs_setattr()
1586  * but then modified a lot.
1587  */
1588 /* ARGSUSED */
1589 static int
smbfssetattr(vnode_t * vp,struct vattr * vap,int flags,cred_t * cr)1590 smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
1591 {
1592 	int		error = 0;
1593 	smbnode_t	*np = VTOSMB(vp);
1594 	smbmntinfo_t	*smi = np->n_mount;
1595 	uint_t		mask = vap->va_mask;
1596 	struct timespec	*mtime, *atime;
1597 	struct smb_cred	scred;
1598 	int		modified = 0;
1599 	smb_fh_t	*fid = NULL;
1600 	uint32_t rights = 0;
1601 	uint32_t dosattr = 0;
1602 
1603 	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
1604 
1605 	/*
1606 	 * There are no settable attributes on the XATTR dir,
1607 	 * so just silently ignore these.  On XATTR files,
1608 	 * you can set the size but nothing else.
1609 	 */
1610 	if (vp->v_flag & V_XATTRDIR)
1611 		return (0);
1612 	if (np->n_flag & N_XATTR) {
1613 		if (mask & AT_TIMES)
1614 			SMBVDEBUG("ignore set time on xattr\n");
1615 		mask &= AT_SIZE;
1616 	}
1617 
1618 	/*
1619 	 * Only need to flush pages if there are any pages and
1620 	 * if the file is marked as dirty in some fashion.  The
1621 	 * file must be flushed so that we can accurately
1622 	 * determine the size of the file and the cached data
1623 	 * after the SETATTR returns.  A file is considered to
1624 	 * be dirty if it is either marked with RDIRTY, has
1625 	 * outstanding i/o's active, or is mmap'd.  In this
1626 	 * last case, we can't tell whether there are dirty
1627 	 * pages, so we flush just to be sure.
1628 	 */
1629 	if (vn_has_cached_data(vp) &&
1630 	    ((np->r_flags & RDIRTY) ||
1631 	    np->r_count > 0 ||
1632 	    np->r_mapcnt > 0)) {
1633 		ASSERT(vp->v_type != VCHR);
1634 		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
1635 		if (error && (error == ENOSPC || error == EDQUOT)) {
1636 			mutex_enter(&np->r_statelock);
1637 			if (!np->r_error)
1638 				np->r_error = error;
1639 			mutex_exit(&np->r_statelock);
1640 		}
1641 	}
1642 
1643 	/*
1644 	 * If our caller is trying to set multiple attributes, they
1645 	 * can make no assumption about what order they are done in.
1646 	 * Here we try to do them in order of decreasing likelihood
1647 	 * of failure, just to minimize the chance we'll wind up
1648 	 * with a partially complete request.
1649 	 */
1650 
1651 	smb_credinit(&scred, cr);
1652 
1653 	/*
1654 	 * If the caller has provided extensible attributes,
1655 	 * map those into DOS attributes supported by SMB.
1656 	 * Note: zero means "no change".
1657 	 */
1658 	if (mask & AT_XVATTR)
1659 		dosattr = xvattr_to_dosattr(np, vap);
1660 
1661 	/*
1662 	 * Will we need an open handle for this setattr?
1663 	 * If so, what rights will we need?
1664 	 */
1665 	if (dosattr || (mask & (AT_ATIME | AT_MTIME))) {
1666 		rights |=
1667 		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;
1668 	}
1669 	if (mask & AT_SIZE) {
1670 		rights |=
1671 		    SA_RIGHT_FILE_WRITE_DATA |
1672 		    SA_RIGHT_FILE_APPEND_DATA;
1673 	}
1674 
1675 	/*
1676 	 * Only SIZE really requires a handle, but it's
1677 	 * simpler and more reliable to set via a handle.
1678 	 * Some servers like NT4 won't set times by path.
1679 	 * Also, we're usually setting everything anyway.
1680 	 */
1681 	if (rights != 0) {
1682 		error = smbfs_smb_tmpopen(np, rights, &scred, &fid);
1683 		if (error) {
1684 			SMBVDEBUG("error %d opening %s\n",
1685 			    error, np->n_rpath);
1686 			goto out;
1687 		}
1688 		ASSERT(fid != NULL);
1689 	}
1690 
1691 	/*
1692 	 * If the server supports the UNIX extensions, right here is where
1693 	 * we'd support changes to uid, gid, mode, and possibly va_flags.
1694 	 * For now we claim to have made any such changes.
1695 	 */
1696 
1697 	if (mask & AT_SIZE) {
1698 		/*
1699 		 * If the new file size is less than what the client sees as
1700 		 * the file size, then just change the size and invalidate
1701 		 * the pages.
1702 		 */
1703 
1704 		/*
1705 		 * Set the file size to vap->va_size.
1706 		 */
1707 		ASSERT(fid != NULL);
1708 		error = smbfs_smb_setfsize(smi->smi_share, fid,
1709 		    vap->va_size, &scred);
1710 		if (error) {
1711 			SMBVDEBUG("setsize error %d file %s\n",
1712 			    error, np->n_rpath);
1713 		} else {
1714 			/*
1715 			 * Darwin had code here to zero-extend.
1716 			 * Tests indicate the server will zero-fill,
1717 			 * so looks like we don't need to do that.
1718 			 */
1719 			mutex_enter(&np->r_statelock);
1720 			np->r_size = vap->va_size;
1721 			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
1722 			mutex_exit(&np->r_statelock);
1723 			modified = 1;
1724 		}
1725 	}
1726 
1727 	/*
1728 	 * Todo: Implement setting create_time (which is
1729 	 * different from ctime).
1730 	 */
1731 	mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0);
1732 	atime = ((mask & AT_ATIME) ? &vap->va_atime : 0);
1733 
1734 	if (dosattr || mtime || atime) {
1735 		/*
1736 		 * Always use the handle-based set attr call now.
1737 		 */
1738 		ASSERT(fid != NULL);
1739 		error = smbfs_smb_setfattr(smi->smi_share, fid,
1740 		    dosattr, mtime, atime, &scred);
1741 		if (error) {
1742 			SMBVDEBUG("set times error %d file %s\n",
1743 			    error, np->n_rpath);
1744 		} else {
1745 			modified = 1;
1746 		}
1747 	}
1748 
1749 out:
1750 	if (fid != NULL)
1751 		smbfs_smb_tmpclose(np, fid);
1752 
1753 	smb_credrele(&scred);
1754 
1755 	if (modified) {
1756 		/*
1757 		 * Invalidate attribute cache in case the server
1758 		 * doesn't set exactly the attributes we asked.
1759 		 */
1760 		smbfs_attrcache_remove(np);
1761 
1762 		/*
1763 		 * If changing the size of the file, invalidate
1764 		 * any local cached data which is no longer part
1765 		 * of the file.  We also possibly invalidate the
1766 		 * last page in the file.  We could use
1767 		 * pvn_vpzero(), but this would mark the page as
1768 		 * modified and require it to be written back to
1769 		 * the server for no particularly good reason.
1770 		 * This way, if we access it, then we bring it
1771 		 * back in.  A read should be cheaper than a
1772 		 * write.
1773 		 */
1774 		if (mask & AT_SIZE) {
1775 			smbfs_invalidate_pages(vp,
1776 			    (vap->va_size & PAGEMASK), cr);
1777 		}
1778 	}
1779 
1780 	return (error);
1781 }
1782 
1783 /*
1784  * Helper function for extensible system attributes (PSARC 2007/315)
1785  * Compute the DOS attribute word to pass to _setfattr (see above).
1786  * This returns zero IFF no change is being made to attributes.
1787  * Otherwise return the new attributes or SMB_EFA_NORMAL.
1788  */
1789 static uint32_t
xvattr_to_dosattr(smbnode_t * np,struct vattr * vap)1790 xvattr_to_dosattr(smbnode_t *np, struct vattr *vap)
1791 {
1792 	xvattr_t *xvap = (xvattr_t *)vap;
1793 	xoptattr_t *xoap = NULL;
1794 	uint32_t attr = np->r_attr.fa_attr;
1795 	boolean_t anyset = B_FALSE;
1796 
1797 	if ((xoap = xva_getxoptattr(xvap)) == NULL)
1798 		return (0);
1799 
1800 	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
1801 		if (xoap->xoa_archive)
1802 			attr |= SMB_FA_ARCHIVE;
1803 		else
1804 			attr &= ~SMB_FA_ARCHIVE;
1805 		XVA_SET_RTN(xvap, XAT_ARCHIVE);
1806 		anyset = B_TRUE;
1807 	}
1808 	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
1809 		if (xoap->xoa_system)
1810 			attr |= SMB_FA_SYSTEM;
1811 		else
1812 			attr &= ~SMB_FA_SYSTEM;
1813 		XVA_SET_RTN(xvap, XAT_SYSTEM);
1814 		anyset = B_TRUE;
1815 	}
1816 	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
1817 		if (xoap->xoa_readonly)
1818 			attr |= SMB_FA_RDONLY;
1819 		else
1820 			attr &= ~SMB_FA_RDONLY;
1821 		XVA_SET_RTN(xvap, XAT_READONLY);
1822 		anyset = B_TRUE;
1823 	}
1824 	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
1825 		if (xoap->xoa_hidden)
1826 			attr |= SMB_FA_HIDDEN;
1827 		else
1828 			attr &= ~SMB_FA_HIDDEN;
1829 		XVA_SET_RTN(xvap, XAT_HIDDEN);
1830 		anyset = B_TRUE;
1831 	}
1832 
1833 	if (anyset == B_FALSE)
1834 		return (0);	/* no change */
1835 	if (attr == 0)
1836 		attr = SMB_EFA_NORMAL;
1837 
1838 	return (attr);
1839 }
1840 
1841 /*
1842  * smbfs_access_rwx()
1843  * Common function for smbfs_access, etc.
1844  *
1845  * The security model implemented by the FS is unusual
1846  * due to the current "single user mounts" restriction:
1847  * All access under a given mount point uses the CIFS
1848  * credentials established by the owner of the mount.
1849  *
1850  * Most access checking is handled by the CIFS server,
1851  * but we need sufficient Unix access checks here to
1852  * prevent other local Unix users from having access
1853  * to objects under this mount that the uid/gid/mode
1854  * settings in the mount would not allow.
1855  *
1856  * With this model, there is a case where we need the
1857  * ability to do an access check before we have the
1858  * vnode for an object.  This function takes advantage
1859  * of the fact that the uid/gid/mode is per mount, and
1860  * avoids the need for a vnode.
1861  *
1862  * We still (sort of) need a vnode when we call
1863  * secpolicy_vnode_access, but that only uses
1864  * the vtype field, so we can use a pair of fake
1865  * vnodes that have only v_type filled in.
1866  */
1867 static int
smbfs_access_rwx(vfs_t * vfsp,int vtype,int mode,cred_t * cr)1868 smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr)
1869 {
1870 	/* See the secpolicy call below. */
1871 	static const vnode_t tmpl_vdir = { .v_type = VDIR };
1872 	static const vnode_t tmpl_vreg = { .v_type = VREG };
1873 	vattr_t		va;
1874 	vnode_t		*tvp;
1875 	struct smbmntinfo *smi = VFTOSMI(vfsp);
1876 	int shift = 0;
1877 
1878 	/*
1879 	 * Build our (fabricated) vnode attributes.
1880 	 */
1881 	bzero(&va, sizeof (va));
1882 	va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
1883 	va.va_type = vtype;
1884 	va.va_mode = (vtype == VDIR) ?
1885 	    smi->smi_dmode : smi->smi_fmode;
1886 	va.va_uid = smi->smi_uid;
1887 	va.va_gid = smi->smi_gid;
1888 
1889 	/*
1890 	 * Disallow write attempts on read-only file systems,
1891 	 * unless the file is a device or fifo node.  Note:
1892 	 * Inline vn_is_readonly and IS_DEVVP here because
1893 	 * we may not have a vnode ptr.  Original expr. was:
1894 	 * (mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1895 	 */
1896 	if ((mode & VWRITE) &&
1897 	    (vfsp->vfs_flag & VFS_RDONLY) &&
1898 	    !(vtype == VCHR || vtype == VBLK || vtype == VFIFO))
1899 		return (EROFS);
1900 
1901 	/*
1902 	 * Disallow attempts to access mandatory lock files.
1903 	 * Similarly, expand MANDLOCK here.
1904 	 */
1905 	if ((mode & (VWRITE | VREAD | VEXEC)) &&
1906 	    va.va_type == VREG && MANDMODE(va.va_mode))
1907 		return (EACCES);
1908 
1909 	/*
1910 	 * Access check is based on only
1911 	 * one of owner, group, public.
1912 	 * If not owner, then check group.
1913 	 * If not a member of the group,
1914 	 * then check public access.
1915 	 */
1916 	if (crgetuid(cr) != va.va_uid) {
1917 		shift += 3;
1918 		if (!groupmember(va.va_gid, cr))
1919 			shift += 3;
1920 	}
1921 
1922 	/*
1923 	 * We need a vnode for secpolicy_vnode_access,
1924 	 * but the only thing it looks at is v_type,
1925 	 * so pass one of the templates above.
1926 	 */
1927 	tvp = (va.va_type == VDIR) ?
1928 	    (vnode_t *)&tmpl_vdir :
1929 	    (vnode_t *)&tmpl_vreg;
1930 
1931 	return (secpolicy_vnode_access2(cr, tvp, va.va_uid,
1932 	    va.va_mode << shift, mode));
1933 }
1934 
1935 /*
1936  * See smbfs_setattr
1937  */
static int
smbfs_accessx(void *arg, int mode, cred_t *cr)
{
	/* Callback shim for secpolicy_vnode_setattr(); arg is the vnode. */
	vnode_t *vp = arg;
	/*
	 * Note: The caller has checked the current zone,
	 * the SMI_DEAD and VFS_UNMOUNTED flags, etc.
	 */
	return (smbfs_access_rwx(vp->v_vfsp, vp->v_type, mode, cr));
}
1948 
1949 /*
1950  * XXX
1951  * This op should support PSARC 2007/403, Modified Access Checks for CIFS
1952  */
1953 /* ARGSUSED */
1954 static int
smbfs_access(vnode_t * vp,int mode,int flags,cred_t * cr,caller_context_t * ct)1955 smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1956 {
1957 	vfs_t		*vfsp;
1958 	smbmntinfo_t	*smi;
1959 
1960 	vfsp = vp->v_vfsp;
1961 	smi = VFTOSMI(vfsp);
1962 
1963 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1964 		return (EIO);
1965 
1966 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
1967 		return (EIO);
1968 
1969 	return (smbfs_access_rwx(vfsp, vp->v_type, mode, cr));
1970 }
1971 
1972 
1973 /* ARGSUSED */
static int
smbfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
{
	/* Symbolic links are not yet supported by this client. */
	/* Not yet... */
	return (ENOSYS);
}
1980 
1981 
1982 /*
1983  * Flush local dirty pages to stable storage on the server.
1984  *
1985  * If FNODSYNC is specified, then there is nothing to do because
1986  * metadata changes are not cached on the client before being
1987  * sent to the server.
1988  */
1989 /* ARGSUSED */
static int
smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	int		error = 0;
	smbmntinfo_t	*smi;
	smbnode_t	*np;
	struct smb_cred scred;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Metadata-only sync is a no-op; nothing is cached locally. */
	if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
		return (0);

	if ((syncflag & (FSYNC|FDSYNC)) == 0)
		return (0);

	/* Push dirty pages to the server first. */
	error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
	if (error)
		return (error);

	/* Shared lock for n_fid use in _flush */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/* Ask the server to commit the data to stable storage. */
	error = smbfsflush(np, &scred);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (error);
}
2029 
2030 static int
smbfsflush(smbnode_t * np,struct smb_cred * scrp)2031 smbfsflush(smbnode_t *np, struct smb_cred *scrp)
2032 {
2033 	struct smb_share *ssp = np->n_mount->smi_share;
2034 	smb_fh_t *fhp;
2035 	int error;
2036 
2037 	/* Shared lock for n_fid use below. */
2038 	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_READER));
2039 
2040 	if (!(np->n_flag & NFLUSHWIRE))
2041 		return (0);
2042 	if (np->n_fidrefs == 0)
2043 		return (0); /* not open */
2044 	if ((fhp = np->n_fid) == NULL)
2045 		return (0);
2046 
2047 	/* After reconnect, n_fid is invalid */
2048 	if (fhp->fh_vcgenid != ssp->ss_vcgenid)
2049 		return (ESTALE);
2050 
2051 	error = smbfs_smb_flush(ssp, fhp, scrp);
2052 
2053 	if (!error) {
2054 		mutex_enter(&np->r_statelock);
2055 		np->n_flag &= ~NFLUSHWIRE;
2056 		mutex_exit(&np->r_statelock);
2057 	}
2058 	return (error);
2059 }
2060 
2061 /*
2062  * Last reference to vnode MAY be going away.  Caution:
2063  * Note that vn_rele() calls this when vp->v_count == 1
2064  * but drops vp->v_lock before calling.  This function is
2065  * expected to take whatever FS-specific locks it needs,
2066  * then re-enter v_lock and re-check v_count before doing
2067  * any actual destruction.  That happens in smbfs_addfree.
2068  */
2069 /* ARGSUSED */
static void
smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	smbnode_t	*np = VTOSMB(vp);
	int error;

	/*
	 * Don't "bail out" for VFS_UNMOUNTED here,
	 * as we want to do cleanup, etc.
	 * See also pcfs_inactive
	 */

	/*
	 * If this is coming from the wrong zone, we let someone in the right
	 * zone take care of it asynchronously.  We can get here due to
	 * VN_RELE() being called from pageout() or fsflush().  This call may
	 * potentially turn into an expensive no-op if, for instance, v_count
	 * gets incremented in the meantime, but it's still correct.
	 */

	/*
	 * From NFS:rinactive()
	 *
	 * Before freeing anything, wait until all asynchronous
	 * activity is done on this rnode.  This will allow all
	 * asynchronous read ahead and write behind i/o's to
	 * finish.
	 */
	mutex_enter(&np->r_statelock);
	while (np->r_count > 0)
		cv_wait(&np->r_cv, &np->r_statelock);
	mutex_exit(&np->r_statelock);

	/*
	 * Flush and invalidate all pages associated with the vnode.
	 */
	if (vn_has_cached_data(vp)) {
		if ((np->r_flags & RDIRTY) && !np->r_error) {
			error = smbfs_putpage(vp, (u_offset_t)0, 0, 0, cr, ct);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				/* Latch the first hard-space error. */
				mutex_enter(&np->r_statelock);
				if (!np->r_error)
					np->r_error = error;
				mutex_exit(&np->r_statelock);
			}
		}
		smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
	}

	/*
	 * Hand the node to the free list; actual destruction (with
	 * the v_count re-check) happens in smbfs_addfree().
	 */
	smbfs_addfree(np);
}
2121 
2122 /*
2123  * Remote file system operations having to do with directory manipulation.
2124  */
2125 /* ARGSUSED */
2126 static int
smbfs_lookup(vnode_t * dvp,char * nm,vnode_t ** vpp,struct pathname * pnp,int flags,vnode_t * rdir,cred_t * cr,caller_context_t * ct,int * direntflags,pathname_t * realpnp)2127 smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
2128 	int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
2129 	int *direntflags, pathname_t *realpnp)
2130 {
2131 	vfs_t		*vfs;
2132 	smbmntinfo_t	*smi;
2133 	smbnode_t	*dnp;
2134 	int		error;
2135 
2136 	vfs = dvp->v_vfsp;
2137 	smi = VFTOSMI(vfs);
2138 
2139 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2140 		return (EPERM);
2141 
2142 	if (smi->smi_flags & SMI_DEAD || vfs->vfs_flag & VFS_UNMOUNTED)
2143 		return (EIO);
2144 
2145 	dnp = VTOSMB(dvp);
2146 
2147 	/*
2148 	 * Are we looking up extended attributes?  If so, "dvp" is
2149 	 * the file or directory for which we want attributes, and
2150 	 * we need a lookup of the (faked up) attribute directory
2151 	 * before we lookup the rest of the path.
2152 	 */
2153 	if (flags & LOOKUP_XATTR) {
2154 		/*
2155 		 * Require the xattr mount option.
2156 		 */
2157 		if ((vfs->vfs_flag & VFS_XATTR) == 0)
2158 			return (EINVAL);
2159 
2160 		error = smbfs_get_xattrdir(dvp, vpp, cr, flags);
2161 		return (error);
2162 	}
2163 
2164 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp)))
2165 		return (EINTR);
2166 
2167 	error = smbfslookup(dvp, nm, vpp, cr, 1, ct);
2168 
2169 	smbfs_rw_exit(&dnp->r_rwlock);
2170 
2171 	/*
2172 	 * If the caller passes an invalid name here, we'll have
2173 	 * error == EINVAL but want to return ENOENT.  This is
2174 	 * common with things like "ls foo*" with no matches.
2175 	 */
2176 	if (error == EINVAL)
2177 		error = ENOENT;
2178 
2179 	return (error);
2180 }
2181 
/*
 * smbfslookup
 *
 * Common lookup worker used by smbfs_lookup and other vnode ops.
 * Resolves name "nm" under directory "dvp" and returns a held vnode
 * in *vpp.  The names "", "." and ".." are handled locally without
 * going over-the-wire; other names may require an OtW lookup.  When
 * cache_ok is nonzero, a cached smbfs node with still-valid
 * attributes may be returned instead of going OtW.
 *
 * Caller must hold dnp->r_rwlock (reader or writer) -- see ASSERT.
 */
/* ARGSUSED */
static int
smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
	int cache_ok, caller_context_t *ct)
{
	int		error;
	int		supplen; /* supported length */
	vnode_t		*vp;
	smbnode_t	*np;
	smbnode_t	*dnp;
	smbmntinfo_t	*smi;
	/* struct smb_vc	*vcp; */
	const char	*ill;
	const char	*name = (const char *)nm;
	int		nmlen = strlen(nm);
	int		rplen;
	struct smb_cred scred;
	struct smbfattr fa;

	smi = VTOSMI(dvp);
	dnp = VTOSMB(dvp);

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	/* Maximum supported name component length. */
	supplen = 255;

	/*
	 * RWlock must be held, either reader or writer.
	 */
	ASSERT(dnp->r_rwlock.count != 0);

	/*
	 * If lookup is for "", just return dvp.
	 * No need to perform any access checks.
	 */
	if (nmlen == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/*
	 * Can't do lookups in non-directories.
	 */
	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * Need search permission in the directory.
	 */
	error = smbfs_access(dvp, VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * If lookup is for ".", just return dvp.
	 * Access check was done above.
	 */
	if (nmlen == 1 && name[0] == '.') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/*
	 * Now some sanity checks on the name.
	 * First check the length.
	 */
	if (nmlen > supplen)
		return (ENAMETOOLONG);

	/*
	 * Avoid surprises with characters that are
	 * illegal in Windows file names.
	 * Todo: CATIA mappings?
	 */
	ill = illegal_chars;
	if (dnp->n_flag & N_XATTR)
		ill++; /* allow colon */
	if (strpbrk(nm, ill))
		return (EINVAL);

	/*
	 * Special handling for lookup of ".."
	 *
	 * We keep full pathnames (as seen on the server)
	 * so we can just trim off the last component to
	 * get the full pathname of the parent.  Note:
	 * We don't actually copy and modify, but just
	 * compute the trimmed length and pass that with
	 * the current dir path (not null terminated).
	 *
	 * We don't go over-the-wire to get attributes
	 * for ".." because we know it's a directory,
	 * and we can just leave the rest "stale"
	 * until someone does a getattr.
	 */
	if (nmlen == 2 && name[0] == '.' && name[1] == '.') {
		if (dvp->v_flag & VROOT) {
			/*
			 * Already at the root.  This can happen
			 * with directory listings at the root,
			 * which lookup "." and ".." to get the
			 * inode numbers.  Let ".." be the same
			 * as "." in the FS root.
			 */
			VN_HOLD(dvp);
			*vpp = dvp;
			return (0);
		}

		/*
		 * Special case for XATTR directory
		 */
		if (dvp->v_flag & V_XATTRDIR) {
			error = smbfs_xa_parent(dvp, vpp);
			return (error);
		}

		/*
		 * Find the parent path length by trimming back
		 * to the last '\\' separator in our rpath.
		 */
		rplen = dnp->n_rplen;
		ASSERT(rplen > 0);
		while (--rplen >= 0) {
			if (dnp->n_rpath[rplen] == '\\')
				break;
		}
		if (rplen <= 0) {
			/* Found our way to the root. */
			vp = SMBTOV(smi->smi_root);
			VN_HOLD(vp);
			*vpp = vp;
			return (0);
		}
		np = smbfs_node_findcreate(smi,
		    dnp->n_rpath, rplen, NULL, 0, 0,
		    &smbfs_fattr0); /* force create */
		ASSERT(np != NULL);
		vp = SMBTOV(np);
		vp->v_type = VDIR;

		/* Success! */
		*vpp = vp;
		return (0);
	}

	/*
	 * Normal lookup of a name under this directory.
	 * Note we handled "", ".", ".." above.
	 */
	if (cache_ok) {
		/*
		 * The caller indicated that it's OK to use a
		 * cached result for this lookup, so try to
		 * reclaim a node from the smbfs node cache.
		 */
		error = smbfslookup_cache(dvp, nm, nmlen, &vp, cr);
		if (error)
			return (error);
		if (vp != NULL) {
			/* hold taken in lookup_cache */
			*vpp = vp;
			return (0);
		}
	}

	/*
	 * OK, go over-the-wire to get the attributes,
	 * then create the node.
	 */
	smb_credinit(&scred, cr);
	/* Note: this can allocate a new "name" */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred);
	smb_credrele(&scred);
	if (error == ENOTDIR) {
		/*
		 * Lookup failed because this directory was
		 * removed or renamed by another client.
		 * Remove any cached attributes under it.
		 */
		smbfs_attrcache_remove(dnp);
		smbfs_attrcache_prune(dnp);
	}
	if (error)
		goto out;

	error = smbfs_nget(dvp, name, nmlen, &fa, &vp);
	if (error)
		goto out;

	/* Success! */
	*vpp = vp;

out:
	/* smbfs_smb_lookup may have allocated name. */
	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}
2383 
2384 /*
2385  * smbfslookup_cache
2386  *
2387  * Try to reclaim a node from the smbfs node cache.
2388  * Some statistics for DEBUG.
2389  *
2390  * This mechanism lets us avoid many of the five (or more)
2391  * OtW lookup calls per file seen with "ls -l" if we search
2392  * the smbfs node cache for recently inactive(ated) nodes.
2393  */
2394 #ifdef DEBUG
2395 int smbfs_lookup_cache_calls = 0;
2396 int smbfs_lookup_cache_error = 0;
2397 int smbfs_lookup_cache_miss = 0;
2398 int smbfs_lookup_cache_stale = 0;
2399 int smbfs_lookup_cache_hits = 0;
2400 #endif /* DEBUG */
2401 
2402 /* ARGSUSED */
2403 static int
smbfslookup_cache(vnode_t * dvp,char * nm,int nmlen,vnode_t ** vpp,cred_t * cr)2404 smbfslookup_cache(vnode_t *dvp, char *nm, int nmlen,
2405 	vnode_t **vpp, cred_t *cr)
2406 {
2407 	struct vattr va;
2408 	smbnode_t *dnp;
2409 	smbnode_t *np;
2410 	vnode_t *vp;
2411 	int error;
2412 	char sep;
2413 
2414 	dnp = VTOSMB(dvp);
2415 	*vpp = NULL;
2416 
2417 #ifdef DEBUG
2418 	smbfs_lookup_cache_calls++;
2419 #endif
2420 
2421 	/*
2422 	 * First make sure we can get attributes for the
2423 	 * directory.  Cached attributes are OK here.
2424 	 * If we removed or renamed the directory, this
2425 	 * will return ENOENT.  If someone else removed
2426 	 * this directory or file, we'll find out when we
2427 	 * try to open or get attributes.
2428 	 */
2429 	va.va_mask = AT_TYPE | AT_MODE;
2430 	error = smbfsgetattr(dvp, &va, cr);
2431 	if (error) {
2432 #ifdef DEBUG
2433 		smbfs_lookup_cache_error++;
2434 #endif
2435 		return (error);
2436 	}
2437 
2438 	/*
2439 	 * Passing NULL smbfattr here so we will
2440 	 * just look, not create.
2441 	 */
2442 	sep = SMBFS_DNP_SEP(dnp);
2443 	np = smbfs_node_findcreate(dnp->n_mount,
2444 	    dnp->n_rpath, dnp->n_rplen,
2445 	    nm, nmlen, sep, NULL);
2446 	if (np == NULL) {
2447 #ifdef DEBUG
2448 		smbfs_lookup_cache_miss++;
2449 #endif
2450 		return (0);
2451 	}
2452 
2453 	/*
2454 	 * Found it.  Attributes still valid?
2455 	 */
2456 	vp = SMBTOV(np);
2457 	if (np->r_attrtime <= gethrtime()) {
2458 		/* stale */
2459 #ifdef DEBUG
2460 		smbfs_lookup_cache_stale++;
2461 #endif
2462 		VN_RELE(vp);
2463 		return (0);
2464 	}
2465 
2466 	/*
2467 	 * Success!
2468 	 * Caller gets hold from smbfs_node_findcreate
2469 	 */
2470 #ifdef DEBUG
2471 	smbfs_lookup_cache_hits++;
2472 #endif
2473 	*vpp = vp;
2474 	return (0);
2475 }
2476 
2477 
2478 /*
2479  * XXX
2480  * vsecattr_t is new to build 77, and we need to eventually support
2481  * it in order to create an ACL when an object is created.
2482  *
2483  * This op should support the new FIGNORECASE flag for case-insensitive
2484  * lookups, per PSARC 2007/244.
2485  */
2486 /* ARGSUSED */
2487 static int
smbfs_create(vnode_t * dvp,char * nm,struct vattr * va,enum vcexcl exclusive,int mode,vnode_t ** vpp,cred_t * cr,int lfaware,caller_context_t * ct,vsecattr_t * vsecp)2488 smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
2489 	int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
2490 	vsecattr_t *vsecp)
2491 {
2492 	int		error;
2493 	vfs_t		*vfsp;
2494 	vnode_t		*vp;
2495 	smbnode_t	*np;
2496 	smbnode_t	*dnp;
2497 	smbmntinfo_t	*smi;
2498 	struct vattr	vattr;
2499 	struct smbfattr	fattr;
2500 	struct smb_cred	scred;
2501 	const char *name = (const char *)nm;
2502 	int		nmlen = strlen(nm);
2503 	uint32_t	disp;
2504 	smb_fh_t	*fid = NULL;
2505 	int		xattr;
2506 
2507 	vfsp = dvp->v_vfsp;
2508 	smi = VFTOSMI(vfsp);
2509 	dnp = VTOSMB(dvp);
2510 	vp = NULL;
2511 
2512 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2513 		return (EPERM);
2514 
2515 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
2516 		return (EIO);
2517 
2518 	/*
2519 	 * Note: this may break mknod(2) calls to create a directory,
2520 	 * but that's obscure use.  Some other filesystems do this.
2521 	 * Todo: redirect VDIR type here to _mkdir.
2522 	 */
2523 	if (va->va_type != VREG)
2524 		return (EINVAL);
2525 
2526 	/*
2527 	 * If the pathname is "", just use dvp, no checks.
2528 	 * Do this outside of the rwlock (like zfs).
2529 	 */
2530 	if (nmlen == 0) {
2531 		VN_HOLD(dvp);
2532 		*vpp = dvp;
2533 		return (0);
2534 	}
2535 
2536 	/* Don't allow "." or ".." through here. */
2537 	if ((nmlen == 1 && name[0] == '.') ||
2538 	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
2539 		return (EISDIR);
2540 
2541 	/*
2542 	 * We make a copy of the attributes because the caller does not
2543 	 * expect us to change what va points to.
2544 	 */
2545 	vattr = *va;
2546 
2547 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2548 		return (EINTR);
2549 	smb_credinit(&scred, cr);
2550 
2551 	/*
2552 	 * NFS needs to go over the wire, just to be sure whether the
2553 	 * file exists or not.  Using a cached result is dangerous in
2554 	 * this case when making a decision regarding existence.
2555 	 *
2556 	 * The SMB protocol does NOT really need to go OTW here
2557 	 * thanks to the expressive NTCREATE disposition values.
2558 	 * Unfortunately, to do Unix access checks correctly,
2559 	 * we need to know if the object already exists.
2560 	 * When the object does not exist, we need VWRITE on
2561 	 * the directory.  Note: smbfslookup() checks VEXEC.
2562 	 */
2563 	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2564 	if (error == 0) {
2565 		/*
2566 		 * The file already exists.  Error?
2567 		 * NB: have a hold from smbfslookup
2568 		 */
2569 		if (exclusive == EXCL) {
2570 			error = EEXIST;
2571 			VN_RELE(vp);
2572 			goto out;
2573 		}
2574 		/*
2575 		 * Verify requested access.
2576 		 */
2577 		error = smbfs_access(vp, mode, 0, cr, ct);
2578 		if (error) {
2579 			VN_RELE(vp);
2580 			goto out;
2581 		}
2582 
2583 		/*
2584 		 * Truncate (if requested).
2585 		 */
2586 		if ((vattr.va_mask & AT_SIZE) && vp->v_type == VREG) {
2587 			np = VTOSMB(vp);
2588 			/*
2589 			 * Check here for large file truncation by
2590 			 * LF-unaware process, like ufs_create().
2591 			 */
2592 			if (!(lfaware & FOFFMAX)) {
2593 				mutex_enter(&np->r_statelock);
2594 				if (np->r_size > MAXOFF32_T)
2595 					error = EOVERFLOW;
2596 				mutex_exit(&np->r_statelock);
2597 			}
2598 			if (error) {
2599 				VN_RELE(vp);
2600 				goto out;
2601 			}
2602 			vattr.va_mask = AT_SIZE;
2603 			error = smbfssetattr(vp, &vattr, 0, cr);
2604 			if (error) {
2605 				VN_RELE(vp);
2606 				goto out;
2607 			}
2608 #ifdef	SMBFS_VNEVENT
2609 			/* Existing file was truncated */
2610 			vnevent_create(vp, ct);
2611 #endif
2612 			/* invalidate pages done in smbfssetattr() */
2613 		}
2614 		/* Success! */
2615 		*vpp = vp;
2616 		goto out;
2617 	}
2618 
2619 	/*
2620 	 * The file did not exist.  Need VWRITE in the directory.
2621 	 */
2622 	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
2623 	if (error)
2624 		goto out;
2625 
2626 	/*
2627 	 * Now things get tricky.  We also need to check the
2628 	 * requested open mode against the file we may create.
2629 	 * See comments at smbfs_access_rwx
2630 	 */
2631 	error = smbfs_access_rwx(vfsp, VREG, mode, cr);
2632 	if (error)
2633 		goto out;
2634 
2635 	/*
2636 	 * Now the code derived from Darwin,
2637 	 * but with greater use of NT_CREATE
2638 	 * disposition options.  Much changed.
2639 	 *
2640 	 * Create (or open) a new child node.
2641 	 * Note we handled "." and ".." above.
2642 	 */
2643 
2644 	if (exclusive == EXCL)
2645 		disp = NTCREATEX_DISP_CREATE;
2646 	else {
2647 		/* Truncate regular files if requested. */
2648 		if ((va->va_type == VREG) &&
2649 		    (va->va_mask & AT_SIZE) &&
2650 		    (va->va_size == 0))
2651 			disp = NTCREATEX_DISP_OVERWRITE_IF;
2652 		else
2653 			disp = NTCREATEX_DISP_OPEN_IF;
2654 	}
2655 	xattr = (dnp->n_flag & N_XATTR) ? 1 : 0;
2656 	error = smbfs_smb_create(dnp,
2657 	    name, nmlen, xattr,
2658 	    disp, &scred, &fid);
2659 	if (error)
2660 		goto out;
2661 
2662 	/*
2663 	 * Should use the fid to get/set the size
2664 	 * while we have it opened here.  See above.
2665 	 */
2666 	smbfs_smb_close(fid);
2667 
2668 	/*
2669 	 * In the open case, the name may differ a little
2670 	 * from what we passed to create (case, etc.)
2671 	 * so call lookup to get the (opened) name.
2672 	 *
2673 	 * XXX: Could avoid this extra lookup if the
2674 	 * "createact" result from NT_CREATE says we
2675 	 * created the object.
2676 	 */
2677 	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
2678 	if (error)
2679 		goto out;
2680 
2681 	/* update attr and directory cache */
2682 	smbfs_attr_touchdir(dnp);
2683 
2684 	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
2685 	if (error)
2686 		goto out;
2687 
2688 	/* Success! */
2689 	*vpp = vp;
2690 	error = 0;
2691 
2692 out:
2693 	smb_credrele(&scred);
2694 	smbfs_rw_exit(&dnp->r_rwlock);
2695 	if (name != nm)
2696 		smbfs_name_free(name, nmlen);
2697 	return (error);
2698 }
2699 
2700 /*
2701  * XXX
2702  * This op should support the new FIGNORECASE flag for case-insensitive
2703  * lookups, per PSARC 2007/244.
2704  */
2705 /* ARGSUSED */
2706 static int
smbfs_remove(vnode_t * dvp,char * nm,cred_t * cr,caller_context_t * ct,int flags)2707 smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
2708 	int flags)
2709 {
2710 	struct smb_cred	scred;
2711 	vnode_t		*vp = NULL;
2712 	smbnode_t	*dnp = VTOSMB(dvp);
2713 	smbmntinfo_t	*smi = VTOSMI(dvp);
2714 	int		error;
2715 
2716 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2717 		return (EPERM);
2718 
2719 	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2720 		return (EIO);
2721 
2722 	/*
2723 	 * Verify access to the dirctory.
2724 	 */
2725 	error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
2726 	if (error)
2727 		return (error);
2728 
2729 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2730 		return (EINTR);
2731 	smb_credinit(&scred, cr);
2732 
2733 	/* Lookup the file to remove. */
2734 	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2735 	if (error != 0)
2736 		goto out;
2737 
2738 	/* Don't allow unlink of a directory. */
2739 	if (vp->v_type == VDIR) {
2740 		error = EPERM;
2741 		goto out;
2742 	}
2743 
2744 	/*
2745 	 * Do the real remove work
2746 	 */
2747 	error = smbfsremove(dvp, vp, &scred, flags);
2748 	if (error != 0)
2749 		goto out;
2750 
2751 #ifdef	SMBFS_VNEVENT
2752 	vnevent_remove(vp, dvp, nm, ct);
2753 #endif
2754 
2755 out:
2756 	if (vp != NULL)
2757 		VN_RELE(vp);
2758 
2759 	smb_credrele(&scred);
2760 	smbfs_rw_exit(&dnp->r_rwlock);
2761 
2762 	return (error);
2763 }
2764 
2765 /*
2766  * smbfsremove does the real work of removing in SMBFS
2767  * Caller has done dir access checks etc.
2768  *
2769  * The normal way to delete a file over SMB is open it (with DELETE access),
2770  * set the "delete-on-close" flag, and close the file.  The problem for Unix
2771  * applications is that they expect the file name to be gone once the unlink
2772  * completes, and the SMB server does not actually delete the file until ALL
2773  * opens of that file are closed.  We can't assume our open handles are the
2774  * only open handles on a file we're deleting, so to be safe we'll try to
2775  * rename the file to a temporary name and then set delete-on-close.  If we
2776  * fail to set delete-on-close (i.e. because other opens prevent it) then
2777  * undo the changes we made and give up with EBUSY.  Note that we might have
2778  * permission to delete a file but lack permission to rename, so we want to
2779  * continue in cases where rename fails.  As an optimization, only do the
2780  * rename when we have the file open.
2781  *
2782  * This is similar to what NFS does when deleting a file that has local opens,
2783  * but thanks to SMB delete-on-close, we don't need to keep track of when the
2784  * last local open goes away and send a delete.  The server does that for us.
2785  */
2786 /* ARGSUSED */
2787 static int
smbfsremove(vnode_t * dvp,vnode_t * vp,struct smb_cred * scred,int flags)2788 smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
2789     int flags)
2790 {
2791 	smbnode_t	*dnp = VTOSMB(dvp);
2792 	smbnode_t	*np = VTOSMB(vp);
2793 	smbmntinfo_t	*smi = np->n_mount;
2794 	char		*tmpname = NULL;
2795 	int		tnlen;
2796 	int		error;
2797 	smb_fh_t	*fid = NULL;
2798 	boolean_t	renamed = B_FALSE;
2799 
2800 	/*
2801 	 * The dvp RWlock must be held as writer.
2802 	 */
2803 	ASSERT(dnp->r_rwlock.owner == curthread);
2804 
2805 	/*
2806 	 * We need to flush any dirty pages which happen to
2807 	 * be hanging around before removing the file.  This
2808 	 * shouldn't happen very often and mostly on file
2809 	 * systems mounted "nocto".
2810 	 */
2811 	if (vn_has_cached_data(vp) &&
2812 	    ((np->r_flags & RDIRTY) || np->r_count > 0)) {
2813 		error = smbfs_putpage(vp, (offset_t)0, 0, 0,
2814 		    scred->scr_cred, NULL);
2815 		if (error && (error == ENOSPC || error == EDQUOT)) {
2816 			mutex_enter(&np->r_statelock);
2817 			if (!np->r_error)
2818 				np->r_error = error;
2819 			mutex_exit(&np->r_statelock);
2820 		}
2821 	}
2822 
2823 	/*
2824 	 * Get a file handle with delete access.
2825 	 * Close this FID before return.
2826 	 */
2827 	error = smbfs_smb_tmpopen(np, STD_RIGHT_DELETE_ACCESS,
2828 	    scred, &fid);
2829 	if (error) {
2830 		SMBVDEBUG("error %d opening %s\n",
2831 		    error, np->n_rpath);
2832 		goto out;
2833 	}
2834 	ASSERT(fid != NULL);
2835 
2836 	/*
2837 	 * If we have the file open, try to rename it to a temporary name.
2838 	 * If we can't rename, continue on and try setting DoC anyway.
2839 	 * Unnecessary for directories.
2840 	 */
2841 	if (vp->v_type != VDIR && vp->v_count > 1 && np->n_fidrefs > 0) {
2842 		tmpname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2843 		tnlen = smbfs_newname(tmpname, MAXNAMELEN);
2844 		error = smbfs_smb_rename(dnp, np, dnp, tmpname, tnlen,
2845 		    fid, scred);
2846 		if (error != 0) {
2847 			SMBVDEBUG("error %d renaming %s -> %s\n",
2848 			    error, np->n_rpath, tmpname);
2849 			/* Keep going without the rename. */
2850 		} else {
2851 			renamed = B_TRUE;
2852 		}
2853 	}
2854 
2855 	/*
2856 	 * Mark the file as delete-on-close.  If we can't,
2857 	 * undo what we did and err out.
2858 	 */
2859 	error = smbfs_smb_setdisp(smi->smi_share, fid, 1, scred);
2860 	if (error != 0) {
2861 		SMBVDEBUG("error %d setting DoC on %s\n",
2862 		    error, np->n_rpath);
2863 		/*
2864 		 * Failed to set DoC. If we renamed, undo that.
2865 		 * Need np->n_rpath relative to parent (dnp).
2866 		 * Use parent path name length plus one for
2867 		 * the separator ('/' or ':')
2868 		 */
2869 		if (renamed) {
2870 			char *oldname;
2871 			int oldnlen;
2872 			int err2;
2873 
2874 			oldname = np->n_rpath + (dnp->n_rplen + 1);
2875 			oldnlen = np->n_rplen - (dnp->n_rplen + 1);
2876 			err2 = smbfs_smb_rename(dnp, np, dnp, oldname, oldnlen,
2877 			    fid, scred);
2878 			SMBVDEBUG("error %d un-renaming %s -> %s\n",
2879 			    err2, tmpname, np->n_rpath);
2880 		}
2881 		error = EBUSY;
2882 		goto out;
2883 	}
2884 	/* Done! */
2885 	smbfs_attrcache_remove(np);
2886 	smbfs_attrcache_prune(np);
2887 
2888 out:
2889 	if (tmpname != NULL)
2890 		kmem_free(tmpname, MAXNAMELEN);
2891 	if (fid != NULL)
2892 		smbfs_smb_tmpclose(np, fid);
2893 
2894 	if (error == 0) {
2895 		/* Keep lookup from finding this node anymore. */
2896 		smbfs_rmhash(np);
2897 	}
2898 
2899 	return (error);
2900 }
2901 
2902 
2903 /* ARGSUSED */
2904 static int
smbfs_link(vnode_t * tdvp,vnode_t * svp,char * tnm,cred_t * cr,caller_context_t * ct,int flags)2905 smbfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2906 	caller_context_t *ct, int flags)
2907 {
2908 	/* Not yet... */
2909 	return (ENOSYS);
2910 }
2911 
2912 
2913 /*
2914  * XXX
2915  * This op should support the new FIGNORECASE flag for case-insensitive
2916  * lookups, per PSARC 2007/244.
2917  */
2918 /* ARGSUSED */
2919 static int
smbfs_rename(vnode_t * odvp,char * onm,vnode_t * ndvp,char * nnm,cred_t * cr,caller_context_t * ct,int flags)2920 smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2921 	caller_context_t *ct, int flags)
2922 {
2923 	struct smb_cred	scred;
2924 	smbnode_t	*odnp = VTOSMB(odvp);
2925 	smbnode_t	*ndnp = VTOSMB(ndvp);
2926 	vnode_t		*ovp;
2927 	int error;
2928 
2929 	if (curproc->p_zone != VTOSMI(odvp)->smi_zone_ref.zref_zone ||
2930 	    curproc->p_zone != VTOSMI(ndvp)->smi_zone_ref.zref_zone)
2931 		return (EPERM);
2932 
2933 	if (VTOSMI(odvp)->smi_flags & SMI_DEAD ||
2934 	    VTOSMI(ndvp)->smi_flags & SMI_DEAD ||
2935 	    odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED ||
2936 	    ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2937 		return (EIO);
2938 
2939 	if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2940 	    strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
2941 		return (EINVAL);
2942 
2943 	/*
2944 	 * Check that everything is on the same filesystem.
2945 	 * vn_rename checks the fsid's, but in case we don't
2946 	 * fill those in correctly, check here too.
2947 	 */
2948 	if (odvp->v_vfsp != ndvp->v_vfsp)
2949 		return (EXDEV);
2950 
2951 	/*
2952 	 * Need write access on source and target.
2953 	 * Server takes care of most checks.
2954 	 */
2955 	error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
2956 	if (error)
2957 		return (error);
2958 	if (odvp != ndvp) {
2959 		error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
2960 		if (error)
2961 			return (error);
2962 	}
2963 
2964 	/*
2965 	 * Need to lock both old/new dirs as writer.
2966 	 *
2967 	 * Avoid deadlock here on old vs new directory nodes
2968 	 * by always taking the locks in order of address.
2969 	 * The order is arbitrary, but must be consistent.
2970 	 */
2971 	if (odnp < ndnp) {
2972 		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
2973 		    SMBINTR(odvp)))
2974 			return (EINTR);
2975 		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
2976 		    SMBINTR(ndvp))) {
2977 			smbfs_rw_exit(&odnp->r_rwlock);
2978 			return (EINTR);
2979 		}
2980 	} else {
2981 		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
2982 		    SMBINTR(ndvp)))
2983 			return (EINTR);
2984 		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
2985 		    SMBINTR(odvp))) {
2986 			smbfs_rw_exit(&ndnp->r_rwlock);
2987 			return (EINTR);
2988 		}
2989 	}
2990 	smb_credinit(&scred, cr);
2991 
2992 	/* Lookup the "old" name */
2993 	error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
2994 	if (error == 0) {
2995 		/*
2996 		 * Do the real rename work
2997 		 */
2998 		error = smbfsrename(odvp, ovp, ndvp, nnm, &scred, flags);
2999 		VN_RELE(ovp);
3000 	}
3001 
3002 	smb_credrele(&scred);
3003 	smbfs_rw_exit(&odnp->r_rwlock);
3004 	smbfs_rw_exit(&ndnp->r_rwlock);
3005 
3006 	return (error);
3007 }
3008 
3009 /*
3010  * smbfsrename does the real work of renaming in SMBFS
3011  * Caller has done dir access checks etc.
3012  */
3013 /* ARGSUSED */
3014 static int
smbfsrename(vnode_t * odvp,vnode_t * ovp,vnode_t * ndvp,char * nnm,struct smb_cred * scred,int flags)3015 smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp, char *nnm,
3016     struct smb_cred *scred, int flags)
3017 {
3018 	smbnode_t	*odnp = VTOSMB(odvp);
3019 	smbnode_t	*onp = VTOSMB(ovp);
3020 	smbnode_t	*ndnp = VTOSMB(ndvp);
3021 	vnode_t		*nvp = NULL;
3022 	int		error;
3023 	int		nvp_locked = 0;
3024 	smb_fh_t	*fid = NULL;
3025 
3026 	/* Things our caller should have checked. */
3027 	ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone);
3028 	ASSERT(odvp->v_vfsp == ndvp->v_vfsp);
3029 	ASSERT(odnp->r_rwlock.owner == curthread);
3030 	ASSERT(ndnp->r_rwlock.owner == curthread);
3031 
3032 	/*
3033 	 * Lookup the target file.  If it exists, it needs to be
3034 	 * checked to see whether it is a mount point and whether
3035 	 * it is active (open).
3036 	 */
3037 	error = smbfslookup(ndvp, nnm, &nvp, scred->scr_cred, 0, NULL);
3038 	if (!error) {
3039 		/*
3040 		 * Target (nvp) already exists.  Check that it
3041 		 * has the same type as the source.  The server
3042 		 * will check this also, (and more reliably) but
3043 		 * this lets us return the correct error codes.
3044 		 */
3045 		if (ovp->v_type == VDIR) {
3046 			if (nvp->v_type != VDIR) {
3047 				error = ENOTDIR;
3048 				goto out;
3049 			}
3050 		} else {
3051 			if (nvp->v_type == VDIR) {
3052 				error = EISDIR;
3053 				goto out;
3054 			}
3055 		}
3056 
3057 		/*
3058 		 * POSIX dictates that when the source and target
3059 		 * entries refer to the same file object, rename
3060 		 * must do nothing and exit without error.
3061 		 */
3062 		if (ovp == nvp) {
3063 			error = 0;
3064 			goto out;
3065 		}
3066 
3067 		/*
3068 		 * Also must ensure the target is not a mount point,
3069 		 * and keep mount/umount away until we're done.
3070 		 */
3071 		if (vn_vfsrlock(nvp)) {
3072 			error = EBUSY;
3073 			goto out;
3074 		}
3075 		nvp_locked = 1;
3076 		if (vn_mountedvfs(nvp) != NULL) {
3077 			error = EBUSY;
3078 			goto out;
3079 		}
3080 
3081 		/*
3082 		 * CIFS may give a SHARING_VIOLATION error when
3083 		 * trying to rename onto an exising object,
3084 		 * so try to remove the target first.
3085 		 * (Only for files, not directories.)
3086 		 */
3087 		if (nvp->v_type == VDIR) {
3088 			error = EEXIST;
3089 			goto out;
3090 		}
3091 		error = smbfsremove(ndvp, nvp, scred, flags);
3092 		if (error != 0)
3093 			goto out;
3094 
3095 		/*
3096 		 * OK, removed the target file.  Continue as if
3097 		 * lookup target had failed (nvp == NULL).
3098 		 */
3099 		vn_vfsunlock(nvp);
3100 		nvp_locked = 0;
3101 		VN_RELE(nvp);
3102 		nvp = NULL;
3103 	} /* nvp */
3104 
3105 	/*
3106 	 * Get a file handle with delete access.
3107 	 * Close this FID before return.
3108 	 */
3109 	error = smbfs_smb_tmpopen(onp, STD_RIGHT_DELETE_ACCESS,
3110 	    scred, &fid);
3111 	if (error) {
3112 		SMBVDEBUG("error %d opening %s\n",
3113 		    error, onp->n_rpath);
3114 		goto out;
3115 	}
3116 
3117 	smbfs_attrcache_remove(onp);
3118 	error = smbfs_smb_rename(odnp, onp, ndnp, nnm, strlen(nnm),
3119 	    fid, scred);
3120 
3121 	smbfs_smb_tmpclose(onp, fid);
3122 
3123 	/*
3124 	 * If the old name should no longer exist,
3125 	 * discard any cached attributes under it.
3126 	 */
3127 	if (error == 0) {
3128 		smbfs_attrcache_prune(onp);
3129 		/* SMBFS_VNEVENT... */
3130 	}
3131 
3132 out:
3133 	if (nvp) {
3134 		if (nvp_locked)
3135 			vn_vfsunlock(nvp);
3136 		VN_RELE(nvp);
3137 	}
3138 
3139 	return (error);
3140 }
3141 
3142 /*
3143  * XXX
3144  * vsecattr_t is new to build 77, and we need to eventually support
3145  * it in order to create an ACL when an object is created.
3146  *
3147  * This op should support the new FIGNORECASE flag for case-insensitive
3148  * lookups, per PSARC 2007/244.
3149  */
/* ARGSUSED */
static int
smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
	cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	vnode_t		*vp;
	struct smbnode	*dnp = VTOSMB(dvp);
	struct smbmntinfo *smi = VTOSMI(dvp);
	struct smb_cred	scred;
	struct smbfattr	fattr;
	const char		*name = (const char *) nm;
	int		nmlen = strlen(name);
	int		error;

	/* Disallow cross-zone calls and operations on a dead mount. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* "." and ".." always exist, so creating them is an error. */
	if ((nmlen == 1 && name[0] == '.') ||
	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
		return (EEXIST);

	/* Only plain files are allowed in V_XATTRDIR. */
	if (dvp->v_flag & V_XATTRDIR)
		return (EINVAL);

	/* Serialize with other namespace operations on this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * Require write access in the containing directory.
	 */
	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
	if (error)
		goto out;

	/* Create the directory over the wire. */
	error = smbfs_smb_mkdir(dnp, name, nmlen, &scred);
	if (error)
		goto out;

	/*
	 * Look up the new directory to get its attributes.
	 * Note: this call takes &name/&nmlen, and the (name != nm)
	 * test at "out" suggests it may replace "name" with an
	 * allocated copy that we must free below.
	 */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
	if (error)
		goto out;

	/* The parent directory changed; refresh its cached times. */
	smbfs_attr_touchdir(dnp);

	/* Get (or create) the vnode for the new directory. */
	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
	if (error)
		goto out;

	/* Success! */
	*vpp = vp;
	error = 0;
out:
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);

	/* Free the name if smbfs_smb_lookup replaced it. */
	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}
3215 
3216 /*
3217  * XXX
3218  * This op should support the new FIGNORECASE flag for case-insensitive
3219  * lookups, per PSARC 2007/244.
3220  */
/* ARGSUSED */
static int
smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
	caller_context_t *ct, int flags)
{
	struct smb_cred	scred;
	vnode_t		*vp = NULL;
	int		vp_locked = 0;	/* holding vn_vfsrlock on vp? */
	struct smbmntinfo *smi = VTOSMI(dvp);
	struct smbnode	*dnp = VTOSMB(dvp);
	struct smbnode	*np;
	int		error;

	/* Disallow cross-zone calls and operations on a dead mount. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/*
	 * Verify access to the directory.
	 */
	error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/* Serialize with other namespace operations on this directory. */
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * First lookup the entry to be removed.
	 */
	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
	if (error)
		goto out;
	np = VTOSMB(vp);

	/*
	 * Disallow rmdir of "." or current dir, or the FS root.
	 * Also make sure it's a directory, not a mount point,
	 * and lock to keep mount/umount away until we're done.
	 */
	if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) {
		error = EINVAL;
		goto out;
	}
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}
	if (vn_vfsrlock(vp)) {
		error = EBUSY;
		goto out;
	}
	vp_locked = 1;
	if (vn_mountedvfs(vp) != NULL) {
		error = EBUSY;
		goto out;
	}

	/*
	 * Do the real rmdir work
	 */
	error = smbfsremove(dvp, vp, &scred, flags);
	if (error)
		goto out;

#ifdef	SMBFS_VNEVENT
	vnevent_rmdir(vp, dvp, nm, ct);
#endif

	/*
	 * Mark the parent modified and purge the removed node.
	 * NOTE(review): this takes np->r_statelock (the removed
	 * child's lock) while setting dnp->n_flag (the parent's
	 * flag); presumably dnp->r_statelock was intended --
	 * confirm before changing.
	 */
	mutex_enter(&np->r_statelock);
	dnp->n_flag |= NMODIFIED;
	mutex_exit(&np->r_statelock);
	smbfs_attr_touchdir(dnp);
	smbfs_rmhash(np);

out:
	if (vp) {
		if (vp_locked)
			vn_vfsunlock(vp);
		VN_RELE(vp);
	}
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);

	return (error);
}
3310 
3311 
3312 /* ARGSUSED */
3313 static int
smbfs_symlink(vnode_t * dvp,char * lnm,struct vattr * tva,char * tnm,cred_t * cr,caller_context_t * ct,int flags)3314 smbfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
3315 	caller_context_t *ct, int flags)
3316 {
3317 	/* Not yet... */
3318 	return (ENOSYS);
3319 }
3320 
3321 
3322 /* ARGSUSED */
3323 static int
smbfs_readdir(vnode_t * vp,struct uio * uiop,cred_t * cr,int * eofp,caller_context_t * ct,int flags)3324 smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
3325 	caller_context_t *ct, int flags)
3326 {
3327 	struct smbnode	*np = VTOSMB(vp);
3328 	int		error = 0;
3329 	smbmntinfo_t	*smi;
3330 
3331 	smi = VTOSMI(vp);
3332 
3333 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3334 		return (EIO);
3335 
3336 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3337 		return (EIO);
3338 
3339 	/*
3340 	 * Require read access in the directory.
3341 	 */
3342 	error = smbfs_access(vp, VREAD, 0, cr, ct);
3343 	if (error)
3344 		return (error);
3345 
3346 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));
3347 
3348 	/*
3349 	 * Todo readdir cache here
3350 	 *
3351 	 * I am serializing the entire readdir opreation
3352 	 * now since we have not yet implemented readdir
3353 	 * cache. This fix needs to be revisited once
3354 	 * we implement readdir cache.
3355 	 */
3356 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
3357 		return (EINTR);
3358 
3359 	error = smbfs_readvdir(vp, uiop, cr, eofp, ct);
3360 
3361 	smbfs_rw_exit(&np->r_lkserlock);
3362 
3363 	return (error);
3364 }
3365 
/*
 * Do the real work of VOP_READDIR: fetch directory entries
 * from the server via FindFirst/FindNext and copy them out
 * to the caller's buffer as dirent64 records.  The "." and
 * ".." entries (offsets 0 and 1) are synthesized locally.
 * The caller (smbfs_readdir) holds r_lkserlock as writer.
 */
/* ARGSUSED */
static int
smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
	caller_context_t *ct)
{
	/*
	 * Note: "limit" tells the SMB-level FindFirst/FindNext
	 * functions how many directory entries to request in
	 * each OtW call.  It needs to be large enough so that
	 * we don't make lots of tiny OtW requests, but there's
	 * no point making it larger than the maximum number of
	 * OtW entries that would fit in a maximum sized trans2
	 * response (64k / 48).  Beyond that, it's just tuning.
	 * WinNT used 512, Win2k used 1366.  We use 1000.
	 */
	static const int limit = 1000;
	/* Largest possible dirent size. */
	static const size_t dbufsiz = DIRENT64_RECLEN(SMB_MAXFNAMELEN);
	struct smb_cred scred;
	vnode_t		*newvp;
	struct smbnode	*np = VTOSMB(vp);
	struct smbfs_fctx *ctx;
	struct dirent64 *dp;
	ssize_t		save_resid;
	offset_t	save_offset; /* 64 bits */
	int		offset; /* yes, 32 bits */
	int		nmlen, error;
	ushort_t	reclen;

	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);

	/* Make sure we serialize for n_dirseq use. */
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	/*
	 * Make sure smbfs_open filled in n_dirseq
	 */
	if (np->n_dirseq == NULL)
		return (EBADF);

	/* Check for overflow of (32-bit) directory offset. */
	if (uio->uio_loffset < 0 || uio->uio_loffset > INT32_MAX ||
	    (uio->uio_loffset + uio->uio_resid) > INT32_MAX)
		return (EINVAL);

	/* Require space for at least one dirent. */
	if (uio->uio_resid < dbufsiz)
		return (EINVAL);

	SMBVDEBUG("dirname='%s'\n", np->n_rpath);
	smb_credinit(&scred, cr);
	/* Scratch dirent, reused for every entry we copy out. */
	dp = kmem_alloc(dbufsiz, KM_SLEEP);

	/* Saved so the error path can undo any partial copyout. */
	save_resid = uio->uio_resid;
	save_offset = uio->uio_loffset;
	offset = uio->uio_offset;
	SMBVDEBUG("in: offset=%d, resid=%d\n",
	    (int)uio->uio_offset, (int)uio->uio_resid);
	error = 0;

	/*
	 * Generate the "." and ".." entries here so we can
	 * (1) make sure they appear (but only once), and
	 * (2) deal with getting their I numbers which the
	 * findnext below does only for normal names.
	 */
	while (offset < FIRST_DIROFS) {
		/*
		 * Tricky bit filling in the first two:
		 * offset 0 is ".", offset 1 is ".."
		 * so strlen of these is offset+1.
		 */
		reclen = DIRENT64_RECLEN(offset + 1);
		if (uio->uio_resid < reclen) {
			error = EINVAL;
			goto out;
		}
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		dp->d_name[0] = '.';
		dp->d_name[1] = '.';
		dp->d_name[offset + 1] = '\0';

		/*
		 * Want the real I-numbers for the "." and ".."
		 * entries.  For these two names, we know that
		 * smbfslookup can get the nodes efficiently.
		 */
		error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct);
		if (error) {
			dp->d_ino = np->n_ino + offset; /* fiction */
		} else {
			dp->d_ino = VTOSMB(newvp)->n_ino;
			VN_RELE(newvp);
		}
		/*
		 * Note: d_off is the offset that a user-level program
		 * should seek to for reading the NEXT directory entry.
		 * See libc: readdir, telldir, seekdir
		 */
		dp->d_off = offset + 1;
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/*
		 * Note: uiomove updates uio->uio_offset,
		 * but we want it to be our "cookie" value,
		 * which just counts dirents ignoring size.
		 */
		uio->uio_offset = ++offset;
	}

	/*
	 * If there was a backward seek, we have to reopen.
	 */
	if (offset < np->n_dirofs) {
		SMBVDEBUG("Reopening search %d:%d\n",
		    offset, np->n_dirofs);
		error = smbfs_smb_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		/* free the old one */
		(void) smbfs_smb_findclose(np->n_dirseq, &scred);
		/* save the new one */
		np->n_dirseq = ctx;
		np->n_dirofs = FIRST_DIROFS;
	} else {
		ctx = np->n_dirseq;
	}

	/*
	 * Skip entries before the requested offset.
	 */
	while (np->n_dirofs < offset) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;
	}

	/*
	 * While there's room in the caller's buffer:
	 *	get a directory entry from SMB,
	 *	convert to a dirent, copyout.
	 * We stop when there is no longer room for a
	 * maximum sized dirent because we must decide
	 * before we know anything about the next entry.
	 */
	while (uio->uio_resid >= dbufsiz) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;

		/* Sanity check the name length. */
		nmlen = ctx->f_nmlen;
		if (nmlen > SMB_MAXFNAMELEN) {
			nmlen = SMB_MAXFNAMELEN;
			SMBVDEBUG("Truncating name: %s\n", ctx->f_name);
		}
		if (smbfs_fastlookup) {
			/* See comment at smbfs_fastlookup above. */
			if (smbfs_nget(vp, ctx->f_name, nmlen,
			    &ctx->f_attr, &newvp) == 0)
				VN_RELE(newvp);
		}

		reclen = DIRENT64_RECLEN(nmlen);
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		bcopy(ctx->f_name, dp->d_name, nmlen);
		dp->d_name[nmlen] = '\0';
		dp->d_ino = ctx->f_inum;
		dp->d_off = offset + 1;	/* See d_off comment above */
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/* See comment re. uio_offset above. */
		uio->uio_offset = ++offset;
	}

out:
	/*
	 * When we come to the end of a directory, the
	 * SMB-level functions return ENOENT, but the
	 * caller is not expecting an error return.
	 *
	 * Also note that we must delay the call to
	 * smbfs_smb_findclose(np->n_dirseq, ...)
	 * until smbfs_close so that all reads at the
	 * end of the directory will return no data.
	 */
	if (error == ENOENT) {
		error = 0;
		if (eofp)
			*eofp = 1;
	}
	/*
	 * If we encountered an error (i.e. "access denied")
	 * from the FindFirst call, we will have copied out
	 * the "." and ".." entries leaving offset == 2.
	 * In that case, restore the original offset/resid
	 * so the caller gets no data with the error.
	 *
	 * If we encountered an error after reading some entries,
	 * the server's notion of the directory offset may be
	 * different from what we have, so invalidate n_dirofs
	 * to make sure the caller will reopen the remote.
	 * Most callers will just close the dir after an error
	 * here but they could readdir again.
	 */
	if (error != 0) {
		if (offset <= FIRST_DIROFS) {
			uio->uio_loffset = save_offset;
			uio->uio_resid = save_resid;
		} else {
			np->n_dirofs = INT32_MAX;
		}
	}
	SMBVDEBUG("out: error=%d offset=%d, resid=%d\n",
	    error, (int)uio->uio_offset, (int)uio->uio_resid);

	kmem_free(dp, dbufsiz);
	smb_credrele(&scred);
	return (error);
}
3596 
3597 /*
3598  * Here NFS has: nfs3_bio
3599  * See smbfs_bio above.
3600  */
3601 
3602 /* ARGSUSED */
3603 static int
smbfs_fid(vnode_t * vp,fid_t * fidp,caller_context_t * ct)3604 smbfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3605 {
3606 	return (ENOSYS);
3607 }
3608 
3609 
3610 /*
3611  * The pair of functions VOP_RWLOCK, VOP_RWUNLOCK
3612  * are optional functions that are called by:
3613  *    getdents, before/after VOP_READDIR
3614  *    pread, before/after ... VOP_READ
3615  *    pwrite, before/after ... VOP_WRITE
3616  *    (other places)
3617  *
3618  * Careful here: None of the above check for any
3619  * error returns from VOP_RWLOCK / VOP_RWUNLOCK!
3620  * In fact, the return value from _rwlock is NOT
3621  * an error code, but V_WRITELOCK_TRUE / _FALSE.
3622  *
3623  * Therefore, it's up to _this_ code to make sure
3624  * the lock state remains balanced, which means
3625  * we can't "bail out" on interrupts, etc.
3626  */
3627 
3628 /* ARGSUSED2 */
3629 static int
smbfs_rwlock(vnode_t * vp,int write_lock,caller_context_t * ctp)3630 smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3631 {
3632 	smbnode_t	*np = VTOSMB(vp);
3633 
3634 	if (!write_lock) {
3635 		(void) smbfs_rw_enter_sig(&np->r_rwlock, RW_READER, FALSE);
3636 		return (V_WRITELOCK_FALSE);
3637 	}
3638 
3639 
3640 	(void) smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, FALSE);
3641 	return (V_WRITELOCK_TRUE);
3642 }
3643 
3644 /* ARGSUSED */
3645 static void
smbfs_rwunlock(vnode_t * vp,int write_lock,caller_context_t * ctp)3646 smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3647 {
3648 	smbnode_t	*np = VTOSMB(vp);
3649 
3650 	smbfs_rw_exit(&np->r_rwlock);
3651 }
3652 
3653 
3654 /* ARGSUSED */
3655 static int
smbfs_seek(vnode_t * vp,offset_t ooff,offset_t * noffp,caller_context_t * ct)3656 smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
3657 {
3658 	smbmntinfo_t	*smi;
3659 
3660 	smi = VTOSMI(vp);
3661 
3662 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3663 		return (EPERM);
3664 
3665 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3666 		return (EIO);
3667 
3668 	/*
3669 	 * Because we stuff the readdir cookie into the offset field
3670 	 * someone may attempt to do an lseek with the cookie which
3671 	 * we want to succeed.
3672 	 */
3673 	if (vp->v_type == VDIR)
3674 		return (0);
3675 
3676 	/* Like NFS3, just check for 63-bit overflow. */
3677 	if (*noffp < 0)
3678 		return (EINVAL);
3679 
3680 	return (0);
3681 }
3682 
3683 /* mmap support ******************************************************** */
3684 
3685 #ifdef	_KERNEL
3686 
#ifdef DEBUG
/* Debug counter: bumped in smbfs_getapage when page_lookup loses a race. */
static int smbfs_lostpage = 0;	/* number of times we lost original page */
#endif
3690 
/*
 * Return all the pages from [off..off+len) in file
 * Like nfs3_getpage
 */
/* ARGSUSED */
static int
smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	int		error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Reject cross-zone callers, dead mounts, and unmappable vnodes. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (protp != NULL)
		*protp = PROT_ALL;

	/*
	 * Now validate that the caches are up to date.
	 */
	error = smbfs_validate_caches(vp, cr);
	if (error)
		return (error);

retry:
	mutex_enter(&np->r_statelock);

	/*
	 * Don't create dirty pages faster than they
	 * can be cleaned ... (etc. see nfs)
	 *
	 * Here NFS also tests:
	 *  (mi->mi_max_threads != 0 &&
	 *  rp->r_awcount > 2 * mi->mi_max_threads)
	 */
	if (rw == S_CREATE) {
		while (np->r_gcount > 0)
			cv_wait(&np->r_cv, &np->r_statelock);
	}

	/*
	 * If we are getting called as a side effect of a write
	 * operation the local file size might not be extended yet.
	 * In this case we want to be able to return pages of zeroes.
	 */
	if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) {
		mutex_exit(&np->r_statelock);
		return (EFAULT);		/* beyond EOF */
	}

	mutex_exit(&np->r_statelock);

	error = pvn_getpages(smbfs_getapage, vp, off, len, protp,
	    pl, plsz, seg, addr, rw, cr);

	switch (error) {
	case SMBFS_EOF:
		/* File shrank under us; purge caches and retry. */
		smbfs_purge_caches(vp, cr);
		goto retry;
	case ESTALE:
		/*
		 * Here NFS has: PURGE_STALE_FH(error, vp, cr);
		 * In-line here as we only use it once.
		 */
		mutex_enter(&np->r_statelock);
		np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = (error);
		mutex_exit(&np->r_statelock);
		if (vn_has_cached_data(vp))
			smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
		smbfs_purge_caches(vp, cr);
		break;
	default:
		break;
	}

	return (error);
}
3782 
/*
 * Called from pvn_getpages to get a particular page.
 * Like nfs3_getapage
 *
 * If the page is not already cached, klusters up a read of
 * nearby pages, reads them from the server via smbfs_bio,
 * and returns the page list in pl[] (when pl != NULL).
 */
/* ARGSUSED */
static int
smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr)
{
	smbnode_t	*np;
	smbmntinfo_t   *smi;

	uint_t		bsize;
	struct buf	*bp;
	page_t		*pp;
	u_offset_t	lbn;		/* logical block number of off */
	u_offset_t	io_off;		/* start of actual I/O range */
	u_offset_t	blkoff;		/* byte offset of that block */
	size_t		io_len;		/* length of actual I/O range */
	uint_t blksize;
	int error;
	/* int readahead; */
	int readahead_issued = 0;
	/* int ra_window; * readahead window */
	page_t *pagefound;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Reject cross-zone callers and dead mounts. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

reread:
	bp = NULL;
	pp = NULL;
	pagefound = NULL;

	if (pl != NULL)
		pl[0] = NULL;

	error = 0;
	lbn = off / bsize;
	blkoff = lbn * bsize;

	/*
	 * NFS queues up readahead work here.
	 */

again:
	if ((pagefound = page_exists(vp, off)) == NULL) {
		if (pl == NULL) {
			(void) 0; /* Todo: smbfs_async_readahead(); */
		} else if (rw == S_CREATE) {
			/*
			 * Block for this page is not allocated, or the offset
			 * is beyond the current allocation size, or we're
			 * allocating a swap slot and the page was not found,
			 * so allocate it and return a zero page.
			 */
			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL)
				cmn_err(CE_PANIC,
				    "smbfs_getapage: page_create");
			io_len = PAGESIZE;
			mutex_enter(&np->r_statelock);
			np->r_nextr = off + PAGESIZE;
			mutex_exit(&np->r_statelock);
		} else {
			/*
			 * Need to go to server to get a BLOCK, exception to
			 * that being while reading at offset = 0 or doing
			 * random i/o, in that case read only a PAGE.
			 */
			mutex_enter(&np->r_statelock);
			if (blkoff < np->r_size &&
			    blkoff + bsize >= np->r_size) {
				/*
				 * If only a block or less is left in
				 * the file, read all that is remaining.
				 */
				if (np->r_size <= off) {
					/*
					 * Trying to access beyond EOF,
					 * set up to get at least one page.
					 */
					blksize = off + PAGESIZE - blkoff;
				} else
					blksize = np->r_size - blkoff;
			} else if ((off == 0) ||
			    (off != np->r_nextr && !readahead_issued)) {
				blksize = PAGESIZE;
				blkoff = off; /* block = page here */
			} else
				blksize = bsize;
			mutex_exit(&np->r_statelock);

			pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
			    &io_len, blkoff, blksize, 0);

			/*
			 * Some other thread has entered the page,
			 * so just use it.
			 */
			if (pp == NULL)
				goto again;

			/*
			 * Now round the request size up to page boundaries.
			 * This ensures that the entire page will be
			 * initialized to zeroes if EOF is encountered.
			 */
			io_len = ptob(btopr(io_len));

			bp = pageio_setup(pp, io_len, vp, B_READ);
			ASSERT(bp != NULL);

			/*
			 * pageio_setup should have set b_addr to 0.  This
			 * is correct since we want to do I/O on a page
			 * boundary.  bp_mapin will use this addr to calculate
			 * an offset, and then set b_addr to the kernel virtual
			 * address it allocated for us.
			 */
			ASSERT(bp->b_un.b_addr == 0);

			bp->b_edev = 0;
			bp->b_dev = 0;
			bp->b_lblkno = lbtodb(io_off);
			bp->b_file = vp;
			bp->b_offset = (offset_t)off;
			bp_mapin(bp);

			/*
			 * If doing a write beyond what we believe is EOF,
			 * don't bother trying to read the pages from the
			 * server, we'll just zero the pages here.  We
			 * don't check that the rw flag is S_WRITE here
			 * because some implementations may attempt a
			 * read access to the buffer before copying data.
			 */
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size && seg == segkmap) {
				mutex_exit(&np->r_statelock);
				bzero(bp->b_un.b_addr, io_len);
			} else {
				mutex_exit(&np->r_statelock);
				error = smbfs_bio(bp, 0, cr);
			}

			/*
			 * Unmap the buffer before freeing it.
			 */
			bp_mapout(bp);
			pageio_done(bp);

			/* Here NFS3 updates all pp->p_fsdata */

			if (error == SMBFS_EOF) {
				/*
				 * If doing a write system call just return
				 * zeroed pages, else user tried to get pages
				 * beyond EOF, return error.  We don't check
				 * that the rw flag is S_WRITE here because
				 * some implementations may attempt a read
				 * access to the buffer before copying data.
				 */
				if (seg == segkmap)
					error = 0;
				else
					error = EFAULT;
			}

			if (!readahead_issued && !error) {
				mutex_enter(&np->r_statelock);
				np->r_nextr = io_off + io_len;
				mutex_exit(&np->r_statelock);
			}
		}
	}

	if (pl == NULL)
		return (error);

	if (error) {
		if (pp != NULL)
			pvn_read_done(pp, B_ERROR);
		return (error);
	}

	if (pagefound) {
		se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);

		/*
		 * Page exists in the cache, acquire the appropriate lock.
		 * If this fails, start all over again.
		 */
		if ((pp = page_lookup(vp, off, se)) == NULL) {
#ifdef DEBUG
			smbfs_lostpage++;
#endif
			goto reread;
		}
		pl[0] = pp;
		pl[1] = NULL;
		return (0);
	}

	if (pp != NULL)
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);

	return (error);
}
4001 
4002 /*
4003  * Here NFS has: nfs3_readahead
4004  * No read-ahead in smbfs yet.
4005  */
4006 
4007 #endif	// _KERNEL
4008 
/*
 * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
 * If len == 0, do from off to EOF.
 *
 * The normal cases should be len == 0 && off == 0 (entire vp list),
 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
 * (from pageout).
 *
 * Like nfs3_putpage + nfs_putpages
 */
/* ARGSUSED */
static int
smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
	caller_context_t *ct)
{
#ifdef	_KERNEL
	smbnode_t *np;
	smbmntinfo_t *smi;
	page_t *pp;
	u_offset_t eoff;	/* end of the requested range */
	u_offset_t io_off;	/* offset actually written by putapage */
	size_t io_len;		/* length actually written by putapage */
	int error;
	int rdirty;		/* was RDIRTY set before the flush? */
	int err;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Reject cross-zone callers, dead mounts, and unmappable vnodes. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Here NFS does rp->r_count (++/--) stuff. */

	/* Beginning of code from nfs_putpages. */

	if (!vn_has_cached_data(vp))
		return (0);

	/*
	 * If ROUTOFSPACE is set, then all writes turn into B_INVAL
	 * writes.  B_FORCE is set to force the VM system to actually
	 * invalidate the pages, even if the i/o failed.  The pages
	 * need to get invalidated because they can't be written out
	 * because there isn't any space left on either the server's
	 * file system or in the user's disk quota.  The B_FREE bit
	 * is cleared to avoid confusion as to whether this is a
	 * request to place the page on the freelist or to destroy
	 * it.
	 */
	if ((np->r_flags & ROUTOFSPACE) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;

	if (len == 0) {
		/*
		 * If doing a full file synchronous operation, then clear
		 * the RDIRTY bit.  If a page gets dirtied while the flush
		 * is happening, then RDIRTY will get set again.  The
		 * RDIRTY bit must get cleared before the flush so that
		 * we don't lose this information.
		 *
		 * NFS has B_ASYNC vs sync stuff here.
		 */
		if (off == (u_offset_t)0 &&
		    (np->r_flags & RDIRTY)) {
			mutex_enter(&np->r_statelock);
			rdirty = (np->r_flags & RDIRTY);
			np->r_flags &= ~RDIRTY;
			mutex_exit(&np->r_statelock);
		} else
			rdirty = 0;

		/*
		 * Search the entire vp list for pages >= off, and flush
		 * the dirty pages.
		 */
		error = pvn_vplist_dirty(vp, off, smbfs_putapage,
		    flags, cr);

		/*
		 * If an error occurred and the file was marked as dirty
		 * before and we aren't forcibly invalidating pages, then
		 * reset the RDIRTY flag.
		 */
		if (error && rdirty &&
		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
			mutex_enter(&np->r_statelock);
			np->r_flags |= RDIRTY;
			mutex_exit(&np->r_statelock);
		}
	} else {
		/*
		 * Do a range from [off...off + len) looking for pages
		 * to deal with.
		 */
		error = 0;
		io_len = 1; /* quiet warnings */
		eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			/* Stop at EOF; nothing past r_size needs flushing. */
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size) {
				mutex_exit(&np->r_statelock);
				break;
			}
			mutex_exit(&np->r_statelock);
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || !pvn_getdirty(pp, flags))
				io_len = PAGESIZE;
			else {
				err = smbfs_putapage(vp, pp, &io_off,
				    &io_len, flags, cr);
				if (!error)
					error = err;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
		}
	}

	return (error);

#else	// _KERNEL
	return (ENOSYS);
#endif	// _KERNEL
}
4161 
4162 #ifdef	_KERNEL
4163 
4164 /*
4165  * Write out a single page, possibly klustering adjacent dirty pages.
4166  *
4167  * Like nfs3_putapage / nfs3_sync_putapage
4168  */
4169 static int
smbfs_putapage(vnode_t * vp,page_t * pp,u_offset_t * offp,size_t * lenp,int flags,cred_t * cr)4170 smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
4171 	int flags, cred_t *cr)
4172 {
4173 	smbnode_t *np;
4174 	u_offset_t io_off;
4175 	u_offset_t lbn_off;
4176 	u_offset_t lbn;
4177 	size_t io_len;
4178 	uint_t bsize;
4179 	int error;
4180 
4181 	np = VTOSMB(vp);
4182 
4183 	ASSERT(!vn_is_readonly(vp));
4184 
4185 	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
4186 	lbn = pp->p_offset / bsize;
4187 	lbn_off = lbn * bsize;
4188 
4189 	/*
4190 	 * Find a kluster that fits in one block, or in
4191 	 * one page if pages are bigger than blocks.  If
4192 	 * there is less file space allocated than a whole
4193 	 * page, we'll shorten the i/o request below.
4194 	 */
4195 	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
4196 	    roundup(bsize, PAGESIZE), flags);
4197 
4198 	/*
4199 	 * pvn_write_kluster shouldn't have returned a page with offset
4200 	 * behind the original page we were given.  Verify that.
4201 	 */
4202 	ASSERT((pp->p_offset / bsize) >= lbn);
4203 
4204 	/*
4205 	 * Now pp will have the list of kept dirty pages marked for
4206 	 * write back.  It will also handle invalidation and freeing
4207 	 * of pages that are not dirty.  Check for page length rounding
4208 	 * problems.
4209 	 */
4210 	if (io_off + io_len > lbn_off + bsize) {
4211 		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
4212 		io_len = lbn_off + bsize - io_off;
4213 	}
4214 	/*
4215 	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
4216 	 * consistent value of r_size. RMODINPROGRESS is set in writerp().
4217 	 * When RMODINPROGRESS is set it indicates that a uiomove() is in
4218 	 * progress and the r_size has not been made consistent with the
4219 	 * new size of the file. When the uiomove() completes the r_size is
4220 	 * updated and the RMODINPROGRESS flag is cleared.
4221 	 *
4222 	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
4223 	 * consistent value of r_size. Without this handshaking, it is
4224 	 * possible that smbfs_bio() picks  up the old value of r_size
4225 	 * before the uiomove() in writerp() completes. This will result
4226 	 * in the write through smbfs_bio() being dropped.
4227 	 *
4228 	 * More precisely, there is a window between the time the uiomove()
4229 	 * completes and the time the r_size is updated. If a VOP_PUTPAGE()
4230 	 * operation intervenes in this window, the page will be picked up,
4231 	 * because it is dirty (it will be unlocked, unless it was
4232 	 * pagecreate'd). When the page is picked up as dirty, the dirty
4233 	 * bit is reset (pvn_getdirty()). In smbfs_write(), r_size is
4234 	 * checked. This will still be the old size. Therefore the page will
4235 	 * not be written out. When segmap_release() calls VOP_PUTPAGE(),
4236 	 * the page will be found to be clean and the write will be dropped.
4237 	 */
4238 	if (np->r_flags & RMODINPROGRESS) {
4239 		mutex_enter(&np->r_statelock);
4240 		if ((np->r_flags & RMODINPROGRESS) &&
4241 		    np->r_modaddr + MAXBSIZE > io_off &&
4242 		    np->r_modaddr < io_off + io_len) {
4243 			page_t *plist;
4244 			/*
4245 			 * A write is in progress for this region of the file.
4246 			 * If we did not detect RMODINPROGRESS here then this
4247 			 * path through smbfs_putapage() would eventually go to
4248 			 * smbfs_bio() and may not write out all of the data
4249 			 * in the pages. We end up losing data. So we decide
4250 			 * to set the modified bit on each page in the page
4251 			 * list and mark the rnode with RDIRTY. This write
4252 			 * will be restarted at some later time.
4253 			 */
4254 			plist = pp;
4255 			while (plist != NULL) {
4256 				pp = plist;
4257 				page_sub(&plist, pp);
4258 				hat_setmod(pp);
4259 				page_io_unlock(pp);
4260 				page_unlock(pp);
4261 			}
4262 			np->r_flags |= RDIRTY;
4263 			mutex_exit(&np->r_statelock);
4264 			if (offp)
4265 				*offp = io_off;
4266 			if (lenp)
4267 				*lenp = io_len;
4268 			return (0);
4269 		}
4270 		mutex_exit(&np->r_statelock);
4271 	}
4272 
4273 	/*
4274 	 * NFS handles (flags & B_ASYNC) here...
4275 	 * (See nfs_async_putapage())
4276 	 *
4277 	 * This code section from: nfs3_sync_putapage()
4278 	 */
4279 
4280 	flags |= B_WRITE;
4281 
4282 	error = smbfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4283 
4284 	if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
4285 	    error == EACCES) &&
4286 	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
4287 		if (!(np->r_flags & ROUTOFSPACE)) {
4288 			mutex_enter(&np->r_statelock);
4289 			np->r_flags |= ROUTOFSPACE;
4290 			mutex_exit(&np->r_statelock);
4291 		}
4292 		flags |= B_ERROR;
4293 		pvn_write_done(pp, flags);
4294 		/*
4295 		 * If this was not an async thread, then try again to
4296 		 * write out the pages, but this time, also destroy
4297 		 * them whether or not the write is successful.  This
4298 		 * will prevent memory from filling up with these
4299 		 * pages and destroying them is the only alternative
4300 		 * if they can't be written out.
4301 		 *
4302 		 * Don't do this if this is an async thread because
4303 		 * when the pages are unlocked in pvn_write_done,
4304 		 * some other thread could have come along, locked
4305 		 * them, and queued for an async thread.  It would be
4306 		 * possible for all of the async threads to be tied
4307 		 * up waiting to lock the pages again and they would
4308 		 * all already be locked and waiting for an async
4309 		 * thread to handle them.  Deadlock.
4310 		 */
4311 		if (!(flags & B_ASYNC)) {
4312 			error = smbfs_putpage(vp, io_off, io_len,
4313 			    B_INVAL | B_FORCE, cr, NULL);
4314 		}
4315 	} else {
4316 		if (error)
4317 			flags |= B_ERROR;
4318 		else if (np->r_flags & ROUTOFSPACE) {
4319 			mutex_enter(&np->r_statelock);
4320 			np->r_flags &= ~ROUTOFSPACE;
4321 			mutex_exit(&np->r_statelock);
4322 		}
4323 		pvn_write_done(pp, flags);
4324 	}
4325 
4326 	/* Now more code from: nfs3_putapage */
4327 
4328 	if (offp)
4329 		*offp = io_off;
4330 	if (lenp)
4331 		*lenp = io_len;
4332 
4333 	return (error);
4334 }
4335 
4336 #endif	// _KERNEL
4337 
4338 
4339 /*
4340  * NFS has this in nfs_client.c (shared by v2,v3,...)
4341  * We have it here so smbfs_putapage can be file scope.
4342  */
void
smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
{
	smbnode_t *np;

	np = VTOSMB(vp);

	/*
	 * Serialize truncations: only one thread at a time may run the
	 * invalidation.  Anyone else arriving here sleeps on r_cv until
	 * the current RTRUNCATE holder clears the flag below.
	 */
	mutex_enter(&np->r_statelock);
	while (np->r_flags & RTRUNCATE)
		cv_wait(&np->r_cv, &np->r_statelock);
	np->r_flags |= RTRUNCATE;

	/*
	 * Invalidating from offset zero means the whole file goes:
	 * forget pending dirty state and clear any recorded error,
	 * unless the node itself has been marked stale.
	 */
	if (off == (u_offset_t)0) {
		np->r_flags &= ~RDIRTY;
		if (!(np->r_flags & RSTALE))
			np->r_error = 0;
	}
	/* Here NFSv3 has np->r_truncaddr = off; */
	mutex_exit(&np->r_statelock);

#ifdef	_KERNEL
	/* Push out and invalidate all cached pages at or beyond off. */
	(void) pvn_vplist_dirty(vp, off, smbfs_putapage,
	    B_INVAL | B_TRUNC, cr);
#endif	// _KERNEL

	/* Done; allow the next truncation and wake any waiters. */
	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RTRUNCATE;
	cv_broadcast(&np->r_cv);
	mutex_exit(&np->r_statelock);
}
4373 
4374 #ifdef	_KERNEL
4375 
4376 /* Like nfs3_map */
4377 
4378 /* ARGSUSED */
static int
smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	segvn_crargs_t	vn_a;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	int		error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Disallow cross-zone access. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	/* Refuse work on a dead or unmounted file system. */
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Sanity check: should have a valid open */
	if (np->n_fid == NULL)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Reject negative offsets and ranges that wrap past the end. */
	if (off < 0 || off + (ssize_t)len < 0)
		return (ENXIO);

	/* Only regular files can be mapped. */
	if (vp->v_type != VREG)
		return (ENODEV);

	/*
	 * NFS does close-to-open consistency stuff here.
	 * Just get (possibly cached) attributes.
	 */
	va.va_mask = AT_ALL;
	if ((error = smbfsgetattr(vp, &va, cr)) != 0)
		return (error);

	/*
	 * Check to see if the vnode is currently marked as not cachable.
	 * This means portions of the file are locked (through VOP_FRLOCK).
	 * In this case the map request must be refused.  We use
	 * rp->r_lkserlock to avoid a race with concurrent lock requests.
	 */
	/*
	 * Atomically increment r_inmap after acquiring r_rwlock. The
	 * idea here is to acquire r_rwlock to block read/write and
	 * not to protect r_inmap. r_inmap will inform smbfs_read/write()
	 * that we are in smbfs_map(). Now, r_rwlock is acquired in order
	 * and we can prevent the deadlock that would have occurred
	 * when smbfs_addmap() would have acquired it out of order.
	 *
	 * Since we are not protecting r_inmap by any lock, we do not
	 * hold any lock when we decrement it. We atomically decrement
	 * r_inmap after we release r_lkserlock.  Note that rwlock is
	 * re-entered as writer in smbfs_addmap (called via as_map).
	 */

	if (smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	atomic_inc_uint(&np->r_inmap);
	smbfs_rw_exit(&np->r_rwlock);

	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) {
		atomic_dec_uint(&np->r_inmap);
		return (EINTR);
	}

	/* Caching disabled (byte-range locks active) -- cannot map. */
	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Don't allow concurrent locks and mapping if mandatory locking is
	 * enabled.
	 */
	if ((flk_has_remote_locks(vp) || smbfs_lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

	/* Pick a user address range, then create the segvn mapping. */
	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	smbfs_rw_exit(&np->r_lkserlock);
	atomic_dec_uint(&np->r_inmap);
	return (error);
}
4491 
4492 /*
4493  * This uses addmap/delmap functions to hold the SMB FID open as long as
4494  * there are pages mapped in this as/seg.  Increment the FID refs. when
4495  * the maping count goes from zero to non-zero, and release the FID ref
4496  * when the maping count goes from non-zero to zero.
4497  */
4498 
4499 /* ARGSUSED */
4500 static int
smbfs_addmap(vnode_t * vp,offset_t off,struct as * as,caddr_t addr,size_t len,uchar_t prot,uchar_t maxprot,uint_t flags,cred_t * cr,caller_context_t * ct)4501 smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4502 	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
4503 	cred_t *cr, caller_context_t *ct)
4504 {
4505 	smbnode_t *np = VTOSMB(vp);
4506 	boolean_t inc_fidrefs = B_FALSE;
4507 
4508 	/*
4509 	 * When r_mapcnt goes from zero to non-zero,
4510 	 * increment n_fidrefs
4511 	 */
4512 	mutex_enter(&np->r_statelock);
4513 	if (np->r_mapcnt == 0)
4514 		inc_fidrefs = B_TRUE;
4515 	np->r_mapcnt += btopr(len);
4516 	mutex_exit(&np->r_statelock);
4517 
4518 	if (inc_fidrefs) {
4519 		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
4520 		np->n_fidrefs++;
4521 		smbfs_rw_exit(&np->r_lkserlock);
4522 	}
4523 
4524 	return (0);
4525 }
4526 
4527 /*
4528  * Args passed to smbfs_delmap_async
4529  */
typedef struct smbfs_delmap_args {
	taskq_ent_t		dm_tqent;	/* embedded taskq linkage */
	cred_t			*dm_cr;		/* held cred; released by async task */
	vnode_t			*dm_vp;		/* held vnode; released by async task */
	offset_t		dm_off;		/* delmap args saved for the async task */
	caddr_t			dm_addr;
	size_t			dm_len;
	uint_t			dm_prot;
	uint_t			dm_maxprot;
	uint_t			dm_flags;
	boolean_t		dm_rele_fid;	/* r_mapcnt hit zero; drop FID ref */
} smbfs_delmap_args_t;
4542 
4543 /*
4544  * Using delmap not only to release the SMB FID (as described above)
4545  * but to flush dirty pages as needed.  Both of those do the actual
4546  * work in an async taskq job to avoid interfering with locks held
4547  * in the VM layer when this is called.
4548  */
4549 
4550 /* ARGSUSED */
4551 static int
smbfs_delmap(vnode_t * vp,offset_t off,struct as * as,caddr_t addr,size_t len,uint_t prot,uint_t maxprot,uint_t flags,cred_t * cr,caller_context_t * ct)4552 smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4553 	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
4554 	cred_t *cr, caller_context_t *ct)
4555 {
4556 	smbnode_t		*np = VTOSMB(vp);
4557 	smbmntinfo_t		*smi = VTOSMI(vp);
4558 	smbfs_delmap_args_t	*dmapp;
4559 
4560 	dmapp = kmem_zalloc(sizeof (*dmapp), KM_SLEEP);
4561 
4562 	/*
4563 	 * The VM layer may segvn_free the seg holding this vnode
4564 	 * before our callback has a chance run, so take a hold on
4565 	 * the vnode here and release it in the callback.
4566 	 * (same for the cred)
4567 	 */
4568 	crhold(cr);
4569 	VN_HOLD(vp);
4570 
4571 	dmapp->dm_vp = vp;
4572 	dmapp->dm_cr = cr;
4573 	dmapp->dm_off = off;
4574 	dmapp->dm_addr = addr;
4575 	dmapp->dm_len = len;
4576 	dmapp->dm_prot = prot;
4577 	dmapp->dm_maxprot = maxprot;
4578 	dmapp->dm_flags = flags;
4579 	dmapp->dm_rele_fid = B_FALSE;
4580 
4581 	/*
4582 	 * Go ahead and decrement r_mapcount now, which is
4583 	 * the primary purpose of this function.
4584 	 *
4585 	 * When r_mapcnt goes to zero, we need to call
4586 	 * smbfs_rele_fid, but can't do that here, so
4587 	 * set a flag telling the async task to do it.
4588 	 */
4589 	mutex_enter(&np->r_statelock);
4590 	np->r_mapcnt -= btopr(len);
4591 	ASSERT(np->r_mapcnt >= 0);
4592 	if (np->r_mapcnt == 0)
4593 		dmapp->dm_rele_fid = B_TRUE;
4594 	mutex_exit(&np->r_statelock);
4595 
4596 	taskq_dispatch_ent(smi->smi_taskq, smbfs_delmap_async, dmapp, 0,
4597 	    &dmapp->dm_tqent);
4598 
4599 	return (0);
4600 }
4601 
4602 /*
4603  * Remove some pages from an mmap'd vnode.  Flush any
4604  * dirty pages in the unmapped range.
4605  */
4606 /* ARGSUSED */
static void
smbfs_delmap_async(void *varg)
{
	smbfs_delmap_args_t	*dmapp = varg;
	cred_t			*cr;
	vnode_t			*vp;
	smbnode_t		*np;
	smbmntinfo_t		*smi;

	cr = dmapp->dm_cr;
	vp = dmapp->dm_vp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Decremented r_mapcnt in smbfs_delmap */

	/*
	 * Initiate a page flush and potential commit if there are
	 * pages, the file system was not mounted readonly, the segment
	 * was mapped shared, and the pages themselves were writeable.
	 *
	 * mark RDIRTY here, will be used to check if a file is dirty when
	 * unmount smbfs
	 */
	if (vn_has_cached_data(vp) && !vn_is_readonly(vp) &&
	    dmapp->dm_flags == MAP_SHARED &&
	    (dmapp->dm_maxprot & PROT_WRITE) != 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/*
		 * Need to finish the putpage before we
		 * close the OtW FID needed for I/O.
		 */
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 0,
		    dmapp->dm_cr, NULL);
	}

	/* In direct-I/O mode, invalidate rather than keep cached pages. */
	if ((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO))
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len,
		    B_INVAL, dmapp->dm_cr, NULL);

	/*
	 * If r_mapcnt went to zero, drop our FID ref now.
	 * On the last fidref, this does an OtW close.
	 */
	if (dmapp->dm_rele_fid) {
		struct smb_cred scred;

		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
		smb_credinit(&scred, dmapp->dm_cr);

		smbfs_rele_fid(np, &scred);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);
	}

	/* Release holds taken in smbfs_delmap */
	VN_RELE(vp);
	crfree(cr);

	kmem_free(dmapp, sizeof (*dmapp));
}
4672 
4673 /* No smbfs_pageio() or smbfs_dispose() ops. */
4674 
4675 #endif	// _KERNEL
4676 
4677 /* misc. ******************************************************** */
4678 
4679 
4680 /*
4681  * XXX
4682  * This op may need to support PSARC 2007/440, nbmand changes for CIFS Service.
4683  */
4684 static int
smbfs_frlock(vnode_t * vp,int cmd,struct flock64 * bfp,int flag,offset_t offset,struct flk_callback * flk_cbp,cred_t * cr,caller_context_t * ct)4685 smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4686 	offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
4687 	caller_context_t *ct)
4688 {
4689 	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
4690 		return (EIO);
4691 
4692 	if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
4693 		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4694 	else
4695 		return (ENOSYS);
4696 }
4697 
4698 /*
4699  * Free storage space associated with the specified vnode.  The portion
4700  * to be freed is specified by bfp->l_start and bfp->l_len (already
4701  * normalized to a "whence" of 0).
4702  *
4703  * Called by fcntl(fd, F_FREESP, lkp) for libc:ftruncate, etc.
4704  */
4705 /* ARGSUSED */
4706 static int
smbfs_space(vnode_t * vp,int cmd,struct flock64 * bfp,int flag,offset_t offset,cred_t * cr,caller_context_t * ct)4707 smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4708 	offset_t offset, cred_t *cr, caller_context_t *ct)
4709 {
4710 	int		error;
4711 	smbmntinfo_t	*smi;
4712 
4713 	smi = VTOSMI(vp);
4714 
4715 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4716 		return (EIO);
4717 
4718 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4719 		return (EIO);
4720 
4721 	/* Caller (fcntl) has checked v_type */
4722 	ASSERT(vp->v_type == VREG);
4723 	if (cmd != F_FREESP)
4724 		return (EINVAL);
4725 
4726 	/*
4727 	 * Like NFS3, no 32-bit offset checks here.
4728 	 * Our SMB layer takes care to return EFBIG
4729 	 * when it has to fallback to a 32-bit call.
4730 	 */
4731 
4732 	error = convoff(vp, bfp, 0, offset);
4733 	if (!error) {
4734 		ASSERT(bfp->l_start >= 0);
4735 		if (bfp->l_len == 0) {
4736 			struct vattr va;
4737 
4738 			/*
4739 			 * ftruncate should not change the ctime and
4740 			 * mtime if we truncate the file to its
4741 			 * previous size.
4742 			 */
4743 			va.va_mask = AT_SIZE;
4744 			error = smbfsgetattr(vp, &va, cr);
4745 			if (error || va.va_size == bfp->l_start)
4746 				return (error);
4747 			va.va_mask = AT_SIZE;
4748 			va.va_size = bfp->l_start;
4749 			error = smbfssetattr(vp, &va, 0, cr);
4750 			/* SMBFS_VNEVENT... */
4751 		} else
4752 			error = EINVAL;
4753 	}
4754 
4755 	return (error);
4756 }
4757 
4758 
4759 /* ARGSUSED */
4760 static int
smbfs_realvp(vnode_t * vp,vnode_t ** vpp,caller_context_t * ct)4761 smbfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4762 {
4763 
4764 	return (ENOSYS);
4765 }
4766 
4767 
4768 /* ARGSUSED */
4769 static int
smbfs_pathconf(vnode_t * vp,int cmd,ulong_t * valp,cred_t * cr,caller_context_t * ct)4770 smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4771 	caller_context_t *ct)
4772 {
4773 	vfs_t *vfs;
4774 	smbmntinfo_t *smi;
4775 	struct smb_share *ssp;
4776 
4777 	vfs = vp->v_vfsp;
4778 	smi = VFTOSMI(vfs);
4779 
4780 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4781 		return (EIO);
4782 
4783 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4784 		return (EIO);
4785 
4786 	switch (cmd) {
4787 	case _PC_FILESIZEBITS:
4788 		ssp = smi->smi_share;
4789 		if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES)
4790 			*valp = 64;
4791 		else
4792 			*valp = 32;
4793 		break;
4794 
4795 	case _PC_LINK_MAX:
4796 		/* We only ever report one link to an object */
4797 		*valp = 1;
4798 		break;
4799 
4800 	case _PC_ACL_ENABLED:
4801 		/*
4802 		 * Always indicate that ACLs are enabled and
4803 		 * that we support ACE_T format, otherwise
4804 		 * libsec will ask for ACLENT_T format data
4805 		 * which we don't support.
4806 		 */
4807 		*valp = _ACL_ACE_ENABLED;
4808 		break;
4809 
4810 	case _PC_SYMLINK_MAX:	/* No symlinks until we do Unix extensions */
4811 		*valp = 0;
4812 		break;
4813 
4814 	case _PC_XATTR_EXISTS:
4815 		if (vfs->vfs_flag & VFS_XATTR) {
4816 			*valp = smbfs_xa_exists(vp, cr);
4817 			break;
4818 		}
4819 		return (EINVAL);
4820 
4821 	case _PC_SATTR_ENABLED:
4822 	case _PC_SATTR_EXISTS:
4823 		*valp = 1;
4824 		break;
4825 
4826 	case _PC_TIMESTAMP_RESOLUTION:
4827 		/*
4828 		 * Windows times are tenths of microseconds
4829 		 * (multiples of 100 nanoseconds).
4830 		 */
4831 		*valp = 100L;
4832 		break;
4833 
4834 	default:
4835 		return (fs_pathconf(vp, cmd, valp, cr, ct));
4836 	}
4837 	return (0);
4838 }
4839 
4840 /* ARGSUSED */
4841 static int
smbfs_getsecattr(vnode_t * vp,vsecattr_t * vsa,int flag,cred_t * cr,caller_context_t * ct)4842 smbfs_getsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
4843 	caller_context_t *ct)
4844 {
4845 	vfs_t *vfsp;
4846 	smbmntinfo_t *smi;
4847 	int	error;
4848 	uint_t	mask;
4849 
4850 	vfsp = vp->v_vfsp;
4851 	smi = VFTOSMI(vfsp);
4852 
4853 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4854 		return (EIO);
4855 
4856 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
4857 		return (EIO);
4858 
4859 	/*
4860 	 * Our _pathconf indicates _ACL_ACE_ENABLED,
4861 	 * so we should only see VSA_ACE, etc here.
4862 	 * Note: vn_create asks for VSA_DFACLCNT,
4863 	 * and it expects ENOSYS and empty data.
4864 	 */
4865 	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT |
4866 	    VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
4867 	if (mask == 0)
4868 		return (ENOSYS);
4869 
4870 	if (smi->smi_flags & SMI_ACL)
4871 		error = smbfs_acl_getvsa(vp, vsa, flag, cr);
4872 	else
4873 		error = ENOSYS;
4874 
4875 	if (error == ENOSYS)
4876 		error = fs_fab_acl(vp, vsa, flag, cr, ct);
4877 
4878 	return (error);
4879 }
4880 
4881 /* ARGSUSED */
4882 static int
smbfs_setsecattr(vnode_t * vp,vsecattr_t * vsa,int flag,cred_t * cr,caller_context_t * ct)4883 smbfs_setsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
4884 	caller_context_t *ct)
4885 {
4886 	vfs_t *vfsp;
4887 	smbmntinfo_t *smi;
4888 	int	error;
4889 	uint_t	mask;
4890 
4891 	vfsp = vp->v_vfsp;
4892 	smi = VFTOSMI(vfsp);
4893 
4894 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4895 		return (EIO);
4896 
4897 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
4898 		return (EIO);
4899 
4900 	/*
4901 	 * Our _pathconf indicates _ACL_ACE_ENABLED,
4902 	 * so we should only see VSA_ACE, etc here.
4903 	 */
4904 	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT);
4905 	if (mask == 0)
4906 		return (ENOSYS);
4907 
4908 	if (vfsp->vfs_flag & VFS_RDONLY)
4909 		return (EROFS);
4910 
4911 	/*
4912 	 * Allow only the mount owner to do this.
4913 	 * See comments at smbfs_access_rwx.
4914 	 */
4915 	error = secpolicy_vnode_setdac(cr, smi->smi_uid);
4916 	if (error != 0)
4917 		return (error);
4918 
4919 	if (smi->smi_flags & SMI_ACL)
4920 		error = smbfs_acl_setvsa(vp, vsa, flag, cr);
4921 	else
4922 		error = ENOSYS;
4923 
4924 	return (error);
4925 }
4926 
4927 
4928 /*
4929  * XXX
4930  * This op should eventually support PSARC 2007/268.
4931  */
4932 static int
smbfs_shrlock(vnode_t * vp,int cmd,struct shrlock * shr,int flag,cred_t * cr,caller_context_t * ct)4933 smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
4934 	caller_context_t *ct)
4935 {
4936 	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
4937 		return (EIO);
4938 
4939 	if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
4940 		return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
4941 	else
4942 		return (ENOSYS);
4943 }
4944 
4945 
4946 /*
4947  * Most unimplemented ops will return ENOSYS because of fs_nosys().
4948  * The only ops where that won't work are ACCESS (due to open(2)
4949  * failures) and ... (anything else left?)
4950  */
const fs_operation_def_t smbfs_vnodeops_template[] = {
	/* Open/close and basic I/O */
	VOPNAME_OPEN,		{ .vop_open = smbfs_open },
	VOPNAME_CLOSE,		{ .vop_close = smbfs_close },
	VOPNAME_READ,		{ .vop_read = smbfs_read },
	VOPNAME_WRITE,		{ .vop_write = smbfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = smbfs_ioctl },
	/* Attributes and access checks */
	VOPNAME_GETATTR,	{ .vop_getattr = smbfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = smbfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = smbfs_access },
	/* Namespace operations */
	VOPNAME_LOOKUP,		{ .vop_lookup = smbfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = smbfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = smbfs_remove },
	VOPNAME_LINK,		{ .vop_link = smbfs_link },
	VOPNAME_RENAME,		{ .vop_rename = smbfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = smbfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = smbfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = smbfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = smbfs_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = smbfs_readlink },
	/* Lifecycle, identity, and locking */
	VOPNAME_FSYNC,		{ .vop_fsync = smbfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = smbfs_inactive },
	VOPNAME_FID,		{ .vop_fid = smbfs_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = smbfs_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = smbfs_rwunlock },
	VOPNAME_SEEK,		{ .vop_seek = smbfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = smbfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = smbfs_space },
	VOPNAME_REALVP,		{ .vop_realvp = smbfs_realvp },
#ifdef	_KERNEL
	/* Paging and mmap support (kernel builds only) */
	VOPNAME_GETPAGE,	{ .vop_getpage = smbfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = smbfs_putpage },
	VOPNAME_MAP,		{ .vop_map = smbfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = smbfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = smbfs_delmap },
#endif	// _KERNEL
	/* Pathconf, ACLs, and share reservations */
	VOPNAME_PATHCONF,	{ .vop_pathconf = smbfs_pathconf },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = smbfs_setsecattr },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = smbfs_getsecattr },
	VOPNAME_SHRLOCK,	{ .vop_shrlock = smbfs_shrlock },
#ifdef	SMBFS_VNEVENT
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
#endif
	{ NULL, NULL }
};
4995