xref: /illumos-gate/usr/src/uts/common/nfs/nfs_clnt.h (revision 726fad2a65f16c200a03969c29cb5c86c2d427db)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
27 /*	  All Rights Reserved  	*/
28 
29 #ifndef	_NFS_NFS_CLNT_H
30 #define	_NFS_NFS_CLNT_H
31 
32 #include <sys/utsname.h>
33 #include <sys/kstat.h>
34 #include <sys/time.h>
35 #include <vm/page.h>
36 #include <sys/thread.h>
37 #include <nfs/rnode.h>
38 #include <sys/list.h>
39 #include <sys/condvar_impl.h>
40 
41 #ifdef	__cplusplus
42 extern "C" {
43 #endif
44 
#define	HOSTNAMESZ	32	/* size of hostname buffers */
#define	ACREGMIN	3	/* min secs to hold cached file attr */
#define	ACREGMAX	60	/* max secs to hold cached file attr */
#define	ACDIRMIN	30	/* min secs to hold cached dir attr */
#define	ACDIRMAX	60	/* max secs to hold cached dir attr */
#define	ACMINMAX	3600	/* 1 hr is longest min timeout */
#define	ACMAXMAX	36000	/* 10 hr is longest max timeout */

#define	NFS_CALLTYPES	3	/* Lookups, Reads, Writes */

/*
 * rfscall() flags
 */
#define	RFSCALL_SOFT	0x00000001	/* Do op as if fs was soft-mounted */

/*
 * Fake errno passed back from rfscall to indicate transfer size adjustment
 */
#define	ENFS_TRYAGAIN	999
64 
65 /*
66  * The NFS specific async_reqs structure. iotype is grouped to support two
67  * types of async thread pools, please read comments section of mntinfo_t
68  * definition for more information. Care should be taken while adding new
69  * members to this group.
70  */
71 
enum iotype {
	/*
	 * The first three values (through NFS_COMMIT) are the page
	 * operations; NFS_ASYNC_PGOPS_TYPES below depends on this
	 * ordering, so do not reorder or insert entries before
	 * NFS_COMMIT without updating it.
	 */
	NFS_PUTAPAGE,
	NFS_PAGEIO,
	NFS_COMMIT,
	NFS_READ_AHEAD,
	NFS_READDIR,
	NFS_INACTIVE,
	NFS_ASYNC_TYPES		/* count of i/o types, not a real type */
};
/* Number of iotype values that are page operations (PUTAPAGE..COMMIT). */
#define	NFS_ASYNC_PGOPS_TYPES	(NFS_COMMIT + 1)

/*
 * NFS async requests queue type.
 */

enum ioqtype {
	NFS_ASYNC_QUEUE,	/* queue serviced by the general pool */
	NFS_ASYNC_PGOPS_QUEUE,	/* queue serviced by the page-op pool */
	NFS_MAX_ASYNC_QUEUES	/* count of queues, not a real queue */
};

/*
 * Number of NFS async threads operating exclusively on page op requests.
 */
#define	NUM_ASYNC_PGOPS_THREADS	0x2
97 
/* Arguments for an async read-ahead request (NFS_READ_AHEAD). */
struct nfs_async_read_req {
	void (*readahead)();		/* pointer to readahead function */
	u_offset_t blkoff;		/* offset in file */
	struct seg *seg;		/* segment to do i/o to */
	caddr_t addr;			/* address to do i/o to */
};

/* Arguments for an async page i/o request (NFS_PUTAPAGE or NFS_PAGEIO). */
struct nfs_pageio_req {
	int (*pageio)();		/* pointer to pageio function */
	page_t *pp;			/* page list */
	u_offset_t io_off;		/* offset in file */
	uint_t io_len;			/* size of request */
	int flags;
};

/* Arguments for an async readdir request (NFS_READDIR). */
struct nfs_readdir_req {
	int (*readdir)();		/* pointer to readdir function */
	struct rddir_cache *rdc;	/* pointer to cache entry to fill */
};

/* Arguments for an async commit request (NFS_COMMIT). */
struct nfs_commit_req {
	void (*commit)();		/* pointer to commit function */
	page_t *plist;			/* page list */
	offset3 offset;			/* starting offset */
	count3 count;			/* size of range to be committed */
};

/* Arguments for an async inactive request (NFS_INACTIVE). */
struct nfs_inactive_req {
	void (*inactive)();		/* pointer to inactive function */
};

/*
 * A single queued async request.  a_io discriminates which member of
 * the a_args union is valid; requests are singly linked through a_next.
 */
struct nfs_async_reqs {
	struct nfs_async_reqs *a_next;	/* pointer to next arg struct */
#ifdef DEBUG
	kthread_t *a_queuer;		/* thread id of queueing thread */
#endif
	struct vnode *a_vp;		/* vnode pointer */
	struct cred *a_cred;		/* cred pointer */
	enum iotype a_io;		/* i/o type */
	union {
		struct nfs_async_read_req a_read_args;
		struct nfs_pageio_req a_pageio_args;
		struct nfs_readdir_req a_readdir_args;
		struct nfs_commit_req a_commit_args;
		struct nfs_inactive_req a_inactive_args;
	} a_args;
};
145 
/*
 * Shorthand accessors for the request-type specific members of
 * struct nfs_async_reqs.  Only the group matching a_io is valid.
 */

/* NFS_READ_AHEAD arguments */
#define	a_nfs_readahead a_args.a_read_args.readahead
#define	a_nfs_blkoff a_args.a_read_args.blkoff
#define	a_nfs_seg a_args.a_read_args.seg
#define	a_nfs_addr a_args.a_read_args.addr

/* NFS_PUTAPAGE/NFS_PAGEIO arguments (both name the same pageio member) */
#define	a_nfs_putapage a_args.a_pageio_args.pageio
#define	a_nfs_pageio a_args.a_pageio_args.pageio
#define	a_nfs_pp a_args.a_pageio_args.pp
#define	a_nfs_off a_args.a_pageio_args.io_off
#define	a_nfs_len a_args.a_pageio_args.io_len
#define	a_nfs_flags a_args.a_pageio_args.flags

/* NFS_READDIR arguments */
#define	a_nfs_readdir a_args.a_readdir_args.readdir
#define	a_nfs_rdc a_args.a_readdir_args.rdc

/* NFS_COMMIT arguments */
#define	a_nfs_commit a_args.a_commit_args.commit
#define	a_nfs_plist a_args.a_commit_args.plist
#define	a_nfs_offset a_args.a_commit_args.offset
#define	a_nfs_count a_args.a_commit_args.count

/* NFS_INACTIVE arguments */
#define	a_nfs_inactive a_args.a_inactive_args.inactive
167 
168 /*
169  * Due to the way the address space callbacks are used to execute a delmap,
170  * we must keep track of how many times the same thread has called
171  * VOP_DELMAP()->nfs_delmap()/nfs3_delmap().  This is done by having a list of
172  * nfs_delmapcall_t's associated with each rnode_t.  This list is protected
173  * by the rnode_t's r_statelock.  The individual elements do not need to be
174  * protected as they will only ever be created, modified and destroyed by
175  * one thread (the call_id).
176  * See nfs_delmap()/nfs3_delmap() for further explanation.
177  */
typedef struct nfs_delmapcall {
	kthread_t	*call_id;	/* the thread making the delmap call */
	int		error;	/* error from delmap */
	list_node_t	call_node;	/* linkage on the rnode's list */
} nfs_delmapcall_t;
183 
184 /*
185  * delmap address space callback args
186  */
typedef struct nfs_delmap_args {
	vnode_t			*vp;	/* file being unmapped */
	offset_t		off;	/* file offset of the mapping */
	caddr_t			addr;	/* user address of the mapping */
	size_t			len;	/* length of the mapping */
	uint_t			prot;	/* current protections */
	uint_t			maxprot; /* max protections */
	uint_t			flags;	/* mapping flags */
	cred_t			*cr;	/* credentials of the caller */
	nfs_delmapcall_t	*caller; /* to retrieve errors from the cb */
} nfs_delmap_args_t;

#ifdef _KERNEL
/* Allocate an nfs_delmapcall_t for the calling thread (see nfs_delmap()). */
extern nfs_delmapcall_t	*nfs_init_delmapcall(void);
/* Free an nfs_delmapcall_t. */
extern void	nfs_free_delmapcall(nfs_delmapcall_t *);
/* Find and remove the caller's entry on the rnode's delmap list. */
extern int	nfs_find_and_delete_delmapcall(rnode_t *, int *errp);
#endif /* _KERNEL */
204 
205 /*
206  * The following structures, chhead and chtab,  make up the client handle
207  * cache.  chhead represents a quadruple(RPC program, RPC version, Protocol
208  * Family, and Transport).  For example, a chhead entry could represent
209  * NFS/V3/IPv4/TCP requests.  chhead nodes are linked together as a singly
210  * linked list and is referenced from chtable.
211  *
212  * chtab represents an allocated client handle bound to a particular
213  * quadruple. These nodes chain down from a chhead node.  chtab
214  * entries which are on the chain are considered free, so a thread may simply
215  * unlink the first node without traversing the chain.  When the thread is
216  * completed with its request, it puts the chtab node back on the chain.
217  */
/* One quadruple in the client handle cache; see block comment above. */
typedef struct chhead {
	struct chhead *ch_next;	/* next quadruple */
	struct chtab *ch_list;	/* pointer to free client handle(s) */
	uint64_t ch_timesused;	/* times this quadruple was requested */
	rpcprog_t ch_prog;	/* RPC program number */
	rpcvers_t ch_vers;	/* RPC version number */
	dev_t ch_dev;		/* pseudo device number (i.e. /dev/udp) */
	char *ch_protofmly;	/* protocol (i.e. NC_INET, NC_LOOPBACK) */
} chtab_t's head, chhead_t;

typedef struct chtab {
	struct chtab *ch_list;	/* next free client handle */
	struct chhead *ch_head;	/* associated quadruple */
	time_t ch_freed;	/* timestamp when freed */
	CLIENT *ch_client;	/* pointer to client handle */
} chtab_t;

/*
 * clinfo is a structure which encapsulates data that is needed to
 * obtain a client handle from the cache
 */
typedef struct clinfo {
	rpcprog_t cl_prog;	/* RPC program number */
	rpcvers_t cl_vers;	/* RPC version number */
	uint_t cl_readsize;	/* transfer size */
	int cl_retrans;		/* times to retry request */
	uint_t cl_flags;	/* info flags */
} clinfo_t;
246 
247 /*
248  * Failover information, passed opaquely through rfscall()
249  */
typedef struct failinfo {
	struct vnode	*vp;	/* file the call operates on */
	caddr_t		fhp;	/* file handle used by the call */
	/* copy the (possibly remapped) file handle out of the vnode */
	void (*copyproc)(caddr_t, vnode_t *);
	/* re-lookup the file on the new server after failover */
	int (*lookupproc)(vnode_t *, char *, vnode_t **, struct pathname *,
			int, vnode_t *, struct cred *, int);
	/* re-lookup an extended attribute directory after failover */
	int (*xattrdirproc)(vnode_t *, vnode_t **, bool_t, cred_t *, int);
} failinfo_t;
258 
259 /*
260  * Static server information
261  *
262  * These fields are protected by sv_lock:
263  *	sv_flags
264  */
typedef struct servinfo {
	struct knetconfig *sv_knconf;   /* bound TLI fd */
	struct knetconfig *sv_origknconf;	/* For RDMA save orig knconf */
	struct netbuf	sv_addr;	/* server's address */
	nfs_fhandle	sv_fhandle;	/* this server's filehandle */
	struct sec_data *sv_secdata;	/* security data for rpcsec module */
	char	*sv_hostname;		/* server's hostname */
	int	sv_hostnamelen;		/* server's hostname length */
	uint_t	sv_flags;		/* see below */
	struct servinfo	*sv_next;	/* next in list */
	kmutex_t sv_lock;		/* protects sv_flags */
} servinfo_t;

/*
 * The values for sv_flags.
 */
#define	SV_ROOT_STALE	0x1		/* root vnode got ESTALE */
282 
/*
 * Switch from RDMA knconf to original mount knconf: evaluates to the
 * saved pre-RDMA netconfig if one exists, otherwise the bound one.
 * The argument is parenthesized so that an expression argument binds
 * correctly; note that "mi" is still evaluated more than once.
 */
#define	ORIG_KNCONF(mi) ((mi)->mi_curr_serv->sv_origknconf ? \
	(mi)->mi_curr_serv->sv_origknconf : (mi)->mi_curr_serv->sv_knconf)
289 
290 /*
291  * NFS private data per mounted file system
292  *	The mi_lock mutex protects the following fields:
293  *		mi_flags
294  *		mi_printed
295  *		mi_down
296  *		mi_tsize
297  *		mi_stsize
298  *		mi_curread
299  *		mi_curwrite
300  *		mi_timers
301  *		mi_curr_serv
302  *		mi_readers
303  *		mi_klmconfig
304  *
305  *	The mi_async_lock mutex protects the following fields:
306  *		mi_async_reqs
307  *		mi_async_req_count
308  *		mi_async_tail
309  *		mi_async_curr[NFS_MAX_ASYNC_QUEUES]
310  *		mi_async_clusters
311  *		mi_async_init_clusters
312  *		mi_threads[NFS_MAX_ASYNC_QUEUES]
313  *		mi_manager_thread
314  *
315  *	Normally the netconfig information for the mount comes from
316  *	mi_curr_serv and mi_klmconfig is NULL.  If NLM calls need to use a
317  *	different transport, mi_klmconfig contains the necessary netconfig
318  *	information.
319  *
320  *	'mi_zone' is initialized at structure creation time, and never
321  *	changes; it may be read without a lock.
322  *
323  *	mi_zone_node is linkage into the mi4_globals.mig_list, and is
324  *	protected by mi4_globals.mig_list_lock.
325  *
326  *	Locking order:
327  *	  mi_globals::mig_lock > mi_async_lock > mi_lock
328  */
typedef struct mntinfo {
	kmutex_t	mi_lock;	/* protects mntinfo fields */
	struct servinfo *mi_servers;    /* server list */
	struct servinfo *mi_curr_serv;  /* current server */
	kcondvar_t	mi_failover_cv;	/* failover synchronization */
	int		mi_readers;	/* failover - users of mi_curr_serv */
	struct vfs	*mi_vfsp;	/* back pointer to vfs */
	enum vtype	mi_type;	/* file type of the root vnode */
	uint_t		mi_flags;	/* see below */
	uint_t		mi_tsize;	/* max read transfer size (bytes) */
	uint_t		mi_stsize;	/* max write transfer size (bytes) */
	int		mi_timeo;	/* initial timeout in 10th sec */
	int		mi_retrans;	/* times to retry request */
	hrtime_t	mi_acregmin;	/* min time to hold cached file attr */
	hrtime_t	mi_acregmax;	/* max time to hold cached file attr */
	hrtime_t	mi_acdirmin;	/* min time to hold cached dir attr */
	hrtime_t	mi_acdirmax;	/* max time to hold cached dir attr */
	len_t		mi_maxfilesize; /* for pathconf _PC_FILESIZEBITS */
	/*
	 * Extra fields for congestion control, one per NFS call type,
	 * plus one global one.
	 */
	struct rpc_timers mi_timers[NFS_CALLTYPES+1];
	int		mi_curread;	/* current read size */
	int		mi_curwrite;	/* current write size */
	/*
	 * Async I/O management
	 * We have 2 pools of threads working on async I/O:
	 *	(i) Threads which work on all async queues. Default number of
	 *	threads in this queue is 8. Threads in this pool work on async
	 *	queue pointed by mi_async_curr[NFS_ASYNC_QUEUE]. Number of
	 *	active threads in this pool is tracked by
	 *	mi_threads[NFS_ASYNC_QUEUE].
	 * 	(ii) Threads which work only on page op async queues.
	 *	Page ops queue comprises of NFS_PUTAPAGE, NFS_PAGEIO &
	 *	NFS_COMMIT. Default number of threads in this queue is 2
	 *	(NUM_ASYNC_PGOPS_THREADS). Threads in this pool work on async
	 *	queue pointed by mi_async_curr[NFS_ASYNC_PGOPS_QUEUE]. Number
	 *	of active threads in this pool is tracked by
	 *	mi_threads[NFS_ASYNC_PGOPS_QUEUE].
	 */
	struct nfs_async_reqs *mi_async_reqs[NFS_ASYNC_TYPES];	/* heads */
	struct nfs_async_reqs *mi_async_tail[NFS_ASYNC_TYPES];	/* tails */
	struct nfs_async_reqs **mi_async_curr[NFS_MAX_ASYNC_QUEUES];
						/* current async queue */
	uint_t		mi_async_clusters[NFS_ASYNC_TYPES];
	uint_t		mi_async_init_clusters;
	uint_t		mi_async_req_count; /* # outstanding work requests */
	kcondvar_t	mi_async_reqs_cv; /* signaled when there's work */
	ushort_t	mi_threads[NFS_MAX_ASYNC_QUEUES];
					/* number of active async threads */
	ushort_t	mi_max_threads;	/* max number of async worker threads */
	kthread_t	*mi_manager_thread;  /* async manager thread */
	kcondvar_t	mi_async_cv; /* signaled when the last worker dies */
	kcondvar_t	mi_async_work_cv[NFS_MAX_ASYNC_QUEUES];
					/* tell workers to work */
	kmutex_t	mi_async_lock;	/* lock to protect async list */
	/*
	 * Other stuff
	 */
	struct pathcnf *mi_pathconf;	/* static pathconf kludge */
	rpcprog_t	mi_prog;	/* RPC program number */
	rpcvers_t	mi_vers;	/* RPC program version number */
	char		**mi_rfsnames;	/* mapping to proc names */
	kstat_named_t	*mi_reqs;	/* count of requests */
	uchar_t		*mi_call_type;	/* dynamic retrans call types */
	uchar_t		*mi_ss_call_type;	/* semisoft call type */
	uchar_t		*mi_timer_type;	/* dynamic retrans timer types */
	clock_t		mi_printftime;	/* last error printf time */
	/*
	 * ACL entries
	 */
	char		**mi_aclnames;	/* mapping to proc names */
	kstat_named_t	*mi_aclreqs;	/* count of acl requests */
	uchar_t		*mi_acl_call_type; /* dynamic retrans call types */
	uchar_t		*mi_acl_ss_call_type; /* semisoft call types */
	uchar_t		*mi_acl_timer_type; /* dynamic retrans timer types */
	/*
	 * Client Side Failover stats
	 */
	uint_t		mi_noresponse;	/* server not responding count */
	uint_t		mi_failover; 	/* failover to new server count */
	uint_t		mi_remap;	/* remap to new server count */
	/*
	 * Kstat statistics
	 */
	struct kstat	*mi_io_kstats;
	struct kstat	*mi_ro_kstats;
	struct knetconfig *mi_klmconfig;
	/*
	 * Zones support.
	 */
	struct zone	*mi_zone;	/* Zone mounted in */
	list_node_t	mi_zone_node;	/* Linkage into per-zone mi list */
	/*
	 * Serializes threads in failover_remap.
	 * Need to acquire this lock first in failover_remap() function
	 * before acquiring any other rnode lock.
	 */
	kmutex_t	mi_remap_lock;
} mntinfo_t;
430 
/*
 * vfs pointer to mount info
 */
#define	VFTOMI(vfsp)	((mntinfo_t *)((vfsp)->vfs_data))

/*
 * vnode pointer to mount info
 */
#define	VTOMI(vp)	((mntinfo_t *)(((vp)->v_vfsp)->vfs_data))

/*
 * The values for mi_flags (protected by mi_lock; see mntinfo_t above).
 */
#define	MI_HARD		0x1		/* hard or soft mount */
#define	MI_PRINTED	0x2		/* not responding message printed */
#define	MI_INT		0x4		/* interrupts allowed on hard mount */
#define	MI_DOWN		0x8		/* server is down */
#define	MI_NOAC		0x10		/* don't cache attributes */
#define	MI_NOCTO	0x20		/* no close-to-open consistency */
#define	MI_DYNAMIC	0x40		/* dynamic transfer size adjustment */
#define	MI_LLOCK	0x80		/* local locking only (no lockmgr) */
#define	MI_GRPID	0x100		/* System V group id inheritance */
#define	MI_RPCTIMESYNC	0x200		/* RPC time sync */
#define	MI_LINK		0x400		/* server supports link */
#define	MI_SYMLINK	0x800		/* server supports symlink */
#define	MI_READDIRONLY	0x1000		/* use readdir instead of readdirplus */
#define	MI_ACL		0x2000		/* server supports NFS_ACL */
#define	MI_BINDINPROG	0x4000		/* binding to server is changing */
#define	MI_LOOPBACK	0x8000		/* Set if this is a loopback mount */
#define	MI_SEMISOFT	0x10000		/* soft reads, hard modify */
#define	MI_NOPRINT	0x20000		/* don't print messages */
#define	MI_DIRECTIO	0x40000		/* do direct I/O */
#define	MI_EXTATTR	0x80000		/* server supports extended attrs */
#define	MI_ASYNC_MGR_STOP	0x100000	/* tell async mgr to die */
#define	MI_DEAD		0x200000	/* mount has been terminated */
466 
467 /*
468  * Read-only mntinfo statistics
469  */
struct mntinfo_kstat {
	char		mik_proto[KNC_STRSIZE];	/* transport protocol name */
	uint32_t	mik_vers;	/* RPC program version */
	uint_t		mik_flags;	/* mount flags (MI_*) */
	uint_t		mik_secmod;	/* security mode */
	uint32_t	mik_curread;	/* current read size */
	uint32_t	mik_curwrite;	/* current write size */
	int		mik_timeo;	/* initial timeout in 10th sec */
	int		mik_retrans;	/* times to retry request */
	uint_t		mik_acregmin;	/* min secs to hold cached file attr */
	uint_t		mik_acregmax;	/* max secs to hold cached file attr */
	uint_t		mik_acdirmin;	/* min secs to hold cached dir attr */
	uint_t		mik_acdirmax;	/* max secs to hold cached dir attr */
	struct {
		uint32_t srtt;
		uint32_t deviate;
		uint32_t rtxcur;
	} mik_timers[NFS_CALLTYPES+1];	/* per call type, plus one global */
	uint32_t	mik_noresponse;	/* server not responding count */
	uint32_t	mik_failover;	/* failover to new server count */
	uint32_t	mik_remap;	/* remap to new server count */
	char		mik_curserver[SYS_NMLN]; /* current server's name */
};
493 
/*
 * Wake up one sleeping async worker thread, preferring the pool that
 * services all request types and falling back to the page-op pool.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies; the argument is
 * parenthesized so an expression argument indexes correctly.
 */
#define	NFS_WAKE_ASYNC_WORKER(work_cv)	do {			\
	if (CV_HAS_WAITERS(&(work_cv)[NFS_ASYNC_QUEUE]))	\
		cv_signal(&(work_cv)[NFS_ASYNC_QUEUE]);		\
	else if (CV_HAS_WAITERS(&(work_cv)[NFS_ASYNC_PGOPS_QUEUE]))	\
		cv_signal(&(work_cv)[NFS_ASYNC_PGOPS_QUEUE]);	\
} while (0)
503 
/*
 * Wake all async worker threads sleeping on either work queue.
 * do { } while (0) makes the macro a single statement (safe in
 * unbraced if/else bodies); the argument is parenthesized.
 */
#define	NFS_WAKEALL_ASYNC_WORKERS(work_cv) do {			\
	cv_broadcast(&(work_cv)[NFS_ASYNC_QUEUE]);		\
	cv_broadcast(&(work_cv)[NFS_ASYNC_PGOPS_QUEUE]);	\
} while (0)
508 
/*
 * Mark cached attributes as timed out
 *
 * The caller must not be holding the rnode r_statelock mutex.
 *
 * do { } while (0) makes the macro a single statement, so it is safe
 * in unbraced if/else bodies and the caller's trailing ';' no longer
 * leaves a stray empty statement.
 */
#define	PURGE_ATTRCACHE(vp)	do {				\
	rnode_t *rp = VTOR(vp);					\
	mutex_enter(&rp->r_statelock);				\
	PURGE_ATTRCACHE_LOCKED(rp);				\
	mutex_exit(&rp->r_statelock);				\
} while (0)
520 
/*
 * As PURGE_ATTRCACHE, but the caller already holds r_statelock.
 * Setting r_attrtime to the current time makes ATTRCACHE_VALID()
 * false from this point on.  The argument is parenthesized so an
 * expression argument binds correctly; do { } while (0) keeps the
 * macro a single statement.
 */
#define	PURGE_ATTRCACHE_LOCKED(rp)	do {			\
	ASSERT(MUTEX_HELD(&(rp)->r_statelock));			\
	(rp)->r_attrtime = gethrtime();				\
	(rp)->r_mtime = (rp)->r_attrtime;			\
} while (0)
526 
/*
 * Is the attribute cache valid?  True while the current time is still
 * earlier than the r_attrtime deadline recorded when the attributes
 * were cached.
 */
#define	ATTRCACHE_VALID(vp)	(gethrtime() < VTOR(vp)->r_attrtime)

/*
 * Flags to indicate whether to purge the DNLC for non-directory vnodes
 * in a call to nfs_purge_caches.
 */
#define	NFS_NOPURGE_DNLC	0
#define	NFS_PURGE_DNLC		1
538 
/*
 * If returned error is ESTALE flush all caches.
 *
 * Marks the rnode stale (latching the first error seen in r_error),
 * flags the server when the root vnode went stale, and dumps any
 * cached pages, attributes and DNLC entries for the vnode.
 *
 * do { } while (0) makes the macro a single statement: the previous
 * bare "if" version would silently capture a caller's "else" branch
 * (dangling else) and was unsafe in unbraced if/else bodies.  The
 * "vp" argument is now parenthesized at every use.
 */
#define	PURGE_STALE_FH(error, vp, cr)	do {			\
	if ((error) == ESTALE) {				\
		struct rnode *rp = VTOR(vp);			\
		if ((vp)->v_flag & VROOT) {			\
			servinfo_t *svp = rp->r_server;		\
			mutex_enter(&svp->sv_lock);		\
			svp->sv_flags |= SV_ROOT_STALE;		\
			mutex_exit(&svp->sv_lock);		\
		}						\
		mutex_enter(&rp->r_statelock);			\
		rp->r_flags |= RSTALE;				\
		if (!rp->r_error)				\
			rp->r_error = (error);			\
		mutex_exit(&rp->r_statelock);			\
		if (vn_has_cached_data(vp))			\
			nfs_invalidate_pages((vp), (u_offset_t)0, (cr)); \
		nfs_purge_caches((vp), NFS_PURGE_DNLC, (cr));	\
	}							\
} while (0)
560 
/*
 * Is cache valid?
 * Swap is always valid, if no attributes (attrtime == 0) or
 * if mtime matches cached mtime it is valid
 * NOTE: mtime is now a timestruc_t.
 * Caller should be holding the rnode r_statelock mutex.
 *
 * NOTE(review): the "attrtime == 0" case described above is not
 * actually tested by the macro below — confirm against callers
 * whether the comment or the macro is stale.
 */
#define	CACHE_VALID(rp, mtime, fsize)				\
	((RTOV(rp)->v_flag & VISSWAP) == VISSWAP ||		\
	(((mtime).tv_sec == (rp)->r_attr.va_mtime.tv_sec &&	\
	(mtime).tv_nsec == (rp)->r_attr.va_mtime.tv_nsec) &&	\
	((fsize) == (rp)->r_attr.va_size)))

/*
 * Macro to detect forced unmount or a zone shutdown.
 */
#define	FS_OR_ZONE_GONE(vfsp) \
	(((vfsp)->vfs_flag & VFS_UNMOUNTED) || \
	zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)

/*
 * Convert NFS tunables to hrtime_t units, seconds to nanoseconds
 * (and back).
 */
#define	SEC2HR(sec)	((sec) * (long long)NANOSEC)
#define	HR2SEC(hr)	((hr) / (long long)NANOSEC)
586 
/*
 * Structure to identify owner of a PC file share reservation.
 */
struct nfs_owner {
	int	magic;		/* magic uniquifying number (NFS_OWNER_MAGIC) */
	char	hname[16];	/* first 16 bytes of hostname */
	char	lowner[8];	/* local owner from fcntl */
};

/*
 * Values for magic.
 */
#define	NFS_OWNER_MAGIC	0x1D81E
600 
/*
 * Support for extended attributes
 */
#define	XATTR_DIR_NAME	"/@/"		/* used for DNLC entries */
#define	XATTR_RPATH	"ExTaTtR"	/* used for r_path for failover */

/*
 * Short hand for checking to see whether the file system was mounted
 * interruptible or not (MI_INT set in mi_flags).
 */
#define	INTR(vp)	(VTOMI(vp)->mi_flags & MI_INT)
612 
/*
 * Short hand for checking whether failover is enabled or not:
 * non-NULL (true) when more than one server is on the list.
 * The argument is parenthesized so an expression argument binds
 * correctly.
 */
#define	FAILOVER_MOUNT(mi)	((mi)->mi_servers->sv_next)
617 
/*
 * How long will async threads wait for additional work.
 */
#define	NFS_ASYNC_TIMEOUT	(60 * 1 * hz)	/* 1 minute */

#ifdef _KERNEL
/* Get a client handle from the cache (see chhead/chtab above). */
extern int	clget(clinfo_t *, servinfo_t *, cred_t *, CLIENT **,
		    struct chtab **);
/* Return a client handle to the cache. */
extern void	clfree(CLIENT *, struct chtab *);
/* Link a mntinfo onto its zone's list. */
extern void	nfs_mi_zonelist_add(mntinfo_t *);
/* Tear down and free a mntinfo. */
extern void	nfs_free_mi(mntinfo_t *);
/* Initialize the mount's kstats. */
extern void	nfs_mnt_kstat_init(struct vfs *);
#endif
631 
632 /*
633  * Per-zone data for managing client handles.  Included here solely for the
634  * benefit of MDB.
635  */
636 /*
637  * client side statistics
638  */
struct clstat {
	kstat_named_t	calls;			/* client requests */
	kstat_named_t	badcalls;		/* rpc failures */
	kstat_named_t	clgets;			/* client handle gets */
	kstat_named_t	cltoomany;		/* client handle cache misses */
#ifdef DEBUG
	kstat_named_t	clalloc;		/* number of client handles */
	kstat_named_t	noresponse;		/* server not responding cnt */
	kstat_named_t	failover;		/* server failover count */
	kstat_named_t	remap;			/* server remap count */
#endif
};

/*
 * Per-zone client state: the client handle cache and client-side
 * statistics, linked through nfscl_node.
 */
struct nfs_clnt {
	struct chhead	*nfscl_chtable;		/* client handle cache */
	kmutex_t	nfscl_chtable_lock;	/* protects nfscl_chtable */
	zoneid_t	nfscl_zoneid;		/* zone this state belongs to */
	list_node_t	nfscl_node;		/* linkage on the zone list */
	struct clstat	nfscl_stat;		/* client side statistics */
};
659 
660 #ifdef	__cplusplus
661 }
662 #endif
663 
664 #endif	/* _NFS_NFS_CLNT_H */
665