/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#ifndef _NFS_NFS_CLNT_H
#define _NFS_NFS_CLNT_H

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include <sys/utsname.h>
#include <sys/kstat.h>
#include <sys/time.h>
#include <vm/page.h>
#include <sys/thread.h>
#include <nfs/rnode.h>
#include <sys/list.h>

#ifdef __cplusplus
extern "C" {
#endif

#define HOSTNAMESZ      32
#define ACREGMIN        3       /* min secs to hold cached file attr */
#define ACREGMAX        60      /* max secs to hold cached file attr */
#define ACDIRMIN        30      /* min secs to hold cached dir attr */
#define ACDIRMAX        60      /* max secs to hold cached dir attr */
#define ACMINMAX        3600    /* 1 hr is longest min timeout */
#define ACMAXMAX        36000   /* 10 hr is longest max timeout */

#define NFS_CALLTYPES   3       /* Lookups, Reads, Writes */

/*
 * rfscall() flags
 */
#define RFSCALL_SOFT    0x00000001      /* Do op as if fs was soft-mounted */

/*
 * Fake errno passed back from rfscall to indicate transfer size adjustment
 */
#define ENFS_TRYAGAIN   999

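/*
 * ENFS_TRYAGAIN implies a retry loop in the caller: it signals that the
 * transfer size has been adjusted, and callers are expected to recompute
 * their transfer size and reissue the request.  A minimal sketch of the
 * pattern (the actual call and its arguments vary by operation):
 *
 *	do {
 *		error = ...issue the request through rfscall()...;
 *	} while (error == ENFS_TRYAGAIN);
 */
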
/*
 * The NFS-specific async_reqs structure.
 */

enum iotype {
	NFS_READ_AHEAD,
	NFS_PUTAPAGE,
	NFS_PAGEIO,
	NFS_READDIR,
	NFS_COMMIT,
	NFS_INACTIVE
};
#define NFS_ASYNC_TYPES (NFS_INACTIVE + 1)

struct nfs_async_read_req {
	void (*readahead)();		/* pointer to readahead function */
	u_offset_t blkoff;		/* offset in file */
	struct seg *seg;		/* segment to do i/o to */
	caddr_t addr;			/* address to do i/o to */
};

struct nfs_pageio_req {
	int (*pageio)();		/* pointer to pageio function */
	page_t *pp;			/* page list */
	u_offset_t io_off;		/* offset in file */
	uint_t io_len;			/* size of request */
	int flags;
};

struct nfs_readdir_req {
	int (*readdir)();		/* pointer to readdir function */
	struct rddir_cache *rdc;	/* pointer to cache entry to fill */
};

struct nfs_commit_req {
	void (*commit)();		/* pointer to commit function */
	page_t *plist;			/* page list */
	offset3 offset;			/* starting offset */
	count3 count;			/* size of range to be committed */
};

struct nfs_inactive_req {
	void (*inactive)();		/* pointer to inactive function */
};

struct nfs_async_reqs {
	struct nfs_async_reqs *a_next;	/* pointer to next arg struct */
#ifdef DEBUG
	kthread_t *a_queuer;		/* thread id of queueing thread */
#endif
	struct vnode *a_vp;		/* vnode pointer */
	struct cred *a_cred;		/* cred pointer */
	enum iotype a_io;		/* i/o type */
	union {
		struct nfs_async_read_req a_read_args;
		struct nfs_pageio_req a_pageio_args;
		struct nfs_readdir_req a_readdir_args;
		struct nfs_commit_req a_commit_args;
		struct nfs_inactive_req a_inactive_args;
	} a_args;
};

#define a_nfs_readahead a_args.a_read_args.readahead
#define a_nfs_blkoff a_args.a_read_args.blkoff
#define a_nfs_seg a_args.a_read_args.seg
#define a_nfs_addr a_args.a_read_args.addr

#define a_nfs_putapage a_args.a_pageio_args.pageio
#define a_nfs_pageio a_args.a_pageio_args.pageio
#define a_nfs_pp a_args.a_pageio_args.pp
#define a_nfs_off a_args.a_pageio_args.io_off
#define a_nfs_len a_args.a_pageio_args.io_len
#define a_nfs_flags a_args.a_pageio_args.flags

#define a_nfs_readdir a_args.a_readdir_args.readdir
#define a_nfs_rdc a_args.a_readdir_args.rdc

#define a_nfs_commit a_args.a_commit_args.commit
#define a_nfs_plist a_args.a_commit_args.plist
#define a_nfs_offset a_args.a_commit_args.offset
#define a_nfs_count a_args.a_commit_args.count

#define a_nfs_inactive a_args.a_inactive_args.inactive

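/*
 * A minimal sketch of how an async request is queued, based on the
 * structures above and on the mi_async_* fields of mntinfo_t defined later
 * in this header.  Reference counting (VN_HOLD/crhold), the DEBUG a_queuer
 * bookkeeping, thread-limit checks and the synchronous fallback used by
 * the real queueing routines are all omitted.
 *
 *	struct nfs_async_reqs *args;
 *
 *	args = kmem_alloc(sizeof (*args), KM_SLEEP);
 *	args->a_next = NULL;
 *	args->a_vp = vp;
 *	args->a_cred = cr;
 *	args->a_io = NFS_READ_AHEAD;
 *	args->a_nfs_readahead = readahead;
 *	args->a_nfs_blkoff = blkoff;
 *	args->a_nfs_seg = seg;
 *	args->a_nfs_addr = addr;
 *
 *	mutex_enter(&mi->mi_async_lock);
 *	if (mi->mi_async_reqs[NFS_READ_AHEAD] == NULL)
 *		mi->mi_async_reqs[NFS_READ_AHEAD] = args;
 *	else
 *		mi->mi_async_tail[NFS_READ_AHEAD]->a_next = args;
 *	mi->mi_async_tail[NFS_READ_AHEAD] = args;
 *	mi->mi_async_req_count++;
 *	cv_signal(&mi->mi_async_reqs_cv);
 *	mutex_exit(&mi->mi_async_lock);
 */
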
/*
 * Due to the way the address space callbacks are used to execute a delmap,
 * we must keep track of how many times the same thread has called
 * VOP_DELMAP()->nfs_delmap()/nfs3_delmap().  This is done by having a list of
 * nfs_delmapcall_t's associated with each rnode_t.  This list is protected
 * by the rnode_t's r_statelock.  The individual elements do not need to be
 * protected as they will only ever be created, modified and destroyed by
 * one thread (the call_id).
 * See nfs_delmap()/nfs3_delmap() for further explanation.
 */
typedef struct nfs_delmapcall {
	kthread_t	*call_id;
	int		error;		/* error from delmap */
	list_node_t	call_node;
} nfs_delmapcall_t;

/*
 * delmap address space callback args
 */
typedef struct nfs_delmap_args {
	vnode_t			*vp;
	offset_t		off;
	caddr_t			addr;
	size_t			len;
	uint_t			prot;
	uint_t			maxprot;
	uint_t			flags;
	cred_t			*cr;
	nfs_delmapcall_t	*caller;	/* to retrieve errors from the cb */
} nfs_delmap_args_t;

#ifdef _KERNEL
extern nfs_delmapcall_t	*nfs_init_delmapcall(void);
extern void	nfs_free_delmapcall(nfs_delmapcall_t *);
extern int	nfs_find_and_delete_delmapcall(rnode_t *, int *errp);
#endif /* _KERNEL */

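/*
 * A minimal sketch of the pattern described above, as a caller such as
 * nfs_delmap()/nfs3_delmap() might use it.  The per-rnode list is
 * r_indelmap (declared in <nfs/rnode.h>); setting up the nfs_delmap_args_t
 * and registering the address space callback are not shown.  If
 * nfs_find_and_delete_delmapcall() finds an entry for the current thread,
 * this is the re-entrant call made via the callback, and the previously
 * recorded error is returned:
 *
 *	nfs_delmapcall_t *delmap_call;
 *	int caller_error;
 *
 *	if (nfs_find_and_delete_delmapcall(rp, &caller_error))
 *		return (caller_error);
 *
 *	delmap_call = nfs_init_delmapcall();
 *	mutex_enter(&rp->r_statelock);
 *	list_insert_tail(&rp->r_indelmap, delmap_call);
 *	mutex_exit(&rp->r_statelock);
 */
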
/*
 * The following structures, chhead and chtab, make up the client handle
 * cache.  chhead represents a quadruple (RPC program, RPC version, Protocol
 * Family, and Transport).  For example, a chhead entry could represent
 * NFS/V3/IPv4/TCP requests.  chhead nodes are linked together in a singly
 * linked list that is referenced from chtable.
 *
 * chtab represents an allocated client handle bound to a particular
 * quadruple.  These nodes chain down from a chhead node.  chtab
 * entries which are on the chain are considered free, so a thread may simply
 * unlink the first node without traversing the chain.  When the thread has
 * completed its request, it puts the chtab node back on the chain.
 */
typedef struct chhead {
	struct chhead	*ch_next;	/* next quadruple */
	struct chtab	*ch_list;	/* pointer to free client handle(s) */
	uint64_t	ch_timesused;	/* times this quadruple was requested */
	rpcprog_t	ch_prog;	/* RPC program number */
	rpcvers_t	ch_vers;	/* RPC version number */
	dev_t		ch_dev;		/* pseudo device number (e.g. /dev/udp) */
	char		*ch_protofmly;	/* protocol (e.g. NC_INET, NC_LOOPBACK) */
} chhead_t;

typedef struct chtab {
	struct chtab	*ch_list;	/* next free client handle */
	struct chhead	*ch_head;	/* associated quadruple */
	time_t		ch_freed;	/* timestamp when freed */
	CLIENT		*ch_client;	/* pointer to client handle */
} chtab_t;

/*
 * clinfo is a structure that encapsulates the data needed to obtain a
 * client handle from the cache.
 */
typedef struct clinfo {
	rpcprog_t	cl_prog;	/* RPC program number */
	rpcvers_t	cl_vers;	/* RPC version number */
	uint_t		cl_readsize;	/* transfer size */
	int		cl_retrans;	/* times to retry request */
	uint_t		cl_flags;	/* info flags */
} clinfo_t;

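/*
 * A minimal sketch of how a caller borrows a cached client handle for one
 * call and then returns it to the cache.  clget() and clfree() are declared
 * near the end of this header (under #ifdef _KERNEL); the clinfo
 * initialization shown here, taken from the mount's mntinfo_t, is
 * illustrative, and error handling is omitted.
 *
 *	clinfo_t ci;
 *	CLIENT *client;
 *	struct chtab *ch;
 *	int error;
 *
 *	ci.cl_prog = mi->mi_prog;
 *	ci.cl_vers = mi->mi_vers;
 *	ci.cl_readsize = mi->mi_tsize;
 *	ci.cl_retrans = mi->mi_retrans;
 *	ci.cl_flags = 0;
 *
 *	error = clget(&ci, mi->mi_curr_serv, cr, &client, &ch);
 *	if (error == 0) {
 *		(use client to issue the RPC)
 *		clfree(client, ch);
 *	}
 *
 * clfree() simply puts the chtab node back on its chhead free chain, so the
 * handle can be reused by the next request for the same quadruple.
 */
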
/*
 * Failover information, passed opaquely through rfscall()
 */
typedef struct failinfo {
	struct vnode	*vp;
	caddr_t		fhp;
	void		(*copyproc)(caddr_t, vnode_t *);
	int		(*lookupproc)(vnode_t *, char *, vnode_t **,
			    struct pathname *, int, vnode_t *, struct cred *,
			    int);
	int		(*xattrdirproc)(vnode_t *, vnode_t **, bool_t,
			    cred_t *, int);
} failinfo_t;

/*
 * Static server information
 *
 * These fields are protected by sv_lock:
 *	sv_flags
 */
typedef struct servinfo {
	struct knetconfig *sv_knconf;	/* bound TLI fd */
	struct knetconfig *sv_origknconf;	/* For RDMA save orig knconf */
	struct netbuf	sv_addr;	/* server's address */
	nfs_fhandle	sv_fhandle;	/* this server's filehandle */
	struct sec_data	*sv_secdata;	/* security data for rpcsec module */
	char		*sv_hostname;	/* server's hostname */
	int		sv_hostnamelen;	/* server's hostname length */
	uint_t		sv_flags;	/* see below */
	struct servinfo	*sv_next;	/* next in list */
	kmutex_t	sv_lock;
} servinfo_t;

/*
 * The values for sv_flags.
 */
#define SV_ROOT_STALE	0x1	/* root vnode got ESTALE */

/*
 * Switch from RDMA knconf to original mount knconf
 */
#define ORIG_KNCONF(mi)	(mi->mi_curr_serv->sv_origknconf ? \
	mi->mi_curr_serv->sv_origknconf : mi->mi_curr_serv->sv_knconf)

/*
 * NFS private data per mounted file system
 *	The mi_lock mutex protects the following fields:
 *		mi_flags
 *		mi_printed
 *		mi_down
 *		mi_tsize
 *		mi_stsize
 *		mi_curread
 *		mi_curwrite
 *		mi_timers
 *		mi_curr_serv
 *		mi_readers
 *		mi_klmconfig
 *
 *	The mi_async_lock mutex protects the following fields:
 *		mi_async_reqs
 *		mi_async_req_count
 *		mi_async_tail
 *		mi_async_curr
 *		mi_async_clusters
 *		mi_async_init_clusters
 *		mi_threads
 *		mi_manager_thread
 *
 * Normally the netconfig information for the mount comes from
 * mi_curr_serv and mi_klmconfig is NULL.  If NLM calls need to use a
 * different transport, mi_klmconfig contains the necessary netconfig
 * information.
 *
 * 'mi_zone' is initialized at structure creation time, and never
 * changes; it may be read without a lock.
 *
 * mi_zone_node is linkage into the mi4_globals.mig_list, and is
 * protected by mi4_globals.mig_list_lock.
 *
 * Locking order:
 *	mi_globals::mig_lock > mi_async_lock > mi_lock
 */
typedef struct mntinfo {
	kmutex_t	mi_lock;	/* protects mntinfo fields */
	struct servinfo	*mi_servers;	/* server list */
	struct servinfo	*mi_curr_serv;	/* current server */
	kcondvar_t	mi_failover_cv;	/* failover synchronization */
	int		mi_readers;	/* failover - users of mi_curr_serv */
	struct vfs	*mi_vfsp;	/* back pointer to vfs */
	enum vtype	mi_type;	/* file type of the root vnode */
	uint_t		mi_flags;	/* see below */
	uint_t		mi_tsize;	/* max read transfer size (bytes) */
	uint_t		mi_stsize;	/* max write transfer size (bytes) */
	int		mi_timeo;	/* initial timeout in 10ths of a sec */
	int		mi_retrans;	/* times to retry request */
	hrtime_t	mi_acregmin;	/* min time to hold cached file attr */
	hrtime_t	mi_acregmax;	/* max time to hold cached file attr */
	hrtime_t	mi_acdirmin;	/* min time to hold cached dir attr */
	hrtime_t	mi_acdirmax;	/* max time to hold cached dir attr */
	len_t		mi_maxfilesize;	/* for pathconf _PC_FILESIZEBITS */
	/*
	 * Extra fields for congestion control, one per NFS call type,
	 * plus one global one.
	 */
	struct rpc_timers mi_timers[NFS_CALLTYPES+1];
	int		mi_curread;	/* current read size */
	int		mi_curwrite;	/* current write size */
	/*
	 * async I/O management
	 */
	struct nfs_async_reqs *mi_async_reqs[NFS_ASYNC_TYPES];
	struct nfs_async_reqs *mi_async_tail[NFS_ASYNC_TYPES];
	struct nfs_async_reqs **mi_async_curr;	/* current async queue */
	uint_t		mi_async_clusters[NFS_ASYNC_TYPES];
	uint_t		mi_async_init_clusters;
	uint_t		mi_async_req_count;	/* # outstanding work requests */
	kcondvar_t	mi_async_reqs_cv;	/* signaled when there's work */
	ushort_t	mi_threads;	/* number of active async threads */
	ushort_t	mi_max_threads;	/* max number of async worker threads */
	kthread_t	*mi_manager_thread;	/* async manager thread */
	kcondvar_t	mi_async_cv;	/* signaled when the last worker dies */
	kcondvar_t	mi_async_work_cv;	/* tell workers to work */
	kmutex_t	mi_async_lock;	/* lock to protect async list */
	/*
	 * Other stuff
	 */
	struct pathcnf	*mi_pathconf;	/* static pathconf kludge */
	rpcprog_t	mi_prog;	/* RPC program number */
	rpcvers_t	mi_vers;	/* RPC program version number */
	char		**mi_rfsnames;	/* mapping to proc names */
	kstat_named_t	*mi_reqs;	/* count of requests */
	uchar_t		*mi_call_type;	/* dynamic retrans call types */
	uchar_t		*mi_ss_call_type;	/* semisoft call type */
	uchar_t		*mi_timer_type;	/* dynamic retrans timer types */
	clock_t		mi_printftime;	/* last error printf time */
	/*
	 * ACL entries
	 */
	char		**mi_aclnames;	/* mapping to proc names */
	kstat_named_t	*mi_aclreqs;	/* count of acl requests */
	uchar_t		*mi_acl_call_type;	/* dynamic retrans call types */
	uchar_t		*mi_acl_ss_call_type;	/* semisoft call types */
	uchar_t		*mi_acl_timer_type;	/* dynamic retrans timer types */
	/*
	 * Client Side Failover stats
	 */
	uint_t		mi_noresponse;	/* server not responding count */
	uint_t		mi_failover;	/* failover to new server count */
	uint_t		mi_remap;	/* remap to new server count */
	/*
	 * Kstat statistics
	 */
	struct kstat	*mi_io_kstats;
	struct kstat	*mi_ro_kstats;
	struct knetconfig *mi_klmconfig;
	/*
	 * Zones support.
	 */
	struct zone	*mi_zone;	/* Zone mounted in */
	list_node_t	mi_zone_node;	/* Linkage into per-zone mi list */
} mntinfo_t;

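/*
 * A small illustration of the locking order documented above: a thread
 * that needs both the async queue state and mi_flags takes mi_async_lock
 * before mi_lock and releases them in the opposite order.  The flag and
 * field tested here are only examples.
 *
 *	mutex_enter(&mi->mi_async_lock);
 *	mutex_enter(&mi->mi_lock);
 *	if ((mi->mi_flags & MI_ASYNC_MGR_STOP) || mi->mi_threads == 0)
 *		stopping = 1;
 *	mutex_exit(&mi->mi_lock);
 *	mutex_exit(&mi->mi_async_lock);
 */
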
/*
 * vfs pointer to mount info
 */
#define VFTOMI(vfsp)	((mntinfo_t *)((vfsp)->vfs_data))

/*
 * vnode pointer to mount info
 */
#define VTOMI(vp)	((mntinfo_t *)(((vp)->v_vfsp)->vfs_data))

/*
 * The values for mi_flags.
 */
#define MI_HARD		0x1		/* hard or soft mount */
#define MI_PRINTED	0x2		/* not responding message printed */
#define MI_INT		0x4		/* interrupts allowed on hard mount */
#define MI_DOWN		0x8		/* server is down */
#define MI_NOAC		0x10		/* don't cache attributes */
#define MI_NOCTO	0x20		/* no close-to-open consistency */
#define MI_DYNAMIC	0x40		/* dynamic transfer size adjustment */
#define MI_LLOCK	0x80		/* local locking only (no lockmgr) */
#define MI_GRPID	0x100		/* System V group id inheritance */
#define MI_RPCTIMESYNC	0x200		/* RPC time sync */
#define MI_LINK		0x400		/* server supports link */
#define MI_SYMLINK	0x800		/* server supports symlink */
#define MI_READDIRONLY	0x1000		/* use readdir instead of readdirplus */
#define MI_ACL		0x2000		/* server supports NFS_ACL */
#define MI_BINDINPROG	0x4000		/* binding to server is changing */
#define MI_LOOPBACK	0x8000		/* Set if this is a loopback mount */
#define MI_SEMISOFT	0x10000		/* soft reads, hard modify */
#define MI_NOPRINT	0x20000		/* don't print messages */
#define MI_DIRECTIO	0x40000		/* do direct I/O */
#define MI_EXTATTR	0x80000		/* server supports extended attrs */
#define MI_ASYNC_MGR_STOP 0x100000	/* tell async mgr to die */
#define MI_DEAD		0x200000	/* mount has been terminated */

/*
 * Read-only mntinfo statistics
 */
struct mntinfo_kstat {
	char		mik_proto[KNC_STRSIZE];
	uint32_t	mik_vers;
	uint_t		mik_flags;
	uint_t		mik_secmod;
	uint32_t	mik_curread;
	uint32_t	mik_curwrite;
	int		mik_timeo;
	int		mik_retrans;
	uint_t		mik_acregmin;
	uint_t		mik_acregmax;
	uint_t		mik_acdirmin;
	uint_t		mik_acdirmax;
	struct {
		uint32_t srtt;
		uint32_t deviate;
		uint32_t rtxcur;
	} mik_timers[NFS_CALLTYPES+1];
	uint32_t	mik_noresponse;
	uint32_t	mik_failover;
	uint32_t	mik_remap;
	char		mik_curserver[SYS_NMLN];
};

/*
 * Mark cached attributes as timed out
 *
 * The caller must not be holding the rnode r_statelock mutex.
 */
#define PURGE_ATTRCACHE(vp)	{				\
	rnode_t *rp = VTOR(vp);					\
	mutex_enter(&rp->r_statelock);				\
	PURGE_ATTRCACHE_LOCKED(rp);				\
	mutex_exit(&rp->r_statelock);				\
}

#define PURGE_ATTRCACHE_LOCKED(rp)	{			\
	ASSERT(MUTEX_HELD(&rp->r_statelock));			\
	rp->r_attrtime = gethrtime();				\
	rp->r_mtime = rp->r_attrtime;				\
}

/*
 * Is the attribute cache valid?
 */
#define ATTRCACHE_VALID(vp)	(gethrtime() < VTOR(vp)->r_attrtime)

/*
 * Flags to indicate whether to purge the DNLC for non-directory vnodes
 * in a call to nfs_purge_caches.
 */
#define NFS_NOPURGE_DNLC	0
#define NFS_PURGE_DNLC		1

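/*
 * A minimal sketch of how the macros above fit together on an attribute
 * lookup path: trust the cache while ATTRCACHE_VALID() holds, otherwise
 * fetch fresh attributes and purge the cache if that fails.  The
 * over-the-wire step is elided.
 *
 *	if (ATTRCACHE_VALID(vp)) {
 *		(use the attributes cached in VTOR(vp)->r_attr)
 *	} else {
 *		error = ...fetch attributes from the server...;
 *		if (error)
 *			PURGE_ATTRCACHE(vp);
 *	}
 */
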
/*
 * If the returned error is ESTALE, flush all caches.
 */
#define PURGE_STALE_FH(error, vp, cr)				\
	if ((error) == ESTALE) {				\
		struct rnode *rp = VTOR(vp);			\
		if (vp->v_flag & VROOT) {			\
			servinfo_t *svp = rp->r_server;		\
			mutex_enter(&svp->sv_lock);		\
			svp->sv_flags |= SV_ROOT_STALE;		\
			mutex_exit(&svp->sv_lock);		\
		}						\
		mutex_enter(&rp->r_statelock);			\
		rp->r_flags |= RSTALE;				\
		if (!rp->r_error)				\
			rp->r_error = (error);			\
		mutex_exit(&rp->r_statelock);			\
		if (vn_has_cached_data(vp))			\
			nfs_invalidate_pages((vp), (u_offset_t)0, (cr)); \
		nfs_purge_caches((vp), NFS_PURGE_DNLC, (cr));	\
	}

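/*
 * PURGE_STALE_FH() is intended to be applied to the result of an
 * over-the-wire operation once it has completed, for example:
 *
 *	error = ...over-the-wire operation on vp...;
 *	PURGE_STALE_FH(error, vp, cr);
 *	return (error);
 *
 * Note that the macro expands to a bare if statement, so it should not be
 * used as the body of an unbraced if/else.
 */
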
/*
 * Is the cache valid?
 * Swap is always valid; otherwise the cache is valid if the given mtime
 * and size match the cached attributes.
 * NOTE: mtime is now a timestruc_t.
 * The caller should be holding the rnode r_statelock mutex.
 */
#define CACHE_VALID(rp, mtime, fsize)				\
	((RTOV(rp)->v_flag & VISSWAP) == VISSWAP ||		\
	(((mtime).tv_sec == (rp)->r_attr.va_mtime.tv_sec &&	\
	(mtime).tv_nsec == (rp)->r_attr.va_mtime.tv_nsec) &&	\
	((fsize) == (rp)->r_attr.va_size)))

/*
 * Macro to detect forced unmount or a zone shutdown.
 */
#define FS_OR_ZONE_GONE(vfsp)					\
	(((vfsp)->vfs_flag & VFS_UNMOUNTED) ||			\
	zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)

/*
 * Convert NFS tunables to hrtime_t units, seconds to nanoseconds.
 */
#define SEC2HR(sec)	((sec) * (long long)NANOSEC)
#define HR2SEC(hr)	((hr) / (long long)NANOSEC)

/*
 * Structure to identify owner of a PC file share reservation.
 */
struct nfs_owner {
	int	magic;		/* magic uniquifying number */
	char	hname[16];	/* first 16 bytes of hostname */
	char	lowner[8];	/* local owner from fcntl */
};

/*
 * Values for magic.
 */
#define NFS_OWNER_MAGIC	0x1D81E

/*
 * Support for extended attributes
 */
#define XATTR_DIR_NAME	"/@/"		/* used for DNLC entries */
#define XATTR_RPATH	"ExTaTtR"	/* used for r_path for failover */

/*
 * Shorthand for checking whether the file system was mounted interruptible.
 */
#define INTR(vp)	(VTOMI(vp)->mi_flags & MI_INT)

/*
 * Shorthand for checking whether failover is enabled.
 */
#define FAILOVER_MOUNT(mi)	(mi->mi_servers->sv_next)

/*
 * How long async threads will wait for additional work.
 */
#define NFS_ASYNC_TIMEOUT	(60 * 1 * hz)	/* 1 minute */

#ifdef _KERNEL
extern int	clget(clinfo_t *, servinfo_t *, cred_t *, CLIENT **,
    struct chtab **);
extern void	clfree(CLIENT *, struct chtab *);
extern void	nfs_mi_zonelist_add(mntinfo_t *);
extern void	nfs_free_mi(mntinfo_t *);
extern void	nfs_mnt_kstat_init(struct vfs *);
#endif

/*
 * Per-zone data for managing client handles.  Included here solely for the
 * benefit of MDB.
 */
/*
 * Client side statistics
 */
struct clstat {
	kstat_named_t	calls;		/* client requests */
	kstat_named_t	badcalls;	/* rpc failures */
	kstat_named_t	clgets;		/* client handle gets */
	kstat_named_t	cltoomany;	/* client handle cache misses */
#ifdef DEBUG
	kstat_named_t	clalloc;	/* number of client handles */
	kstat_named_t	noresponse;	/* server not responding cnt */
	kstat_named_t	failover;	/* server failover count */
	kstat_named_t	remap;		/* server remap count */
#endif
};

struct nfs_clnt {
	struct chhead	*nfscl_chtable;
	kmutex_t	nfscl_chtable_lock;
	zoneid_t	nfscl_zoneid;
	list_node_t	nfscl_node;
	struct clstat	nfscl_stat;
};

#ifdef __cplusplus
}
#endif

#endif /* _NFS_NFS_CLNT_H */