/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef APPLEKEXT
#include <sys/extattr.h>
#include <fs/nfs/nfsport.h>

struct nfsrv_stablefirst nfsrv_stablefirst;
int nfsrv_issuedelegs = 0;
int nfsrv_dolocallocks = 0;
struct nfsv4lock nfsv4rootfs_lock;
time_t nfsdev_time = 0;
int nfsrv_layouthashsize;
volatile int nfsrv_layoutcnt = 0;

extern int newnfs_numnfsd;
extern struct nfsstatsv1 nfsstatsv1;
extern int nfsrv_lease;
extern struct timeval nfsboottime;
extern u_int32_t newnfs_true, newnfs_false;
extern struct mtx nfsrv_dslock_mtx;
extern struct mtx nfsrv_recalllock_mtx;
extern struct mtx nfsrv_dontlistlock_mtx;
extern int nfsd_debuglevel;
extern u_int nfsrv_dsdirsize;
extern struct nfsdevicehead nfsrv_devidhead;
extern int nfsrv_doflexfile;
extern int nfsrv_maxpnfsmirror;
NFSV4ROOTLOCKMUTEX;
NFSSTATESPINLOCK;
extern struct nfsdontlisthead nfsrv_dontlisthead;
extern volatile int nfsrv_devidcnt;
extern struct nfslayouthead nfsrv_recalllisthead;

SYSCTL_DECL(_vfs_nfsd);
int nfsrv_statehashsize = NFSSTATEHASHSIZE;
SYSCTL_INT(_vfs_nfsd, OID_AUTO, statehashsize, CTLFLAG_RDTUN,
    &nfsrv_statehashsize, 0,
    "Size of state hash table set via loader.conf");

int nfsrv_clienthashsize = NFSCLIENTHASHSIZE;
SYSCTL_INT(_vfs_nfsd, OID_AUTO, clienthashsize, CTLFLAG_RDTUN,
    &nfsrv_clienthashsize, 0,
    "Size of client hash table set via loader.conf");

int nfsrv_lockhashsize = NFSLOCKHASHSIZE;
SYSCTL_INT(_vfs_nfsd, OID_AUTO, fhhashsize, CTLFLAG_RDTUN,
    &nfsrv_lockhashsize, 0,
    "Size of file handle hash table set via loader.conf");

int nfsrv_sessionhashsize = NFSSESSIONHASHSIZE;
SYSCTL_INT(_vfs_nfsd, OID_AUTO, sessionhashsize, CTLFLAG_RDTUN,
    &nfsrv_sessionhashsize, 0,
    "Size of session hash table set via loader.conf");

int nfsrv_layouthighwater = NFSLAYOUTHIGHWATER;
SYSCTL_INT(_vfs_nfsd, OID_AUTO, layouthighwater, CTLFLAG_RDTUN,
    &nfsrv_layouthighwater, 0,
    "High water mark for number of layouts set via
loader.conf"); 89 90 static int nfsrv_v4statelimit = NFSRV_V4STATELIMIT; 91 SYSCTL_INT(_vfs_nfsd, OID_AUTO, v4statelimit, CTLFLAG_RWTUN, 92 &nfsrv_v4statelimit, 0, 93 "High water limit for NFSv4 opens+locks+delegations"); 94 95 static int nfsrv_writedelegifpos = 0; 96 SYSCTL_INT(_vfs_nfsd, OID_AUTO, writedelegifpos, CTLFLAG_RW, 97 &nfsrv_writedelegifpos, 0, 98 "Issue a write delegation for read opens if possible"); 99 100 static int nfsrv_allowreadforwriteopen = 1; 101 SYSCTL_INT(_vfs_nfsd, OID_AUTO, allowreadforwriteopen, CTLFLAG_RW, 102 &nfsrv_allowreadforwriteopen, 0, 103 "Allow Reads to be done with Write Access StateIDs"); 104 105 int nfsrv_pnfsatime = 0; 106 SYSCTL_INT(_vfs_nfsd, OID_AUTO, pnfsstrictatime, CTLFLAG_RW, 107 &nfsrv_pnfsatime, 0, 108 "For pNFS service, do Getattr ops to keep atime up-to-date"); 109 110 int nfsrv_flexlinuxhack = 0; 111 SYSCTL_INT(_vfs_nfsd, OID_AUTO, flexlinuxhack, CTLFLAG_RW, 112 &nfsrv_flexlinuxhack, 0, 113 "For Linux clients, hack around Flex File Layout bug"); 114 115 /* 116 * Hash lists for nfs V4. 117 */ 118 struct nfsclienthashhead *nfsclienthash; 119 struct nfslockhashhead *nfslockhash; 120 struct nfssessionhash *nfssessionhash; 121 struct nfslayouthash *nfslayouthash; 122 volatile int nfsrv_dontlistlen = 0; 123 #endif /* !APPLEKEXT */ 124 125 static u_int32_t nfsrv_openpluslock = 0, nfsrv_delegatecnt = 0; 126 static time_t nfsrvboottime; 127 static int nfsrv_returnoldstateid = 0, nfsrv_clients = 0; 128 static int nfsrv_clienthighwater = NFSRV_CLIENTHIGHWATER; 129 static int nfsrv_nogsscallback = 0; 130 static volatile int nfsrv_writedelegcnt = 0; 131 132 /* local functions */ 133 static void nfsrv_dumpaclient(struct nfsclient *clp, 134 struct nfsd_dumpclients *dumpp); 135 static void nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, 136 NFSPROC_T *p); 137 static int nfsrv_freeopen(struct nfsstate *stp, vnode_t vp, int cansleep, 138 NFSPROC_T *p); 139 static void nfsrv_freelockowner(struct nfsstate *stp, vnode_t vp, int cansleep, 140 NFSPROC_T *p); 141 static void nfsrv_freeallnfslocks(struct nfsstate *stp, vnode_t vp, 142 int cansleep, NFSPROC_T *p); 143 static void nfsrv_freenfslock(struct nfslock *lop); 144 static void nfsrv_freenfslockfile(struct nfslockfile *lfp); 145 static void nfsrv_freedeleg(struct nfsstate *); 146 static int nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp, 147 u_int32_t flags, struct nfsstate **stpp); 148 static void nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp, 149 struct nfsstate **stpp); 150 static int nfsrv_getlockfh(vnode_t vp, u_short flags, 151 struct nfslockfile *new_lfp, fhandle_t *nfhp, NFSPROC_T *p); 152 static int nfsrv_getlockfile(u_short flags, struct nfslockfile **new_lfpp, 153 struct nfslockfile **lfpp, fhandle_t *nfhp, int lockit); 154 static void nfsrv_insertlock(struct nfslock *new_lop, 155 struct nfslock *insert_lop, struct nfsstate *stp, struct nfslockfile *lfp); 156 static void nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp, 157 struct nfslock **other_lopp, struct nfslockfile *lfp); 158 static int nfsrv_getipnumber(u_char *cp); 159 static int nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags, 160 nfsv4stateid_t *stateidp, int specialid); 161 static int nfsrv_checkgrace(struct nfsrv_descript *nd, struct nfsclient *clp, 162 u_int32_t flags); 163 static int nfsrv_docallback(struct nfsclient *clp, int procnum, 164 nfsv4stateid_t *stateidp, int trunc, fhandle_t *fhp, 165 struct nfsvattr *nap, nfsattrbit_t *attrbitp, int laytype, NFSPROC_T 
*p); 166 static int nfsrv_cbcallargs(struct nfsrv_descript *nd, struct nfsclient *clp, 167 uint32_t callback, int op, const char *optag, struct nfsdsession **sepp); 168 static u_int32_t nfsrv_nextclientindex(void); 169 static u_int32_t nfsrv_nextstateindex(struct nfsclient *clp); 170 static void nfsrv_markstable(struct nfsclient *clp); 171 static void nfsrv_markreclaim(struct nfsclient *clp); 172 static int nfsrv_checkstable(struct nfsclient *clp); 173 static int nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, struct 174 vnode *vp, NFSPROC_T *p); 175 static int nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, 176 NFSPROC_T *p, vnode_t vp); 177 static int nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp, 178 struct nfsclient *clp, int *haslockp, NFSPROC_T *p); 179 static int nfsrv_notsamecredname(struct nfsrv_descript *nd, 180 struct nfsclient *clp); 181 static time_t nfsrv_leaseexpiry(void); 182 static void nfsrv_delaydelegtimeout(struct nfsstate *stp); 183 static int nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid, 184 struct nfsstate *stp, struct nfsrvcache *op); 185 static int nfsrv_nootherstate(struct nfsstate *stp); 186 static int nfsrv_locallock(vnode_t vp, struct nfslockfile *lfp, int flags, 187 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p); 188 static void nfsrv_localunlock(vnode_t vp, struct nfslockfile *lfp, 189 uint64_t init_first, uint64_t init_end, NFSPROC_T *p); 190 static int nfsrv_dolocal(vnode_t vp, struct nfslockfile *lfp, int flags, 191 int oldflags, uint64_t first, uint64_t end, struct nfslockconflict *cfp, 192 NFSPROC_T *p); 193 static void nfsrv_locallock_rollback(vnode_t vp, struct nfslockfile *lfp, 194 NFSPROC_T *p); 195 static void nfsrv_locallock_commit(struct nfslockfile *lfp, int flags, 196 uint64_t first, uint64_t end); 197 static void nfsrv_locklf(struct nfslockfile *lfp); 198 static void nfsrv_unlocklf(struct nfslockfile *lfp); 199 static struct nfsdsession *nfsrv_findsession(uint8_t *sessionid); 200 static int nfsrv_freesession(struct nfsdsession *sep, uint8_t *sessionid); 201 static int nfsv4_setcbsequence(struct nfsrv_descript *nd, struct nfsclient *clp, 202 int dont_replycache, struct nfsdsession **sepp); 203 static int nfsv4_getcbsession(struct nfsclient *clp, struct nfsdsession **sepp); 204 static int nfsrv_addlayout(struct nfsrv_descript *nd, struct nfslayout **lypp, 205 nfsv4stateid_t *stateidp, char *layp, int *layoutlenp, NFSPROC_T *p); 206 static void nfsrv_freelayout(struct nfslayouthead *lhp, struct nfslayout *lyp); 207 static void nfsrv_freelayoutlist(nfsquad_t clientid); 208 static void nfsrv_freelayouts(nfsquad_t *clid, fsid_t *fs, int laytype, 209 int iomode); 210 static void nfsrv_freealllayouts(void); 211 static void nfsrv_freedevid(struct nfsdevice *ds); 212 static int nfsrv_setdsserver(char *dspathp, NFSPROC_T *p, 213 struct nfsdevice **dsp); 214 static int nfsrv_delds(char *devid, NFSPROC_T *p); 215 static void nfsrv_deleteds(struct nfsdevice *fndds); 216 static void nfsrv_allocdevid(struct nfsdevice *ds, char *addr, char *dnshost); 217 static void nfsrv_freealldevids(void); 218 static void nfsrv_flexlayouterr(struct nfsrv_descript *nd, uint32_t *layp, 219 int maxcnt, NFSPROC_T *p); 220 static int nfsrv_recalllayout(nfsquad_t clid, nfsv4stateid_t *stateidp, 221 fhandle_t *fhp, struct nfslayout *lyp, struct nfslayouthead *lyheadp, 222 int laytype, NFSPROC_T *p); 223 static int nfsrv_findlayout(nfsquad_t *clientidp, fhandle_t *fhp, int laytype, 224 NFSPROC_T *, struct nfslayout 
**lypp); 225 static int nfsrv_fndclid(nfsquad_t *clidvec, nfsquad_t clid, int clidcnt); 226 static struct nfslayout *nfsrv_filelayout(struct nfsrv_descript *nd, int iomode, 227 fhandle_t *fhp, fhandle_t *dsfhp, char *devid, fsid_t fs); 228 static struct nfslayout *nfsrv_flexlayout(struct nfsrv_descript *nd, int iomode, 229 int mirrorcnt, fhandle_t *fhp, fhandle_t *dsfhp, char *devid, fsid_t fs); 230 static int nfsrv_dontlayout(fhandle_t *fhp); 231 static int nfsrv_createdsfile(vnode_t vp, fhandle_t *fhp, struct pnfsdsfile *pf, 232 vnode_t dvp, struct nfsdevice *ds, struct ucred *cred, NFSPROC_T *p, 233 vnode_t *tvpp); 234 235 /* 236 * Scan the client list for a match and either return the current one, 237 * create a new entry or return an error. 238 * If returning a non-error, the clp structure must either be linked into 239 * the client list or free'd. 240 */ 241 APPLESTATIC int 242 nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp, 243 nfsquad_t *clientidp, nfsquad_t *confirmp, NFSPROC_T *p) 244 { 245 struct nfsclient *clp = NULL, *new_clp = *new_clpp; 246 int i, error = 0, ret; 247 struct nfsstate *stp, *tstp; 248 struct sockaddr_in *sad, *rad; 249 struct nfsdsession *sep, *nsep; 250 int zapit = 0, gotit, hasstate = 0, igotlock; 251 static u_int64_t confirm_index = 0; 252 253 /* 254 * Check for state resource limit exceeded. 255 */ 256 if (nfsrv_openpluslock > nfsrv_v4statelimit) { 257 error = NFSERR_RESOURCE; 258 goto out; 259 } 260 261 if (nfsrv_issuedelegs == 0 || 262 ((nd->nd_flag & ND_GSS) != 0 && nfsrv_nogsscallback != 0)) 263 /* 264 * Don't do callbacks when delegations are disabled or 265 * for AUTH_GSS unless enabled via nfsrv_nogsscallback. 266 * If establishing a callback connection is attempted 267 * when a firewall is blocking the callback path, the 268 * server may wait too long for the connect attempt to 269 * succeed during the Open. Some clients, such as Linux, 270 * may timeout and give up on the Open before the server 271 * replies. Also, since AUTH_GSS callbacks are not 272 * yet interoperability tested, they might cause the 273 * server to crap out, if they get past the Init call to 274 * the client. 275 */ 276 new_clp->lc_program = 0; 277 278 /* Lock out other nfsd threads */ 279 NFSLOCKV4ROOTMUTEX(); 280 nfsv4_relref(&nfsv4rootfs_lock); 281 do { 282 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 283 NFSV4ROOTLOCKMUTEXPTR, NULL); 284 } while (!igotlock); 285 NFSUNLOCKV4ROOTMUTEX(); 286 287 /* 288 * Search for a match in the client list. 289 */ 290 gotit = i = 0; 291 while (i < nfsrv_clienthashsize && !gotit) { 292 LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) { 293 if (new_clp->lc_idlen == clp->lc_idlen && 294 !NFSBCMP(new_clp->lc_id, clp->lc_id, clp->lc_idlen)) { 295 gotit = 1; 296 break; 297 } 298 } 299 if (gotit == 0) 300 i++; 301 } 302 if (!gotit || 303 (clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_ADMINREVOKED))) { 304 if ((nd->nd_flag & ND_NFSV41) != 0 && confirmp->lval[1] != 0) { 305 /* 306 * For NFSv4.1, if confirmp->lval[1] is non-zero, the 307 * client is trying to update a confirmed clientid. 308 */ 309 NFSLOCKV4ROOTMUTEX(); 310 nfsv4_unlock(&nfsv4rootfs_lock, 1); 311 NFSUNLOCKV4ROOTMUTEX(); 312 confirmp->lval[1] = 0; 313 error = NFSERR_NOENT; 314 goto out; 315 } 316 /* 317 * Get rid of the old one. 
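 * (If a stale entry was found above, it is unhashed and its state
 *  cleaned here; the structure itself is only zapped after the
 *  exclusive lock has been released below.)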
318 */ 319 if (i != nfsrv_clienthashsize) { 320 LIST_REMOVE(clp, lc_hash); 321 nfsrv_cleanclient(clp, p); 322 nfsrv_freedeleglist(&clp->lc_deleg); 323 nfsrv_freedeleglist(&clp->lc_olddeleg); 324 zapit = 1; 325 } 326 /* 327 * Add it after assigning a client id to it. 328 */ 329 new_clp->lc_flags |= LCL_NEEDSCONFIRM; 330 if ((nd->nd_flag & ND_NFSV41) != 0) 331 new_clp->lc_confirm.lval[0] = confirmp->lval[0] = 332 ++confirm_index; 333 else 334 confirmp->qval = new_clp->lc_confirm.qval = 335 ++confirm_index; 336 clientidp->lval[0] = new_clp->lc_clientid.lval[0] = 337 (u_int32_t)nfsrvboottime; 338 clientidp->lval[1] = new_clp->lc_clientid.lval[1] = 339 nfsrv_nextclientindex(); 340 new_clp->lc_stateindex = 0; 341 new_clp->lc_statemaxindex = 0; 342 new_clp->lc_cbref = 0; 343 new_clp->lc_expiry = nfsrv_leaseexpiry(); 344 LIST_INIT(&new_clp->lc_open); 345 LIST_INIT(&new_clp->lc_deleg); 346 LIST_INIT(&new_clp->lc_olddeleg); 347 LIST_INIT(&new_clp->lc_session); 348 for (i = 0; i < nfsrv_statehashsize; i++) 349 LIST_INIT(&new_clp->lc_stateid[i]); 350 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, 351 lc_hash); 352 nfsstatsv1.srvclients++; 353 nfsrv_openpluslock++; 354 nfsrv_clients++; 355 NFSLOCKV4ROOTMUTEX(); 356 nfsv4_unlock(&nfsv4rootfs_lock, 1); 357 NFSUNLOCKV4ROOTMUTEX(); 358 if (zapit) 359 nfsrv_zapclient(clp, p); 360 *new_clpp = NULL; 361 goto out; 362 } 363 364 /* 365 * Now, handle the cases where the id is already issued. 366 */ 367 if (nfsrv_notsamecredname(nd, clp)) { 368 /* 369 * Check to see if there is expired state that should go away. 370 */ 371 if (clp->lc_expiry < NFSD_MONOSEC && 372 (!LIST_EMPTY(&clp->lc_open) || !LIST_EMPTY(&clp->lc_deleg))) { 373 nfsrv_cleanclient(clp, p); 374 nfsrv_freedeleglist(&clp->lc_deleg); 375 } 376 377 /* 378 * If there is outstanding state, then reply NFSERR_CLIDINUSE per 379 * RFC3530 Sec. 8.1.2 last para. 380 */ 381 if (!LIST_EMPTY(&clp->lc_deleg)) { 382 hasstate = 1; 383 } else if (LIST_EMPTY(&clp->lc_open)) { 384 hasstate = 0; 385 } else { 386 hasstate = 0; 387 /* Look for an Open on the OpenOwner */ 388 LIST_FOREACH(stp, &clp->lc_open, ls_list) { 389 if (!LIST_EMPTY(&stp->ls_open)) { 390 hasstate = 1; 391 break; 392 } 393 } 394 } 395 if (hasstate) { 396 /* 397 * If the uid doesn't match, return NFSERR_CLIDINUSE after 398 * filling out the correct ipaddr and portnum. 399 */ 400 sad = NFSSOCKADDR(new_clp->lc_req.nr_nam, struct sockaddr_in *); 401 rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *); 402 sad->sin_addr.s_addr = rad->sin_addr.s_addr; 403 sad->sin_port = rad->sin_port; 404 NFSLOCKV4ROOTMUTEX(); 405 nfsv4_unlock(&nfsv4rootfs_lock, 1); 406 NFSUNLOCKV4ROOTMUTEX(); 407 error = NFSERR_CLIDINUSE; 408 goto out; 409 } 410 } 411 412 if (NFSBCMP(new_clp->lc_verf, clp->lc_verf, NFSX_VERF)) { 413 /* 414 * If the verifier has changed, the client has rebooted 415 * and a new client id is issued. The old state info 416 * can be thrown away once the SETCLIENTID_CONFIRM occurs. 417 */ 418 LIST_REMOVE(clp, lc_hash); 419 420 /* Get rid of all sessions on this clientid. 
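 * A changed verifier means the client has rebooted, so any sessions
 * it created before the reboot can no longer be used.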
*/ 421 LIST_FOREACH_SAFE(sep, &clp->lc_session, sess_list, nsep) { 422 ret = nfsrv_freesession(sep, NULL); 423 if (ret != 0) 424 printf("nfsrv_setclient: verifier changed free" 425 " session failed=%d\n", ret); 426 } 427 428 new_clp->lc_flags |= LCL_NEEDSCONFIRM; 429 if ((nd->nd_flag & ND_NFSV41) != 0) 430 new_clp->lc_confirm.lval[0] = confirmp->lval[0] = 431 ++confirm_index; 432 else 433 confirmp->qval = new_clp->lc_confirm.qval = 434 ++confirm_index; 435 clientidp->lval[0] = new_clp->lc_clientid.lval[0] = 436 nfsrvboottime; 437 clientidp->lval[1] = new_clp->lc_clientid.lval[1] = 438 nfsrv_nextclientindex(); 439 new_clp->lc_stateindex = 0; 440 new_clp->lc_statemaxindex = 0; 441 new_clp->lc_cbref = 0; 442 new_clp->lc_expiry = nfsrv_leaseexpiry(); 443 444 /* 445 * Save the state until confirmed. 446 */ 447 LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list); 448 LIST_FOREACH(tstp, &new_clp->lc_open, ls_list) 449 tstp->ls_clp = new_clp; 450 LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list); 451 LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list) 452 tstp->ls_clp = new_clp; 453 LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg, 454 ls_list); 455 LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list) 456 tstp->ls_clp = new_clp; 457 for (i = 0; i < nfsrv_statehashsize; i++) { 458 LIST_NEWHEAD(&new_clp->lc_stateid[i], 459 &clp->lc_stateid[i], ls_hash); 460 LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_hash) 461 tstp->ls_clp = new_clp; 462 } 463 LIST_INIT(&new_clp->lc_session); 464 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, 465 lc_hash); 466 nfsstatsv1.srvclients++; 467 nfsrv_openpluslock++; 468 nfsrv_clients++; 469 NFSLOCKV4ROOTMUTEX(); 470 nfsv4_unlock(&nfsv4rootfs_lock, 1); 471 NFSUNLOCKV4ROOTMUTEX(); 472 473 /* 474 * Must wait until any outstanding callback on the old clp 475 * completes. 476 */ 477 NFSLOCKSTATE(); 478 while (clp->lc_cbref) { 479 clp->lc_flags |= LCL_WAKEUPWANTED; 480 (void)mtx_sleep(clp, NFSSTATEMUTEXPTR, PZERO - 1, 481 "nfsd clp", 10 * hz); 482 } 483 NFSUNLOCKSTATE(); 484 nfsrv_zapclient(clp, p); 485 *new_clpp = NULL; 486 goto out; 487 } 488 489 /* For NFSv4.1, mark that we found a confirmed clientid. */ 490 if ((nd->nd_flag & ND_NFSV41) != 0) { 491 clientidp->lval[0] = clp->lc_clientid.lval[0]; 492 clientidp->lval[1] = clp->lc_clientid.lval[1]; 493 confirmp->lval[0] = 0; /* Ignored by client */ 494 confirmp->lval[1] = 1; 495 } else { 496 /* 497 * id and verifier match, so update the net address info 498 * and get rid of any existing callback authentication 499 * handle, so a new one will be acquired. 
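 * (The confirm verifier is bumped below, so the client must complete
 *  another SETCLIENTID_CONFIRM before the updated entry takes effect.)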
500 */ 501 LIST_REMOVE(clp, lc_hash); 502 new_clp->lc_flags |= (LCL_NEEDSCONFIRM | LCL_DONTCLEAN); 503 new_clp->lc_expiry = nfsrv_leaseexpiry(); 504 confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index; 505 clientidp->lval[0] = new_clp->lc_clientid.lval[0] = 506 clp->lc_clientid.lval[0]; 507 clientidp->lval[1] = new_clp->lc_clientid.lval[1] = 508 clp->lc_clientid.lval[1]; 509 new_clp->lc_delegtime = clp->lc_delegtime; 510 new_clp->lc_stateindex = clp->lc_stateindex; 511 new_clp->lc_statemaxindex = clp->lc_statemaxindex; 512 new_clp->lc_cbref = 0; 513 LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list); 514 LIST_FOREACH(tstp, &new_clp->lc_open, ls_list) 515 tstp->ls_clp = new_clp; 516 LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list); 517 LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list) 518 tstp->ls_clp = new_clp; 519 LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg, ls_list); 520 LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list) 521 tstp->ls_clp = new_clp; 522 for (i = 0; i < nfsrv_statehashsize; i++) { 523 LIST_NEWHEAD(&new_clp->lc_stateid[i], 524 &clp->lc_stateid[i], ls_hash); 525 LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_hash) 526 tstp->ls_clp = new_clp; 527 } 528 LIST_INIT(&new_clp->lc_session); 529 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, 530 lc_hash); 531 nfsstatsv1.srvclients++; 532 nfsrv_openpluslock++; 533 nfsrv_clients++; 534 } 535 NFSLOCKV4ROOTMUTEX(); 536 nfsv4_unlock(&nfsv4rootfs_lock, 1); 537 NFSUNLOCKV4ROOTMUTEX(); 538 539 if ((nd->nd_flag & ND_NFSV41) == 0) { 540 /* 541 * Must wait until any outstanding callback on the old clp 542 * completes. 543 */ 544 NFSLOCKSTATE(); 545 while (clp->lc_cbref) { 546 clp->lc_flags |= LCL_WAKEUPWANTED; 547 (void)mtx_sleep(clp, NFSSTATEMUTEXPTR, PZERO - 1, 548 "nfsdclp", 10 * hz); 549 } 550 NFSUNLOCKSTATE(); 551 nfsrv_zapclient(clp, p); 552 *new_clpp = NULL; 553 } 554 555 out: 556 NFSEXITCODE2(error, nd); 557 return (error); 558 } 559 560 /* 561 * Check to see if the client id exists and optionally confirm it. 562 */ 563 APPLESTATIC int 564 nfsrv_getclient(nfsquad_t clientid, int opflags, struct nfsclient **clpp, 565 struct nfsdsession *nsep, nfsquad_t confirm, uint32_t cbprogram, 566 struct nfsrv_descript *nd, NFSPROC_T *p) 567 { 568 struct nfsclient *clp; 569 struct nfsstate *stp; 570 int i; 571 struct nfsclienthashhead *hp; 572 int error = 0, igotlock, doneok; 573 struct nfssessionhash *shp; 574 struct nfsdsession *sep; 575 uint64_t sessid[2]; 576 static uint64_t next_sess = 0; 577 578 if (clpp) 579 *clpp = NULL; 580 if ((nd == NULL || (nd->nd_flag & ND_NFSV41) == 0 || 581 opflags != CLOPS_RENEW) && nfsrvboottime != clientid.lval[0]) { 582 error = NFSERR_STALECLIENTID; 583 goto out; 584 } 585 586 /* 587 * If called with opflags == CLOPS_RENEW, the State Lock is 588 * already held. Otherwise, we need to get either that or, 589 * for the case of Confirm, lock out the nfsd threads. 590 */ 591 if (opflags & CLOPS_CONFIRM) { 592 NFSLOCKV4ROOTMUTEX(); 593 nfsv4_relref(&nfsv4rootfs_lock); 594 do { 595 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 596 NFSV4ROOTLOCKMUTEXPTR, NULL); 597 } while (!igotlock); 598 /* 599 * Create a new sessionid here, since we need to do it where 600 * there is a mutex held to serialize update of next_sess. 
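 * (The sessionid is built from the incremented next_sess counter plus
 *  the clientid, which includes nfsrvboottime, so it should not be
 *  reused for this server instance.)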
601 */ 602 if ((nd->nd_flag & ND_NFSV41) != 0) { 603 sessid[0] = ++next_sess; 604 sessid[1] = clientid.qval; 605 } 606 NFSUNLOCKV4ROOTMUTEX(); 607 } else if (opflags != CLOPS_RENEW) { 608 NFSLOCKSTATE(); 609 } 610 611 /* For NFSv4.1, the clp is acquired from the associated session. */ 612 if (nd != NULL && (nd->nd_flag & ND_NFSV41) != 0 && 613 opflags == CLOPS_RENEW) { 614 clp = NULL; 615 if ((nd->nd_flag & ND_HASSEQUENCE) != 0) { 616 shp = NFSSESSIONHASH(nd->nd_sessionid); 617 NFSLOCKSESSION(shp); 618 sep = nfsrv_findsession(nd->nd_sessionid); 619 if (sep != NULL) 620 clp = sep->sess_clp; 621 NFSUNLOCKSESSION(shp); 622 } 623 } else { 624 hp = NFSCLIENTHASH(clientid); 625 LIST_FOREACH(clp, hp, lc_hash) { 626 if (clp->lc_clientid.lval[1] == clientid.lval[1]) 627 break; 628 } 629 } 630 if (clp == NULL) { 631 if (opflags & CLOPS_CONFIRM) 632 error = NFSERR_STALECLIENTID; 633 else 634 error = NFSERR_EXPIRED; 635 } else if (clp->lc_flags & LCL_ADMINREVOKED) { 636 /* 637 * If marked admin revoked, just return the error. 638 */ 639 error = NFSERR_ADMINREVOKED; 640 } 641 if (error) { 642 if (opflags & CLOPS_CONFIRM) { 643 NFSLOCKV4ROOTMUTEX(); 644 nfsv4_unlock(&nfsv4rootfs_lock, 1); 645 NFSUNLOCKV4ROOTMUTEX(); 646 } else if (opflags != CLOPS_RENEW) { 647 NFSUNLOCKSTATE(); 648 } 649 goto out; 650 } 651 652 /* 653 * Perform any operations specified by the opflags. 654 */ 655 if (opflags & CLOPS_CONFIRM) { 656 if (((nd->nd_flag & ND_NFSV41) != 0 && 657 clp->lc_confirm.lval[0] != confirm.lval[0]) || 658 ((nd->nd_flag & ND_NFSV41) == 0 && 659 clp->lc_confirm.qval != confirm.qval)) 660 error = NFSERR_STALECLIENTID; 661 else if (nfsrv_notsamecredname(nd, clp)) 662 error = NFSERR_CLIDINUSE; 663 664 if (!error) { 665 if ((clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_DONTCLEAN)) == 666 LCL_NEEDSCONFIRM) { 667 /* 668 * Hang onto the delegations (as old delegations) 669 * for an Open with CLAIM_DELEGATE_PREV unless in 670 * grace, but get rid of the rest of the state. 671 */ 672 nfsrv_cleanclient(clp, p); 673 nfsrv_freedeleglist(&clp->lc_olddeleg); 674 if (nfsrv_checkgrace(nd, clp, 0)) { 675 /* In grace, so just delete delegations */ 676 nfsrv_freedeleglist(&clp->lc_deleg); 677 } else { 678 LIST_FOREACH(stp, &clp->lc_deleg, ls_list) 679 stp->ls_flags |= NFSLCK_OLDDELEG; 680 clp->lc_delegtime = NFSD_MONOSEC + 681 nfsrv_lease + NFSRV_LEASEDELTA; 682 LIST_NEWHEAD(&clp->lc_olddeleg, &clp->lc_deleg, 683 ls_list); 684 } 685 if ((nd->nd_flag & ND_NFSV41) != 0) 686 clp->lc_program = cbprogram; 687 } 688 clp->lc_flags &= ~(LCL_NEEDSCONFIRM | LCL_DONTCLEAN); 689 if (clp->lc_program) 690 clp->lc_flags |= LCL_NEEDSCBNULL; 691 /* For NFSv4.1, link the session onto the client. */ 692 if (nsep != NULL) { 693 /* Hold a reference on the xprt for a backchannel. */ 694 if ((nsep->sess_crflags & NFSV4CRSESS_CONNBACKCHAN) 695 != 0) { 696 if (clp->lc_req.nr_client == NULL) 697 clp->lc_req.nr_client = (struct __rpc_client *) 698 clnt_bck_create(nd->nd_xprt->xp_socket, 699 cbprogram, NFSV4_CBVERS); 700 if (clp->lc_req.nr_client != NULL) { 701 SVC_ACQUIRE(nd->nd_xprt); 702 nd->nd_xprt->xp_p2 = 703 clp->lc_req.nr_client->cl_private; 704 /* Disable idle timeout. 
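 * so that the krpc will not drop the TCP connection that is now also
 * being used as the callback back channel.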
*/ 705 nd->nd_xprt->xp_idletimeout = 0; 706 nsep->sess_cbsess.nfsess_xprt = nd->nd_xprt; 707 } else 708 nsep->sess_crflags &= ~NFSV4CRSESS_CONNBACKCHAN; 709 } 710 NFSBCOPY(sessid, nsep->sess_sessionid, 711 NFSX_V4SESSIONID); 712 NFSBCOPY(sessid, nsep->sess_cbsess.nfsess_sessionid, 713 NFSX_V4SESSIONID); 714 shp = NFSSESSIONHASH(nsep->sess_sessionid); 715 NFSLOCKSTATE(); 716 NFSLOCKSESSION(shp); 717 LIST_INSERT_HEAD(&shp->list, nsep, sess_hash); 718 LIST_INSERT_HEAD(&clp->lc_session, nsep, sess_list); 719 nsep->sess_clp = clp; 720 NFSUNLOCKSESSION(shp); 721 NFSUNLOCKSTATE(); 722 } 723 } 724 } else if (clp->lc_flags & LCL_NEEDSCONFIRM) { 725 error = NFSERR_EXPIRED; 726 } 727 728 /* 729 * If called by the Renew Op, we must check the principal. 730 */ 731 if (!error && (opflags & CLOPS_RENEWOP)) { 732 if (nfsrv_notsamecredname(nd, clp)) { 733 doneok = 0; 734 for (i = 0; i < nfsrv_statehashsize && doneok == 0; i++) { 735 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { 736 if ((stp->ls_flags & NFSLCK_OPEN) && 737 stp->ls_uid == nd->nd_cred->cr_uid) { 738 doneok = 1; 739 break; 740 } 741 } 742 } 743 if (!doneok) 744 error = NFSERR_ACCES; 745 } 746 if (!error && (clp->lc_flags & LCL_CBDOWN)) 747 error = NFSERR_CBPATHDOWN; 748 } 749 if ((!error || error == NFSERR_CBPATHDOWN) && 750 (opflags & CLOPS_RENEW)) { 751 clp->lc_expiry = nfsrv_leaseexpiry(); 752 } 753 if (opflags & CLOPS_CONFIRM) { 754 NFSLOCKV4ROOTMUTEX(); 755 nfsv4_unlock(&nfsv4rootfs_lock, 1); 756 NFSUNLOCKV4ROOTMUTEX(); 757 } else if (opflags != CLOPS_RENEW) { 758 NFSUNLOCKSTATE(); 759 } 760 if (clpp) 761 *clpp = clp; 762 763 out: 764 NFSEXITCODE2(error, nd); 765 return (error); 766 } 767 768 /* 769 * Perform the NFSv4.1 destroy clientid. 770 */ 771 int 772 nfsrv_destroyclient(nfsquad_t clientid, NFSPROC_T *p) 773 { 774 struct nfsclient *clp; 775 struct nfsclienthashhead *hp; 776 int error = 0, i, igotlock; 777 778 if (nfsrvboottime != clientid.lval[0]) { 779 error = NFSERR_STALECLIENTID; 780 goto out; 781 } 782 783 /* Lock out other nfsd threads */ 784 NFSLOCKV4ROOTMUTEX(); 785 nfsv4_relref(&nfsv4rootfs_lock); 786 do { 787 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 788 NFSV4ROOTLOCKMUTEXPTR, NULL); 789 } while (igotlock == 0); 790 NFSUNLOCKV4ROOTMUTEX(); 791 792 hp = NFSCLIENTHASH(clientid); 793 LIST_FOREACH(clp, hp, lc_hash) { 794 if (clp->lc_clientid.lval[1] == clientid.lval[1]) 795 break; 796 } 797 if (clp == NULL) { 798 NFSLOCKV4ROOTMUTEX(); 799 nfsv4_unlock(&nfsv4rootfs_lock, 1); 800 NFSUNLOCKV4ROOTMUTEX(); 801 /* Just return ok, since it is gone. */ 802 goto out; 803 } 804 805 /* 806 * Free up all layouts on the clientid. Should the client return the 807 * layouts? 808 */ 809 nfsrv_freelayoutlist(clientid); 810 811 /* Scan for state on the clientid. */ 812 for (i = 0; i < nfsrv_statehashsize; i++) 813 if (!LIST_EMPTY(&clp->lc_stateid[i])) { 814 NFSLOCKV4ROOTMUTEX(); 815 nfsv4_unlock(&nfsv4rootfs_lock, 1); 816 NFSUNLOCKV4ROOTMUTEX(); 817 error = NFSERR_CLIENTIDBUSY; 818 goto out; 819 } 820 if (!LIST_EMPTY(&clp->lc_session) || !LIST_EMPTY(&clp->lc_deleg)) { 821 NFSLOCKV4ROOTMUTEX(); 822 nfsv4_unlock(&nfsv4rootfs_lock, 1); 823 NFSUNLOCKV4ROOTMUTEX(); 824 error = NFSERR_CLIENTIDBUSY; 825 goto out; 826 } 827 828 /* Destroy the clientid and return ok. 
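 * The checks above have verified that no stateids, sessions or
 * delegations remain, so the client structure can now be cleaned
 * up and freed.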
*/ 829 nfsrv_cleanclient(clp, p); 830 nfsrv_freedeleglist(&clp->lc_deleg); 831 nfsrv_freedeleglist(&clp->lc_olddeleg); 832 LIST_REMOVE(clp, lc_hash); 833 NFSLOCKV4ROOTMUTEX(); 834 nfsv4_unlock(&nfsv4rootfs_lock, 1); 835 NFSUNLOCKV4ROOTMUTEX(); 836 nfsrv_zapclient(clp, p); 837 out: 838 NFSEXITCODE2(error, nd); 839 return (error); 840 } 841 842 /* 843 * Called from the new nfssvc syscall to admin revoke a clientid. 844 * Returns 0 for success, error otherwise. 845 */ 846 APPLESTATIC int 847 nfsrv_adminrevoke(struct nfsd_clid *revokep, NFSPROC_T *p) 848 { 849 struct nfsclient *clp = NULL; 850 int i, error = 0; 851 int gotit, igotlock; 852 853 /* 854 * First, lock out the nfsd so that state won't change while the 855 * revocation record is being written to the stable storage restart 856 * file. 857 */ 858 NFSLOCKV4ROOTMUTEX(); 859 do { 860 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 861 NFSV4ROOTLOCKMUTEXPTR, NULL); 862 } while (!igotlock); 863 NFSUNLOCKV4ROOTMUTEX(); 864 865 /* 866 * Search for a match in the client list. 867 */ 868 gotit = i = 0; 869 while (i < nfsrv_clienthashsize && !gotit) { 870 LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) { 871 if (revokep->nclid_idlen == clp->lc_idlen && 872 !NFSBCMP(revokep->nclid_id, clp->lc_id, clp->lc_idlen)) { 873 gotit = 1; 874 break; 875 } 876 } 877 i++; 878 } 879 if (!gotit) { 880 NFSLOCKV4ROOTMUTEX(); 881 nfsv4_unlock(&nfsv4rootfs_lock, 0); 882 NFSUNLOCKV4ROOTMUTEX(); 883 error = EPERM; 884 goto out; 885 } 886 887 /* 888 * Now, write out the revocation record 889 */ 890 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); 891 nfsrv_backupstable(); 892 893 /* 894 * and clear out the state, marking the clientid revoked. 895 */ 896 clp->lc_flags &= ~LCL_CALLBACKSON; 897 clp->lc_flags |= LCL_ADMINREVOKED; 898 nfsrv_cleanclient(clp, p); 899 nfsrv_freedeleglist(&clp->lc_deleg); 900 nfsrv_freedeleglist(&clp->lc_olddeleg); 901 NFSLOCKV4ROOTMUTEX(); 902 nfsv4_unlock(&nfsv4rootfs_lock, 0); 903 NFSUNLOCKV4ROOTMUTEX(); 904 905 out: 906 NFSEXITCODE(error); 907 return (error); 908 } 909 910 /* 911 * Dump out stats for all clients. Called from nfssvc(2), that is used 912 * nfsstatsv1. 913 */ 914 APPLESTATIC void 915 nfsrv_dumpclients(struct nfsd_dumpclients *dumpp, int maxcnt) 916 { 917 struct nfsclient *clp; 918 int i = 0, cnt = 0; 919 920 /* 921 * First, get a reference on the nfsv4rootfs_lock so that an 922 * exclusive lock cannot be acquired while dumping the clients. 923 */ 924 NFSLOCKV4ROOTMUTEX(); 925 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL); 926 NFSUNLOCKV4ROOTMUTEX(); 927 NFSLOCKSTATE(); 928 /* 929 * Rattle through the client lists until done. 930 */ 931 while (i < nfsrv_clienthashsize && cnt < maxcnt) { 932 clp = LIST_FIRST(&nfsclienthash[i]); 933 while (clp != LIST_END(&nfsclienthash[i]) && cnt < maxcnt) { 934 nfsrv_dumpaclient(clp, &dumpp[cnt]); 935 cnt++; 936 clp = LIST_NEXT(clp, lc_hash); 937 } 938 i++; 939 } 940 if (cnt < maxcnt) 941 dumpp[cnt].ndcl_clid.nclid_idlen = 0; 942 NFSUNLOCKSTATE(); 943 NFSLOCKV4ROOTMUTEX(); 944 nfsv4_relref(&nfsv4rootfs_lock); 945 NFSUNLOCKV4ROOTMUTEX(); 946 } 947 948 /* 949 * Dump stats for a client. Must be called with the NFSSTATELOCK and spl'd. 
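 * (nfsrv_dumpclients() above acquires the state lock before walking
 *  the client hash lists and calling this for each client.)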
950 */ 951 static void 952 nfsrv_dumpaclient(struct nfsclient *clp, struct nfsd_dumpclients *dumpp) 953 { 954 struct nfsstate *stp, *openstp, *lckownstp; 955 struct nfslock *lop; 956 struct sockaddr *sad; 957 struct sockaddr_in *rad; 958 struct sockaddr_in6 *rad6; 959 960 dumpp->ndcl_nopenowners = dumpp->ndcl_nlockowners = 0; 961 dumpp->ndcl_nopens = dumpp->ndcl_nlocks = 0; 962 dumpp->ndcl_ndelegs = dumpp->ndcl_nolddelegs = 0; 963 dumpp->ndcl_flags = clp->lc_flags; 964 dumpp->ndcl_clid.nclid_idlen = clp->lc_idlen; 965 NFSBCOPY(clp->lc_id, dumpp->ndcl_clid.nclid_id, clp->lc_idlen); 966 sad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr *); 967 dumpp->ndcl_addrfam = sad->sa_family; 968 if (sad->sa_family == AF_INET) { 969 rad = (struct sockaddr_in *)sad; 970 dumpp->ndcl_cbaddr.sin_addr = rad->sin_addr; 971 } else { 972 rad6 = (struct sockaddr_in6 *)sad; 973 dumpp->ndcl_cbaddr.sin6_addr = rad6->sin6_addr; 974 } 975 976 /* 977 * Now, scan the state lists and total up the opens and locks. 978 */ 979 LIST_FOREACH(stp, &clp->lc_open, ls_list) { 980 dumpp->ndcl_nopenowners++; 981 LIST_FOREACH(openstp, &stp->ls_open, ls_list) { 982 dumpp->ndcl_nopens++; 983 LIST_FOREACH(lckownstp, &openstp->ls_open, ls_list) { 984 dumpp->ndcl_nlockowners++; 985 LIST_FOREACH(lop, &lckownstp->ls_lock, lo_lckowner) { 986 dumpp->ndcl_nlocks++; 987 } 988 } 989 } 990 } 991 992 /* 993 * and the delegation lists. 994 */ 995 LIST_FOREACH(stp, &clp->lc_deleg, ls_list) { 996 dumpp->ndcl_ndelegs++; 997 } 998 LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) { 999 dumpp->ndcl_nolddelegs++; 1000 } 1001 } 1002 1003 /* 1004 * Dump out lock stats for a file. 1005 */ 1006 APPLESTATIC void 1007 nfsrv_dumplocks(vnode_t vp, struct nfsd_dumplocks *ldumpp, int maxcnt, 1008 NFSPROC_T *p) 1009 { 1010 struct nfsstate *stp; 1011 struct nfslock *lop; 1012 int cnt = 0; 1013 struct nfslockfile *lfp; 1014 struct sockaddr *sad; 1015 struct sockaddr_in *rad; 1016 struct sockaddr_in6 *rad6; 1017 int ret; 1018 fhandle_t nfh; 1019 1020 ret = nfsrv_getlockfh(vp, 0, NULL, &nfh, p); 1021 /* 1022 * First, get a reference on the nfsv4rootfs_lock so that an 1023 * exclusive lock on it cannot be acquired while dumping the locks. 1024 */ 1025 NFSLOCKV4ROOTMUTEX(); 1026 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL); 1027 NFSUNLOCKV4ROOTMUTEX(); 1028 NFSLOCKSTATE(); 1029 if (!ret) 1030 ret = nfsrv_getlockfile(0, NULL, &lfp, &nfh, 0); 1031 if (ret) { 1032 ldumpp[0].ndlck_clid.nclid_idlen = 0; 1033 NFSUNLOCKSTATE(); 1034 NFSLOCKV4ROOTMUTEX(); 1035 nfsv4_relref(&nfsv4rootfs_lock); 1036 NFSUNLOCKV4ROOTMUTEX(); 1037 return; 1038 } 1039 1040 /* 1041 * For each open share on file, dump it out. 
1042 */ 1043 stp = LIST_FIRST(&lfp->lf_open); 1044 while (stp != LIST_END(&lfp->lf_open) && cnt < maxcnt) { 1045 ldumpp[cnt].ndlck_flags = stp->ls_flags; 1046 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; 1047 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; 1048 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; 1049 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; 1050 ldumpp[cnt].ndlck_owner.nclid_idlen = 1051 stp->ls_openowner->ls_ownerlen; 1052 NFSBCOPY(stp->ls_openowner->ls_owner, 1053 ldumpp[cnt].ndlck_owner.nclid_id, 1054 stp->ls_openowner->ls_ownerlen); 1055 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; 1056 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, 1057 stp->ls_clp->lc_idlen); 1058 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); 1059 ldumpp[cnt].ndlck_addrfam = sad->sa_family; 1060 if (sad->sa_family == AF_INET) { 1061 rad = (struct sockaddr_in *)sad; 1062 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; 1063 } else { 1064 rad6 = (struct sockaddr_in6 *)sad; 1065 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; 1066 } 1067 stp = LIST_NEXT(stp, ls_file); 1068 cnt++; 1069 } 1070 1071 /* 1072 * and all locks. 1073 */ 1074 lop = LIST_FIRST(&lfp->lf_lock); 1075 while (lop != LIST_END(&lfp->lf_lock) && cnt < maxcnt) { 1076 stp = lop->lo_stp; 1077 ldumpp[cnt].ndlck_flags = lop->lo_flags; 1078 ldumpp[cnt].ndlck_first = lop->lo_first; 1079 ldumpp[cnt].ndlck_end = lop->lo_end; 1080 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; 1081 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; 1082 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; 1083 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; 1084 ldumpp[cnt].ndlck_owner.nclid_idlen = stp->ls_ownerlen; 1085 NFSBCOPY(stp->ls_owner, ldumpp[cnt].ndlck_owner.nclid_id, 1086 stp->ls_ownerlen); 1087 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; 1088 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, 1089 stp->ls_clp->lc_idlen); 1090 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); 1091 ldumpp[cnt].ndlck_addrfam = sad->sa_family; 1092 if (sad->sa_family == AF_INET) { 1093 rad = (struct sockaddr_in *)sad; 1094 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; 1095 } else { 1096 rad6 = (struct sockaddr_in6 *)sad; 1097 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; 1098 } 1099 lop = LIST_NEXT(lop, lo_lckfile); 1100 cnt++; 1101 } 1102 1103 /* 1104 * and the delegations. 
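 * (Delegations have no owner string, so ndlck_owner.nclid_idlen is
 *  set to zero for these entries.)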
1105 */ 1106 stp = LIST_FIRST(&lfp->lf_deleg); 1107 while (stp != LIST_END(&lfp->lf_deleg) && cnt < maxcnt) { 1108 ldumpp[cnt].ndlck_flags = stp->ls_flags; 1109 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; 1110 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; 1111 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; 1112 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; 1113 ldumpp[cnt].ndlck_owner.nclid_idlen = 0; 1114 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; 1115 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, 1116 stp->ls_clp->lc_idlen); 1117 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); 1118 ldumpp[cnt].ndlck_addrfam = sad->sa_family; 1119 if (sad->sa_family == AF_INET) { 1120 rad = (struct sockaddr_in *)sad; 1121 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; 1122 } else { 1123 rad6 = (struct sockaddr_in6 *)sad; 1124 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; 1125 } 1126 stp = LIST_NEXT(stp, ls_file); 1127 cnt++; 1128 } 1129 1130 /* 1131 * If list isn't full, mark end of list by setting the client name 1132 * to zero length. 1133 */ 1134 if (cnt < maxcnt) 1135 ldumpp[cnt].ndlck_clid.nclid_idlen = 0; 1136 NFSUNLOCKSTATE(); 1137 NFSLOCKV4ROOTMUTEX(); 1138 nfsv4_relref(&nfsv4rootfs_lock); 1139 NFSUNLOCKV4ROOTMUTEX(); 1140 } 1141 1142 /* 1143 * Server timer routine. It can scan any linked list, so long 1144 * as it holds the spin/mutex lock and there is no exclusive lock on 1145 * nfsv4rootfs_lock. 1146 * (For OpenBSD, a kthread is ok. For FreeBSD, I think it is ok 1147 * to do this from a callout, since the spin locks work. For 1148 * Darwin, I'm not sure what will work correctly yet.) 1149 * Should be called once per second. 1150 */ 1151 APPLESTATIC void 1152 nfsrv_servertimer(void) 1153 { 1154 struct nfsclient *clp, *nclp; 1155 struct nfsstate *stp, *nstp; 1156 int got_ref, i; 1157 1158 /* 1159 * Make sure nfsboottime is set. This is used by V3 as well 1160 * as V4. Note that nfsboottime is not nfsrvboottime, which is 1161 * only used by the V4 server for leases. 1162 */ 1163 if (nfsboottime.tv_sec == 0) 1164 NFSSETBOOTTIME(nfsboottime); 1165 1166 /* 1167 * If server hasn't started yet, just return. 1168 */ 1169 NFSLOCKSTATE(); 1170 if (nfsrv_stablefirst.nsf_eograce == 0) { 1171 NFSUNLOCKSTATE(); 1172 return; 1173 } 1174 if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE)) { 1175 if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) && 1176 NFSD_MONOSEC > nfsrv_stablefirst.nsf_eograce) 1177 nfsrv_stablefirst.nsf_flags |= 1178 (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK); 1179 NFSUNLOCKSTATE(); 1180 return; 1181 } 1182 1183 /* 1184 * Try and get a reference count on the nfsv4rootfs_lock so that 1185 * no nfsd thread can acquire an exclusive lock on it before this 1186 * call is done. If it is already exclusively locked, just return. 1187 */ 1188 NFSLOCKV4ROOTMUTEX(); 1189 got_ref = nfsv4_getref_nonblock(&nfsv4rootfs_lock); 1190 NFSUNLOCKV4ROOTMUTEX(); 1191 if (got_ref == 0) { 1192 NFSUNLOCKSTATE(); 1193 return; 1194 } 1195 1196 /* 1197 * For each client... 
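 * decide whether its lease has expired badly enough that the client
 * should be marked LCL_EXPIREIT, so an nfsd thread will expire it soon.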
 */
    for (i = 0; i < nfsrv_clienthashsize; i++) {
        clp = LIST_FIRST(&nfsclienthash[i]);
        while (clp != LIST_END(&nfsclienthash[i])) {
            nclp = LIST_NEXT(clp, lc_hash);
            if (!(clp->lc_flags & LCL_EXPIREIT)) {
                if (((clp->lc_expiry + NFSRV_STALELEASE) < NFSD_MONOSEC
                     && ((LIST_EMPTY(&clp->lc_deleg)
                          && LIST_EMPTY(&clp->lc_open)) ||
                         nfsrv_clients > nfsrv_clienthighwater)) ||
                    (clp->lc_expiry + NFSRV_MOULDYLEASE) < NFSD_MONOSEC ||
                    (clp->lc_expiry < NFSD_MONOSEC &&
                     (nfsrv_openpluslock * 10 / 9) > nfsrv_v4statelimit)) {
                    /*
                     * Lease has expired several nfsrv_lease times ago:
                     * PLUS
                     *    - no state is associated with it
                     *  OR
                     *    - above high water mark for number of clients
                     *      (nfsrv_clienthighwater should be large enough
                     *       that this only occurs when clients fail to
                     *       use the same nfs_client_id4.id. Maybe somewhat
                     *       higher than the maximum number of clients that
                     *       will mount this server?)
                     * OR
                     * Lease has expired a very long time ago
                     * OR
                     * Lease has expired PLUS the number of opens + locks
                     * has exceeded 90% of capacity
                     *
                     * --> Mark for expiry. The actual expiry will be done
                     *     by an nfsd sometime soon.
                     */
                    clp->lc_flags |= LCL_EXPIREIT;
                    nfsrv_stablefirst.nsf_flags |=
                        (NFSNSF_NEEDLOCK | NFSNSF_EXPIREDCLIENT);
                } else {
                    /*
                     * If there are no opens, increment the no-open tick
                     * count. If that count exceeds NFSNOOPEN, mark it to
                     * be thrown away; otherwise, if there is an open,
                     * reset the count. Hopefully, this will avoid
                     * excessive re-creation of open owners and
                     * subsequent open confirms.
                     */
                    stp = LIST_FIRST(&clp->lc_open);
                    while (stp != LIST_END(&clp->lc_open)) {
                        nstp = LIST_NEXT(stp, ls_list);
                        if (LIST_EMPTY(&stp->ls_open)) {
                            stp->ls_noopens++;
                            if (stp->ls_noopens > NFSNOOPEN ||
                                (nfsrv_openpluslock * 2) >
                                nfsrv_v4statelimit)
                                nfsrv_stablefirst.nsf_flags |=
                                    NFSNSF_NOOPENS;
                        } else {
                            stp->ls_noopens = 0;
                        }
                        stp = nstp;
                    }
                }
            }
            clp = nclp;
        }
    }
    NFSUNLOCKSTATE();
    NFSLOCKV4ROOTMUTEX();
    nfsv4_relref(&nfsv4rootfs_lock);
    NFSUNLOCKV4ROOTMUTEX();
}

/*
 * The following set of functions free up the various data structures.
 */
/*
 * Clear out all open/lock state related to this nfsclient.
 * Caller must hold an exclusive lock on nfsv4rootfs_lock, so that
 * there are no other active nfsd threads.
 */
APPLESTATIC void
nfsrv_cleanclient(struct nfsclient *clp, NFSPROC_T *p)
{
    struct nfsstate *stp, *nstp;
    struct nfsdsession *sep, *nsep;

    LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp)
        nfsrv_freeopenowner(stp, 1, p);
    if ((clp->lc_flags & LCL_ADMINREVOKED) == 0)
        LIST_FOREACH_SAFE(sep, &clp->lc_session, sess_list, nsep)
            (void)nfsrv_freesession(sep, NULL);
}

/*
 * Free a client that has been cleaned. It should also already have been
 * removed from the lists.
 * (Just to be safe w.r.t. newnfs_disconnect(), call this function when
 *  softclock interrupts are enabled.)
1294 */ 1295 APPLESTATIC void 1296 nfsrv_zapclient(struct nfsclient *clp, NFSPROC_T *p) 1297 { 1298 1299 #ifdef notyet 1300 if ((clp->lc_flags & (LCL_GSS | LCL_CALLBACKSON)) == 1301 (LCL_GSS | LCL_CALLBACKSON) && 1302 (clp->lc_hand.nfsh_flag & NFSG_COMPLETE) && 1303 clp->lc_handlelen > 0) { 1304 clp->lc_hand.nfsh_flag &= ~NFSG_COMPLETE; 1305 clp->lc_hand.nfsh_flag |= NFSG_DESTROYED; 1306 (void) nfsrv_docallback(clp, NFSV4PROC_CBNULL, 1307 NULL, 0, NULL, NULL, NULL, 0, p); 1308 } 1309 #endif 1310 newnfs_disconnect(&clp->lc_req); 1311 free(clp->lc_req.nr_nam, M_SONAME); 1312 NFSFREEMUTEX(&clp->lc_req.nr_mtx); 1313 free(clp->lc_stateid, M_NFSDCLIENT); 1314 free(clp, M_NFSDCLIENT); 1315 NFSLOCKSTATE(); 1316 nfsstatsv1.srvclients--; 1317 nfsrv_openpluslock--; 1318 nfsrv_clients--; 1319 NFSUNLOCKSTATE(); 1320 } 1321 1322 /* 1323 * Free a list of delegation state structures. 1324 * (This function will also free all nfslockfile structures that no 1325 * longer have associated state.) 1326 */ 1327 APPLESTATIC void 1328 nfsrv_freedeleglist(struct nfsstatehead *sthp) 1329 { 1330 struct nfsstate *stp, *nstp; 1331 1332 LIST_FOREACH_SAFE(stp, sthp, ls_list, nstp) { 1333 nfsrv_freedeleg(stp); 1334 } 1335 LIST_INIT(sthp); 1336 } 1337 1338 /* 1339 * Free up a delegation. 1340 */ 1341 static void 1342 nfsrv_freedeleg(struct nfsstate *stp) 1343 { 1344 struct nfslockfile *lfp; 1345 1346 LIST_REMOVE(stp, ls_hash); 1347 LIST_REMOVE(stp, ls_list); 1348 LIST_REMOVE(stp, ls_file); 1349 if ((stp->ls_flags & NFSLCK_DELEGWRITE) != 0) 1350 nfsrv_writedelegcnt--; 1351 lfp = stp->ls_lfp; 1352 if (LIST_EMPTY(&lfp->lf_open) && 1353 LIST_EMPTY(&lfp->lf_lock) && LIST_EMPTY(&lfp->lf_deleg) && 1354 LIST_EMPTY(&lfp->lf_locallock) && LIST_EMPTY(&lfp->lf_rollback) && 1355 lfp->lf_usecount == 0 && 1356 nfsv4_testlock(&lfp->lf_locallock_lck) == 0) 1357 nfsrv_freenfslockfile(lfp); 1358 free(stp, M_NFSDSTATE); 1359 nfsstatsv1.srvdelegates--; 1360 nfsrv_openpluslock--; 1361 nfsrv_delegatecnt--; 1362 } 1363 1364 /* 1365 * This function frees an open owner and all associated opens. 1366 */ 1367 static void 1368 nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, NFSPROC_T *p) 1369 { 1370 struct nfsstate *nstp, *tstp; 1371 1372 LIST_REMOVE(stp, ls_list); 1373 /* 1374 * Now, free all associated opens. 1375 */ 1376 nstp = LIST_FIRST(&stp->ls_open); 1377 while (nstp != LIST_END(&stp->ls_open)) { 1378 tstp = nstp; 1379 nstp = LIST_NEXT(nstp, ls_list); 1380 (void) nfsrv_freeopen(tstp, NULL, cansleep, p); 1381 } 1382 if (stp->ls_op) 1383 nfsrvd_derefcache(stp->ls_op); 1384 free(stp, M_NFSDSTATE); 1385 nfsstatsv1.srvopenowners--; 1386 nfsrv_openpluslock--; 1387 } 1388 1389 /* 1390 * This function frees an open (nfsstate open structure) with all associated 1391 * lock_owners and locks. It also frees the nfslockfile structure iff there 1392 * are no other opens on the file. 1393 * Returns 1 if it free'd the nfslockfile, 0 otherwise. 1394 */ 1395 static int 1396 nfsrv_freeopen(struct nfsstate *stp, vnode_t vp, int cansleep, NFSPROC_T *p) 1397 { 1398 struct nfsstate *nstp, *tstp; 1399 struct nfslockfile *lfp; 1400 int ret; 1401 1402 LIST_REMOVE(stp, ls_hash); 1403 LIST_REMOVE(stp, ls_list); 1404 LIST_REMOVE(stp, ls_file); 1405 1406 lfp = stp->ls_lfp; 1407 /* 1408 * Now, free all lockowners associated with this open. 1409 */ 1410 LIST_FOREACH_SAFE(tstp, &stp->ls_open, ls_list, nstp) 1411 nfsrv_freelockowner(tstp, vp, cansleep, p); 1412 1413 /* 1414 * The nfslockfile is freed here if there are no locks 1415 * associated with the open. 
1416 * If there are locks associated with the open, the 1417 * nfslockfile structure can be freed via nfsrv_freelockowner(). 1418 * Acquire the state mutex to avoid races with calls to 1419 * nfsrv_getlockfile(). 1420 */ 1421 if (cansleep != 0) 1422 NFSLOCKSTATE(); 1423 if (lfp != NULL && LIST_EMPTY(&lfp->lf_open) && 1424 LIST_EMPTY(&lfp->lf_deleg) && LIST_EMPTY(&lfp->lf_lock) && 1425 LIST_EMPTY(&lfp->lf_locallock) && LIST_EMPTY(&lfp->lf_rollback) && 1426 lfp->lf_usecount == 0 && 1427 (cansleep != 0 || nfsv4_testlock(&lfp->lf_locallock_lck) == 0)) { 1428 nfsrv_freenfslockfile(lfp); 1429 ret = 1; 1430 } else 1431 ret = 0; 1432 if (cansleep != 0) 1433 NFSUNLOCKSTATE(); 1434 free(stp, M_NFSDSTATE); 1435 nfsstatsv1.srvopens--; 1436 nfsrv_openpluslock--; 1437 return (ret); 1438 } 1439 1440 /* 1441 * Frees a lockowner and all associated locks. 1442 */ 1443 static void 1444 nfsrv_freelockowner(struct nfsstate *stp, vnode_t vp, int cansleep, 1445 NFSPROC_T *p) 1446 { 1447 1448 LIST_REMOVE(stp, ls_hash); 1449 LIST_REMOVE(stp, ls_list); 1450 nfsrv_freeallnfslocks(stp, vp, cansleep, p); 1451 if (stp->ls_op) 1452 nfsrvd_derefcache(stp->ls_op); 1453 free(stp, M_NFSDSTATE); 1454 nfsstatsv1.srvlockowners--; 1455 nfsrv_openpluslock--; 1456 } 1457 1458 /* 1459 * Free all the nfs locks on a lockowner. 1460 */ 1461 static void 1462 nfsrv_freeallnfslocks(struct nfsstate *stp, vnode_t vp, int cansleep, 1463 NFSPROC_T *p) 1464 { 1465 struct nfslock *lop, *nlop; 1466 struct nfsrollback *rlp, *nrlp; 1467 struct nfslockfile *lfp = NULL; 1468 int gottvp = 0; 1469 vnode_t tvp = NULL; 1470 uint64_t first, end; 1471 1472 if (vp != NULL) 1473 ASSERT_VOP_UNLOCKED(vp, "nfsrv_freeallnfslocks: vnode locked"); 1474 lop = LIST_FIRST(&stp->ls_lock); 1475 while (lop != LIST_END(&stp->ls_lock)) { 1476 nlop = LIST_NEXT(lop, lo_lckowner); 1477 /* 1478 * Since all locks should be for the same file, lfp should 1479 * not change. 1480 */ 1481 if (lfp == NULL) 1482 lfp = lop->lo_lfp; 1483 else if (lfp != lop->lo_lfp) 1484 panic("allnfslocks"); 1485 /* 1486 * If vp is NULL and cansleep != 0, a vnode must be acquired 1487 * from the file handle. This only occurs when called from 1488 * nfsrv_cleanclient(). 1489 */ 1490 if (gottvp == 0) { 1491 if (nfsrv_dolocallocks == 0) 1492 tvp = NULL; 1493 else if (vp == NULL && cansleep != 0) { 1494 tvp = nfsvno_getvp(&lfp->lf_fh); 1495 NFSVOPUNLOCK(tvp, 0); 1496 } else 1497 tvp = vp; 1498 gottvp = 1; 1499 } 1500 1501 if (tvp != NULL) { 1502 if (cansleep == 0) 1503 panic("allnfs2"); 1504 first = lop->lo_first; 1505 end = lop->lo_end; 1506 nfsrv_freenfslock(lop); 1507 nfsrv_localunlock(tvp, lfp, first, end, p); 1508 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list, 1509 nrlp) 1510 free(rlp, M_NFSDROLLBACK); 1511 LIST_INIT(&lfp->lf_rollback); 1512 } else 1513 nfsrv_freenfslock(lop); 1514 lop = nlop; 1515 } 1516 if (vp == NULL && tvp != NULL) 1517 vrele(tvp); 1518 } 1519 1520 /* 1521 * Free an nfslock structure. 1522 */ 1523 static void 1524 nfsrv_freenfslock(struct nfslock *lop) 1525 { 1526 1527 if (lop->lo_lckfile.le_prev != NULL) { 1528 LIST_REMOVE(lop, lo_lckfile); 1529 nfsstatsv1.srvlocks--; 1530 nfsrv_openpluslock--; 1531 } 1532 LIST_REMOVE(lop, lo_lckowner); 1533 free(lop, M_NFSDLOCK); 1534 } 1535 1536 /* 1537 * This function frees an nfslockfile structure. 1538 */ 1539 static void 1540 nfsrv_freenfslockfile(struct nfslockfile *lfp) 1541 { 1542 1543 LIST_REMOVE(lfp, lf_hash); 1544 free(lfp, M_NFSDLOCKFILE); 1545 } 1546 1547 /* 1548 * This function looks up an nfsstate structure via stateid. 
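 * (The "other" field of the stateid selects the client's hash chain
 *  via NFSSTATEHASH() and is then compared to find the matching entry.)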
1549 */ 1550 static int 1551 nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp, __unused u_int32_t flags, 1552 struct nfsstate **stpp) 1553 { 1554 struct nfsstate *stp; 1555 struct nfsstatehead *hp; 1556 int error = 0; 1557 1558 *stpp = NULL; 1559 hp = NFSSTATEHASH(clp, *stateidp); 1560 LIST_FOREACH(stp, hp, ls_hash) { 1561 if (!NFSBCMP(stp->ls_stateid.other, stateidp->other, 1562 NFSX_STATEIDOTHER)) 1563 break; 1564 } 1565 1566 /* 1567 * If no state id in list, return NFSERR_BADSTATEID. 1568 */ 1569 if (stp == LIST_END(hp)) { 1570 error = NFSERR_BADSTATEID; 1571 goto out; 1572 } 1573 *stpp = stp; 1574 1575 out: 1576 NFSEXITCODE(error); 1577 return (error); 1578 } 1579 1580 /* 1581 * This function gets an nfsstate structure via owner string. 1582 */ 1583 static void 1584 nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp, 1585 struct nfsstate **stpp) 1586 { 1587 struct nfsstate *stp; 1588 1589 *stpp = NULL; 1590 LIST_FOREACH(stp, hp, ls_list) { 1591 if (new_stp->ls_ownerlen == stp->ls_ownerlen && 1592 !NFSBCMP(new_stp->ls_owner,stp->ls_owner,stp->ls_ownerlen)) { 1593 *stpp = stp; 1594 return; 1595 } 1596 } 1597 } 1598 1599 /* 1600 * Lock control function called to update lock status. 1601 * Returns 0 upon success, -1 if there is no lock and the flags indicate 1602 * that one isn't to be created and an NFSERR_xxx for other errors. 1603 * The structures new_stp and new_lop are passed in as pointers that should 1604 * be set to NULL if the structure is used and shouldn't be free'd. 1605 * For the NFSLCK_TEST and NFSLCK_CHECK cases, the structures are 1606 * never used and can safely be allocated on the stack. For all other 1607 * cases, *new_stpp and *new_lopp should be malloc'd before the call, 1608 * in case they are used. 1609 */ 1610 APPLESTATIC int 1611 nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, 1612 struct nfslock **new_lopp, struct nfslockconflict *cfp, 1613 nfsquad_t clientid, nfsv4stateid_t *stateidp, 1614 __unused struct nfsexstuff *exp, 1615 struct nfsrv_descript *nd, NFSPROC_T *p) 1616 { 1617 struct nfslock *lop; 1618 struct nfsstate *new_stp = *new_stpp; 1619 struct nfslock *new_lop = *new_lopp; 1620 struct nfsstate *tstp, *mystp, *nstp; 1621 int specialid = 0; 1622 struct nfslockfile *lfp; 1623 struct nfslock *other_lop = NULL; 1624 struct nfsstate *stp, *lckstp = NULL; 1625 struct nfsclient *clp = NULL; 1626 u_int32_t bits; 1627 int error = 0, haslock = 0, ret, reterr; 1628 int getlckret, delegation = 0, filestruct_locked, vnode_unlocked = 0; 1629 fhandle_t nfh; 1630 uint64_t first, end; 1631 uint32_t lock_flags; 1632 1633 if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_SETATTR)) { 1634 /* 1635 * Note the special cases of "all 1s" or "all 0s" stateids and 1636 * let reads with all 1s go ahead. 1637 */ 1638 if (new_stp->ls_stateid.seqid == 0x0 && 1639 new_stp->ls_stateid.other[0] == 0x0 && 1640 new_stp->ls_stateid.other[1] == 0x0 && 1641 new_stp->ls_stateid.other[2] == 0x0) 1642 specialid = 1; 1643 else if (new_stp->ls_stateid.seqid == 0xffffffff && 1644 new_stp->ls_stateid.other[0] == 0xffffffff && 1645 new_stp->ls_stateid.other[1] == 0xffffffff && 1646 new_stp->ls_stateid.other[2] == 0xffffffff) 1647 specialid = 2; 1648 } 1649 1650 /* 1651 * Check for restart conditions (client and server). 1652 */ 1653 error = nfsrv_checkrestart(clientid, new_stp->ls_flags, 1654 &new_stp->ls_stateid, specialid); 1655 if (error) 1656 goto out; 1657 1658 /* 1659 * Check for state resource limit exceeded. 
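 * (Only done for new lock requests; NFSERR_RESOURCE is replied once
 *  the total of opens + locks exceeds nfsrv_v4statelimit.)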
1660 */ 1661 if ((new_stp->ls_flags & NFSLCK_LOCK) && 1662 nfsrv_openpluslock > nfsrv_v4statelimit) { 1663 error = NFSERR_RESOURCE; 1664 goto out; 1665 } 1666 1667 /* 1668 * For the lock case, get another nfslock structure, 1669 * just in case we need it. 1670 * Malloc now, before we start sifting through the linked lists, 1671 * in case we have to wait for memory. 1672 */ 1673 tryagain: 1674 if (new_stp->ls_flags & NFSLCK_LOCK) 1675 other_lop = malloc(sizeof (struct nfslock), 1676 M_NFSDLOCK, M_WAITOK); 1677 filestruct_locked = 0; 1678 reterr = 0; 1679 lfp = NULL; 1680 1681 /* 1682 * Get the lockfile structure for CFH now, so we can do a sanity 1683 * check against the stateid, before incrementing the seqid#, since 1684 * we want to return NFSERR_BADSTATEID on failure and the seqid# 1685 * shouldn't be incremented for this case. 1686 * If nfsrv_getlockfile() returns -1, it means "not found", which 1687 * will be handled later. 1688 * If we are doing Lock/LockU and local locking is enabled, sleep 1689 * lock the nfslockfile structure. 1690 */ 1691 getlckret = nfsrv_getlockfh(vp, new_stp->ls_flags, NULL, &nfh, p); 1692 NFSLOCKSTATE(); 1693 if (getlckret == 0) { 1694 if ((new_stp->ls_flags & (NFSLCK_LOCK | NFSLCK_UNLOCK)) != 0 && 1695 nfsrv_dolocallocks != 0 && nd->nd_repstat == 0) { 1696 getlckret = nfsrv_getlockfile(new_stp->ls_flags, NULL, 1697 &lfp, &nfh, 1); 1698 if (getlckret == 0) 1699 filestruct_locked = 1; 1700 } else 1701 getlckret = nfsrv_getlockfile(new_stp->ls_flags, NULL, 1702 &lfp, &nfh, 0); 1703 } 1704 if (getlckret != 0 && getlckret != -1) 1705 reterr = getlckret; 1706 1707 if (filestruct_locked != 0) { 1708 LIST_INIT(&lfp->lf_rollback); 1709 if ((new_stp->ls_flags & NFSLCK_LOCK)) { 1710 /* 1711 * For local locking, do the advisory locking now, so 1712 * that any conflict can be detected. A failure later 1713 * can be rolled back locally. If an error is returned, 1714 * struct nfslockfile has been unlocked and any local 1715 * locking rolled back. 1716 */ 1717 NFSUNLOCKSTATE(); 1718 if (vnode_unlocked == 0) { 1719 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl1"); 1720 vnode_unlocked = 1; 1721 NFSVOPUNLOCK(vp, 0); 1722 } 1723 reterr = nfsrv_locallock(vp, lfp, 1724 (new_lop->lo_flags & (NFSLCK_READ | NFSLCK_WRITE)), 1725 new_lop->lo_first, new_lop->lo_end, cfp, p); 1726 NFSLOCKSTATE(); 1727 } 1728 } 1729 1730 if (specialid == 0) { 1731 if (new_stp->ls_flags & NFSLCK_TEST) { 1732 /* 1733 * RFC 3530 does not list LockT as an op that renews a 1734 * lease, but the consensus seems to be that it is ok 1735 * for a server to do so. 1736 */ 1737 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 1738 (nfsquad_t)((u_quad_t)0), 0, nd, p); 1739 1740 /* 1741 * Since NFSERR_EXPIRED, NFSERR_ADMINREVOKED are not valid 1742 * error returns for LockT, just go ahead and test for a lock, 1743 * since there are no locks for this client, but other locks 1744 * can conflict. (ie. 
same client will always be false) 1745 */ 1746 if (error == NFSERR_EXPIRED || error == NFSERR_ADMINREVOKED) 1747 error = 0; 1748 lckstp = new_stp; 1749 } else { 1750 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 1751 (nfsquad_t)((u_quad_t)0), 0, nd, p); 1752 if (error == 0) 1753 /* 1754 * Look up the stateid 1755 */ 1756 error = nfsrv_getstate(clp, &new_stp->ls_stateid, 1757 new_stp->ls_flags, &stp); 1758 /* 1759 * do some sanity checks for an unconfirmed open or a 1760 * stateid that refers to the wrong file, for an open stateid 1761 */ 1762 if (error == 0 && (stp->ls_flags & NFSLCK_OPEN) && 1763 ((stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM) || 1764 (getlckret == 0 && stp->ls_lfp != lfp))){ 1765 /* 1766 * NFSLCK_SETATTR should return OK rather than NFSERR_BADSTATEID 1767 * The only exception is using SETATTR with SIZE. 1768 * */ 1769 if ((new_stp->ls_flags & 1770 (NFSLCK_SETATTR | NFSLCK_CHECK)) != NFSLCK_SETATTR) 1771 error = NFSERR_BADSTATEID; 1772 } 1773 1774 if (error == 0 && 1775 (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) && 1776 getlckret == 0 && stp->ls_lfp != lfp) 1777 error = NFSERR_BADSTATEID; 1778 1779 /* 1780 * If the lockowner stateid doesn't refer to the same file, 1781 * I believe that is considered ok, since some clients will 1782 * only create a single lockowner and use that for all locks 1783 * on all files. 1784 * For now, log it as a diagnostic, instead of considering it 1785 * a BadStateid. 1786 */ 1787 if (error == 0 && (stp->ls_flags & 1788 (NFSLCK_OPEN | NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) == 0 && 1789 getlckret == 0 && stp->ls_lfp != lfp) { 1790 #ifdef DIAGNOSTIC 1791 printf("Got a lock statid for different file open\n"); 1792 #endif 1793 /* 1794 error = NFSERR_BADSTATEID; 1795 */ 1796 } 1797 1798 if (error == 0) { 1799 if (new_stp->ls_flags & NFSLCK_OPENTOLOCK) { 1800 /* 1801 * If haslock set, we've already checked the seqid. 1802 */ 1803 if (!haslock) { 1804 if (stp->ls_flags & NFSLCK_OPEN) 1805 error = nfsrv_checkseqid(nd, new_stp->ls_seq, 1806 stp->ls_openowner, new_stp->ls_op); 1807 else 1808 error = NFSERR_BADSTATEID; 1809 } 1810 if (!error) 1811 nfsrv_getowner(&stp->ls_open, new_stp, &lckstp); 1812 if (lckstp) 1813 /* 1814 * I believe this should be an error, but it 1815 * isn't obvious what NFSERR_xxx would be 1816 * appropriate, so I'll use NFSERR_INVAL for now. 1817 */ 1818 error = NFSERR_INVAL; 1819 else 1820 lckstp = new_stp; 1821 } else if (new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK)) { 1822 /* 1823 * If haslock set, ditto above. 1824 */ 1825 if (!haslock) { 1826 if (stp->ls_flags & NFSLCK_OPEN) 1827 error = NFSERR_BADSTATEID; 1828 else 1829 error = nfsrv_checkseqid(nd, new_stp->ls_seq, 1830 stp, new_stp->ls_op); 1831 } 1832 lckstp = stp; 1833 } else { 1834 lckstp = stp; 1835 } 1836 } 1837 /* 1838 * If the seqid part of the stateid isn't the same, return 1839 * NFSERR_OLDSTATEID for cases other than I/O Ops. 1840 * For I/O Ops, only return NFSERR_OLDSTATEID if 1841 * nfsrv_returnoldstateid is set. (The consensus on the email 1842 * list was that most clients would prefer to not receive 1843 * NFSERR_OLDSTATEID for I/O Ops, but the RFC suggests that that 1844 * is what will happen, so I use the nfsrv_returnoldstateid to 1845 * allow for either server configuration.) 
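 * For NFSv4.1, a request stateid seqid of 0 means "use the current
 * seqid", so only a non-zero mismatch is treated as an error.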
1846 */ 1847 if (!error && stp->ls_stateid.seqid!=new_stp->ls_stateid.seqid && 1848 (((nd->nd_flag & ND_NFSV41) == 0 && 1849 (!(new_stp->ls_flags & NFSLCK_CHECK) || 1850 nfsrv_returnoldstateid)) || 1851 ((nd->nd_flag & ND_NFSV41) != 0 && 1852 new_stp->ls_stateid.seqid != 0))) 1853 error = NFSERR_OLDSTATEID; 1854 } 1855 } 1856 1857 /* 1858 * Now we can check for grace. 1859 */ 1860 if (!error) 1861 error = nfsrv_checkgrace(nd, clp, new_stp->ls_flags); 1862 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error && 1863 nfsrv_checkstable(clp)) 1864 error = NFSERR_NOGRACE; 1865 /* 1866 * If we successfully Reclaimed state, note that. 1867 */ 1868 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error) 1869 nfsrv_markstable(clp); 1870 1871 /* 1872 * At this point, either error == NFSERR_BADSTATEID or the 1873 * seqid# has been updated, so we can return any error. 1874 * If error == 0, there may be an error in: 1875 * nd_repstat - Set by the calling function. 1876 * reterr - Set above, if getting the nfslockfile structure 1877 * or acquiring the local lock failed. 1878 * (If both of these are set, nd_repstat should probably be 1879 * returned, since that error was detected before this 1880 * function call.) 1881 */ 1882 if (error != 0 || nd->nd_repstat != 0 || reterr != 0) { 1883 if (error == 0) { 1884 if (nd->nd_repstat != 0) 1885 error = nd->nd_repstat; 1886 else 1887 error = reterr; 1888 } 1889 if (filestruct_locked != 0) { 1890 /* Roll back local locks. */ 1891 NFSUNLOCKSTATE(); 1892 if (vnode_unlocked == 0) { 1893 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl2"); 1894 vnode_unlocked = 1; 1895 NFSVOPUNLOCK(vp, 0); 1896 } 1897 nfsrv_locallock_rollback(vp, lfp, p); 1898 NFSLOCKSTATE(); 1899 nfsrv_unlocklf(lfp); 1900 } 1901 NFSUNLOCKSTATE(); 1902 goto out; 1903 } 1904 1905 /* 1906 * Check the nfsrv_getlockfile return. 1907 * Returned -1 if no structure found. 1908 */ 1909 if (getlckret == -1) { 1910 error = NFSERR_EXPIRED; 1911 /* 1912 * Called from lockt, so no lock is OK. 1913 */ 1914 if (new_stp->ls_flags & NFSLCK_TEST) { 1915 error = 0; 1916 } else if (new_stp->ls_flags & 1917 (NFSLCK_CHECK | NFSLCK_SETATTR)) { 1918 /* 1919 * Called to check for a lock, OK if the stateid is all 1920 * 1s or all 0s, but there should be an nfsstate 1921 * otherwise. 1922 * (ie. If there is no open, I'll assume no share 1923 * deny bits.) 1924 */ 1925 if (specialid) 1926 error = 0; 1927 else 1928 error = NFSERR_BADSTATEID; 1929 } 1930 NFSUNLOCKSTATE(); 1931 goto out; 1932 } 1933 1934 /* 1935 * For NFSLCK_CHECK and NFSLCK_LOCK, test for a share conflict. 1936 * For NFSLCK_CHECK, allow a read if write access is granted, 1937 * but check for a deny. For NFSLCK_LOCK, require correct access, 1938 * which implies a conflicting deny can't exist. 1939 */ 1940 if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_LOCK)) { 1941 /* 1942 * Four kinds of state id: 1943 * - specialid (all 0s or all 1s), only for NFSLCK_CHECK 1944 * - stateid for an open 1945 * - stateid for a delegation 1946 * - stateid for a lock owner 1947 */ 1948 if (!specialid) { 1949 if (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) { 1950 delegation = 1; 1951 mystp = stp; 1952 nfsrv_delaydelegtimeout(stp); 1953 } else if (stp->ls_flags & NFSLCK_OPEN) { 1954 mystp = stp; 1955 } else { 1956 mystp = stp->ls_openstp; 1957 } 1958 /* 1959 * If locking or checking, require correct access 1960 * bit set. 
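 * (The one relaxation: a Read checked against a stateid whose open lacks
 * READACCESS is still permitted when nfsrv_allowreadforwriteopen is
 * non-zero.)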
1961 */ 1962 if (((new_stp->ls_flags & NFSLCK_LOCK) && 1963 !((new_lop->lo_flags >> NFSLCK_LOCKSHIFT) & 1964 mystp->ls_flags & NFSLCK_ACCESSBITS)) || 1965 ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_READACCESS)) == 1966 (NFSLCK_CHECK | NFSLCK_READACCESS) && 1967 !(mystp->ls_flags & NFSLCK_READACCESS) && 1968 nfsrv_allowreadforwriteopen == 0) || 1969 ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_WRITEACCESS)) == 1970 (NFSLCK_CHECK | NFSLCK_WRITEACCESS) && 1971 !(mystp->ls_flags & NFSLCK_WRITEACCESS))) { 1972 if (filestruct_locked != 0) { 1973 /* Roll back local locks. */ 1974 NFSUNLOCKSTATE(); 1975 if (vnode_unlocked == 0) { 1976 ASSERT_VOP_ELOCKED(vp, 1977 "nfsrv_lockctrl3"); 1978 vnode_unlocked = 1; 1979 NFSVOPUNLOCK(vp, 0); 1980 } 1981 nfsrv_locallock_rollback(vp, lfp, p); 1982 NFSLOCKSTATE(); 1983 nfsrv_unlocklf(lfp); 1984 } 1985 NFSUNLOCKSTATE(); 1986 error = NFSERR_OPENMODE; 1987 goto out; 1988 } 1989 } else 1990 mystp = NULL; 1991 if ((new_stp->ls_flags & NFSLCK_CHECK) && !delegation) { 1992 /* 1993 * Check for a conflicting deny bit. 1994 */ 1995 LIST_FOREACH(tstp, &lfp->lf_open, ls_file) { 1996 if (tstp != mystp) { 1997 bits = tstp->ls_flags; 1998 bits >>= NFSLCK_SHIFT; 1999 if (new_stp->ls_flags & bits & NFSLCK_ACCESSBITS) { 2000 KASSERT(vnode_unlocked == 0, 2001 ("nfsrv_lockctrl: vnode unlocked1")); 2002 ret = nfsrv_clientconflict(tstp->ls_clp, &haslock, 2003 vp, p); 2004 if (ret == 1) { 2005 /* 2006 * nfsrv_clientconflict unlocks state 2007 * when it returns non-zero. 2008 */ 2009 lckstp = NULL; 2010 goto tryagain; 2011 } 2012 if (ret == 0) 2013 NFSUNLOCKSTATE(); 2014 if (ret == 2) 2015 error = NFSERR_PERM; 2016 else 2017 error = NFSERR_OPENMODE; 2018 goto out; 2019 } 2020 } 2021 } 2022 2023 /* We're outta here */ 2024 NFSUNLOCKSTATE(); 2025 goto out; 2026 } 2027 } 2028 2029 /* 2030 * For setattr, just get rid of all the Delegations for other clients. 2031 */ 2032 if (new_stp->ls_flags & NFSLCK_SETATTR) { 2033 KASSERT(vnode_unlocked == 0, 2034 ("nfsrv_lockctrl: vnode unlocked2")); 2035 ret = nfsrv_cleandeleg(vp, lfp, clp, &haslock, p); 2036 if (ret) { 2037 /* 2038 * nfsrv_cleandeleg() unlocks state when it 2039 * returns non-zero. 2040 */ 2041 if (ret == -1) { 2042 lckstp = NULL; 2043 goto tryagain; 2044 } 2045 error = ret; 2046 goto out; 2047 } 2048 if (!(new_stp->ls_flags & NFSLCK_CHECK) || 2049 (LIST_EMPTY(&lfp->lf_open) && LIST_EMPTY(&lfp->lf_lock) && 2050 LIST_EMPTY(&lfp->lf_deleg))) { 2051 NFSUNLOCKSTATE(); 2052 goto out; 2053 } 2054 } 2055 2056 /* 2057 * Check for a conflicting delegation. If one is found, call 2058 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't 2059 * been set yet, it will get the lock. Otherwise, it will recall 2060 * the delegation. Then, we try try again... 2061 * I currently believe the conflict algorithm to be: 2062 * For Lock Ops (Lock/LockT/LockU) 2063 * - there is a conflict iff a different client has a write delegation 2064 * For Reading (Read Op) 2065 * - there is a conflict iff a different client has a write delegation 2066 * (the specialids are always a different client) 2067 * For Writing (Write/Setattr of size) 2068 * - there is a conflict if a different client has any delegation 2069 * - there is a conflict if the same client has a read delegation 2070 * (I don't understand why this isn't allowed, but that seems to be 2071 * the current consensus?) 
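 * The delegation list below is walked with a saved next pointer (nstp),
 * since handling a conflict can change the list; when nfsrv_delegconflict()
 * returns -1 the whole operation is restarted at "tryagain".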
2072 */ 2073 tstp = LIST_FIRST(&lfp->lf_deleg); 2074 while (tstp != LIST_END(&lfp->lf_deleg)) { 2075 nstp = LIST_NEXT(tstp, ls_file); 2076 if ((((new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK|NFSLCK_TEST))|| 2077 ((new_stp->ls_flags & NFSLCK_CHECK) && 2078 (new_lop->lo_flags & NFSLCK_READ))) && 2079 clp != tstp->ls_clp && 2080 (tstp->ls_flags & NFSLCK_DELEGWRITE)) || 2081 ((new_stp->ls_flags & NFSLCK_CHECK) && 2082 (new_lop->lo_flags & NFSLCK_WRITE) && 2083 (clp != tstp->ls_clp || 2084 (tstp->ls_flags & NFSLCK_DELEGREAD)))) { 2085 ret = 0; 2086 if (filestruct_locked != 0) { 2087 /* Roll back local locks. */ 2088 NFSUNLOCKSTATE(); 2089 if (vnode_unlocked == 0) { 2090 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl4"); 2091 NFSVOPUNLOCK(vp, 0); 2092 } 2093 nfsrv_locallock_rollback(vp, lfp, p); 2094 NFSLOCKSTATE(); 2095 nfsrv_unlocklf(lfp); 2096 NFSUNLOCKSTATE(); 2097 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2098 vnode_unlocked = 0; 2099 if ((vp->v_iflag & VI_DOOMED) != 0) 2100 ret = NFSERR_SERVERFAULT; 2101 NFSLOCKSTATE(); 2102 } 2103 if (ret == 0) 2104 ret = nfsrv_delegconflict(tstp, &haslock, p, vp); 2105 if (ret) { 2106 /* 2107 * nfsrv_delegconflict unlocks state when it 2108 * returns non-zero, which it always does. 2109 */ 2110 if (other_lop) { 2111 free(other_lop, M_NFSDLOCK); 2112 other_lop = NULL; 2113 } 2114 if (ret == -1) { 2115 lckstp = NULL; 2116 goto tryagain; 2117 } 2118 error = ret; 2119 goto out; 2120 } 2121 /* Never gets here. */ 2122 } 2123 tstp = nstp; 2124 } 2125 2126 /* 2127 * Handle the unlock case by calling nfsrv_updatelock(). 2128 * (Should I have done some access checking above for unlock? For now, 2129 * just let it happen.) 2130 */ 2131 if (new_stp->ls_flags & NFSLCK_UNLOCK) { 2132 first = new_lop->lo_first; 2133 end = new_lop->lo_end; 2134 nfsrv_updatelock(stp, new_lopp, &other_lop, lfp); 2135 stateidp->seqid = ++(stp->ls_stateid.seqid); 2136 if ((nd->nd_flag & ND_NFSV41) != 0 && stateidp->seqid == 0) 2137 stateidp->seqid = stp->ls_stateid.seqid = 1; 2138 stateidp->other[0] = stp->ls_stateid.other[0]; 2139 stateidp->other[1] = stp->ls_stateid.other[1]; 2140 stateidp->other[2] = stp->ls_stateid.other[2]; 2141 if (filestruct_locked != 0) { 2142 NFSUNLOCKSTATE(); 2143 if (vnode_unlocked == 0) { 2144 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl5"); 2145 vnode_unlocked = 1; 2146 NFSVOPUNLOCK(vp, 0); 2147 } 2148 /* Update the local locks. */ 2149 nfsrv_localunlock(vp, lfp, first, end, p); 2150 NFSLOCKSTATE(); 2151 nfsrv_unlocklf(lfp); 2152 } 2153 NFSUNLOCKSTATE(); 2154 goto out; 2155 } 2156 2157 /* 2158 * Search for a conflicting lock. 
A lock conflicts if: 2159 * - the lock range overlaps and 2160 * - at least one lock is a write lock and 2161 * - it is not owned by the same lock owner 2162 */ 2163 if (!delegation) { 2164 LIST_FOREACH(lop, &lfp->lf_lock, lo_lckfile) { 2165 if (new_lop->lo_end > lop->lo_first && 2166 new_lop->lo_first < lop->lo_end && 2167 (new_lop->lo_flags == NFSLCK_WRITE || 2168 lop->lo_flags == NFSLCK_WRITE) && 2169 lckstp != lop->lo_stp && 2170 (clp != lop->lo_stp->ls_clp || 2171 lckstp->ls_ownerlen != lop->lo_stp->ls_ownerlen || 2172 NFSBCMP(lckstp->ls_owner, lop->lo_stp->ls_owner, 2173 lckstp->ls_ownerlen))) { 2174 if (other_lop) { 2175 free(other_lop, M_NFSDLOCK); 2176 other_lop = NULL; 2177 } 2178 if (vnode_unlocked != 0) 2179 ret = nfsrv_clientconflict(lop->lo_stp->ls_clp, &haslock, 2180 NULL, p); 2181 else 2182 ret = nfsrv_clientconflict(lop->lo_stp->ls_clp, &haslock, 2183 vp, p); 2184 if (ret == 1) { 2185 if (filestruct_locked != 0) { 2186 if (vnode_unlocked == 0) { 2187 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl6"); 2188 NFSVOPUNLOCK(vp, 0); 2189 } 2190 /* Roll back local locks. */ 2191 nfsrv_locallock_rollback(vp, lfp, p); 2192 NFSLOCKSTATE(); 2193 nfsrv_unlocklf(lfp); 2194 NFSUNLOCKSTATE(); 2195 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2196 vnode_unlocked = 0; 2197 if ((vp->v_iflag & VI_DOOMED) != 0) { 2198 error = NFSERR_SERVERFAULT; 2199 goto out; 2200 } 2201 } 2202 /* 2203 * nfsrv_clientconflict() unlocks state when it 2204 * returns non-zero. 2205 */ 2206 lckstp = NULL; 2207 goto tryagain; 2208 } 2209 /* 2210 * Found a conflicting lock, so record the conflict and 2211 * return the error. 2212 */ 2213 if (cfp != NULL && ret == 0) { 2214 cfp->cl_clientid.lval[0]=lop->lo_stp->ls_stateid.other[0]; 2215 cfp->cl_clientid.lval[1]=lop->lo_stp->ls_stateid.other[1]; 2216 cfp->cl_first = lop->lo_first; 2217 cfp->cl_end = lop->lo_end; 2218 cfp->cl_flags = lop->lo_flags; 2219 cfp->cl_ownerlen = lop->lo_stp->ls_ownerlen; 2220 NFSBCOPY(lop->lo_stp->ls_owner, cfp->cl_owner, 2221 cfp->cl_ownerlen); 2222 } 2223 if (ret == 2) 2224 error = NFSERR_PERM; 2225 else if (new_stp->ls_flags & NFSLCK_RECLAIM) 2226 error = NFSERR_RECLAIMCONFLICT; 2227 else if (new_stp->ls_flags & NFSLCK_CHECK) 2228 error = NFSERR_LOCKED; 2229 else 2230 error = NFSERR_DENIED; 2231 if (filestruct_locked != 0 && ret == 0) { 2232 /* Roll back local locks. */ 2233 NFSUNLOCKSTATE(); 2234 if (vnode_unlocked == 0) { 2235 ASSERT_VOP_ELOCKED(vp, "nfsrv_lockctrl7"); 2236 vnode_unlocked = 1; 2237 NFSVOPUNLOCK(vp, 0); 2238 } 2239 nfsrv_locallock_rollback(vp, lfp, p); 2240 NFSLOCKSTATE(); 2241 nfsrv_unlocklf(lfp); 2242 } 2243 if (ret == 0) 2244 NFSUNLOCKSTATE(); 2245 goto out; 2246 } 2247 } 2248 } 2249 2250 /* 2251 * We only get here if there was no lock that conflicted. 2252 */ 2253 if (new_stp->ls_flags & (NFSLCK_TEST | NFSLCK_CHECK)) { 2254 NFSUNLOCKSTATE(); 2255 goto out; 2256 } 2257 2258 /* 2259 * We only get here when we are creating or modifying a lock. 
2260 * There are two variants: 2261 * - exist_lock_owner where lock_owner exists 2262 * - open_to_lock_owner with new lock_owner 2263 */ 2264 first = new_lop->lo_first; 2265 end = new_lop->lo_end; 2266 lock_flags = new_lop->lo_flags; 2267 if (!(new_stp->ls_flags & NFSLCK_OPENTOLOCK)) { 2268 nfsrv_updatelock(lckstp, new_lopp, &other_lop, lfp); 2269 stateidp->seqid = ++(lckstp->ls_stateid.seqid); 2270 if ((nd->nd_flag & ND_NFSV41) != 0 && stateidp->seqid == 0) 2271 stateidp->seqid = lckstp->ls_stateid.seqid = 1; 2272 stateidp->other[0] = lckstp->ls_stateid.other[0]; 2273 stateidp->other[1] = lckstp->ls_stateid.other[1]; 2274 stateidp->other[2] = lckstp->ls_stateid.other[2]; 2275 } else { 2276 /* 2277 * The new open_to_lock_owner case. 2278 * Link the new nfsstate into the lists. 2279 */ 2280 new_stp->ls_seq = new_stp->ls_opentolockseq; 2281 nfsrvd_refcache(new_stp->ls_op); 2282 stateidp->seqid = new_stp->ls_stateid.seqid = 1; 2283 stateidp->other[0] = new_stp->ls_stateid.other[0] = 2284 clp->lc_clientid.lval[0]; 2285 stateidp->other[1] = new_stp->ls_stateid.other[1] = 2286 clp->lc_clientid.lval[1]; 2287 stateidp->other[2] = new_stp->ls_stateid.other[2] = 2288 nfsrv_nextstateindex(clp); 2289 new_stp->ls_clp = clp; 2290 LIST_INIT(&new_stp->ls_lock); 2291 new_stp->ls_openstp = stp; 2292 new_stp->ls_lfp = lfp; 2293 nfsrv_insertlock(new_lop, (struct nfslock *)new_stp, new_stp, 2294 lfp); 2295 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_stp->ls_stateid), 2296 new_stp, ls_hash); 2297 LIST_INSERT_HEAD(&stp->ls_open, new_stp, ls_list); 2298 *new_lopp = NULL; 2299 *new_stpp = NULL; 2300 nfsstatsv1.srvlockowners++; 2301 nfsrv_openpluslock++; 2302 } 2303 if (filestruct_locked != 0) { 2304 NFSUNLOCKSTATE(); 2305 nfsrv_locallock_commit(lfp, lock_flags, first, end); 2306 NFSLOCKSTATE(); 2307 nfsrv_unlocklf(lfp); 2308 } 2309 NFSUNLOCKSTATE(); 2310 2311 out: 2312 if (haslock) { 2313 NFSLOCKV4ROOTMUTEX(); 2314 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2315 NFSUNLOCKV4ROOTMUTEX(); 2316 } 2317 if (vnode_unlocked != 0) { 2318 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2319 if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) 2320 error = NFSERR_SERVERFAULT; 2321 } 2322 if (other_lop) 2323 free(other_lop, M_NFSDLOCK); 2324 NFSEXITCODE2(error, nd); 2325 return (error); 2326 } 2327 2328 /* 2329 * Check for state errors for Open. 2330 * repstat is passed back out as an error if more critical errors 2331 * are not detected. 2332 */ 2333 APPLESTATIC int 2334 nfsrv_opencheck(nfsquad_t clientid, nfsv4stateid_t *stateidp, 2335 struct nfsstate *new_stp, vnode_t vp, struct nfsrv_descript *nd, 2336 NFSPROC_T *p, int repstat) 2337 { 2338 struct nfsstate *stp, *nstp; 2339 struct nfsclient *clp; 2340 struct nfsstate *ownerstp; 2341 struct nfslockfile *lfp, *new_lfp; 2342 int error = 0, haslock = 0, ret, readonly = 0, getfhret = 0; 2343 2344 if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS) 2345 readonly = 1; 2346 /* 2347 * Check for restart conditions (client and server). 2348 */ 2349 error = nfsrv_checkrestart(clientid, new_stp->ls_flags, 2350 &new_stp->ls_stateid, 0); 2351 if (error) 2352 goto out; 2353 2354 /* 2355 * Check for state resource limit exceeded. 2356 * Technically this should be SMP protected, but the worst 2357 * case error is "out by one or two" on the count when it 2358 * returns NFSERR_RESOURCE and the limit is just a rather 2359 * arbitrary high water mark, so no harm is done. 
2360 */ 2361 if (nfsrv_openpluslock > nfsrv_v4statelimit) { 2362 error = NFSERR_RESOURCE; 2363 goto out; 2364 } 2365 2366 tryagain: 2367 new_lfp = malloc(sizeof (struct nfslockfile), 2368 M_NFSDLOCKFILE, M_WAITOK); 2369 if (vp) 2370 getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, new_lfp, 2371 NULL, p); 2372 NFSLOCKSTATE(); 2373 /* 2374 * Get the nfsclient structure. 2375 */ 2376 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 2377 (nfsquad_t)((u_quad_t)0), 0, nd, p); 2378 2379 /* 2380 * Look up the open owner. See if it needs confirmation and 2381 * check the seq#, as required. 2382 */ 2383 if (!error) 2384 nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp); 2385 2386 if (!error && ownerstp) { 2387 error = nfsrv_checkseqid(nd, new_stp->ls_seq, ownerstp, 2388 new_stp->ls_op); 2389 /* 2390 * If the OpenOwner hasn't been confirmed, assume the 2391 * old one was a replay and this one is ok. 2392 * See: RFC3530 Sec. 14.2.18. 2393 */ 2394 if (error == NFSERR_BADSEQID && 2395 (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM)) 2396 error = 0; 2397 } 2398 2399 /* 2400 * Check for grace. 2401 */ 2402 if (!error) 2403 error = nfsrv_checkgrace(nd, clp, new_stp->ls_flags); 2404 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error && 2405 nfsrv_checkstable(clp)) 2406 error = NFSERR_NOGRACE; 2407 2408 /* 2409 * If none of the above errors occurred, let repstat be 2410 * returned. 2411 */ 2412 if (repstat && !error) 2413 error = repstat; 2414 if (error) { 2415 NFSUNLOCKSTATE(); 2416 if (haslock) { 2417 NFSLOCKV4ROOTMUTEX(); 2418 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2419 NFSUNLOCKV4ROOTMUTEX(); 2420 } 2421 free(new_lfp, M_NFSDLOCKFILE); 2422 goto out; 2423 } 2424 2425 /* 2426 * If vp == NULL, the file doesn't exist yet, so return ok. 2427 * (This always happens on the first pass, so haslock must be 0.) 2428 */ 2429 if (vp == NULL) { 2430 NFSUNLOCKSTATE(); 2431 free(new_lfp, M_NFSDLOCKFILE); 2432 goto out; 2433 } 2434 2435 /* 2436 * Get the structure for the underlying file. 2437 */ 2438 if (getfhret) 2439 error = getfhret; 2440 else 2441 error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp, 2442 NULL, 0); 2443 if (new_lfp) 2444 free(new_lfp, M_NFSDLOCKFILE); 2445 if (error) { 2446 NFSUNLOCKSTATE(); 2447 if (haslock) { 2448 NFSLOCKV4ROOTMUTEX(); 2449 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2450 NFSUNLOCKV4ROOTMUTEX(); 2451 } 2452 goto out; 2453 } 2454 2455 /* 2456 * Search for a conflicting open/share. 2457 */ 2458 if (new_stp->ls_flags & NFSLCK_DELEGCUR) { 2459 /* 2460 * For Delegate_Cur, search for the matching Delegation, 2461 * which indicates no conflict. 2462 * An old delegation should have been recovered by the 2463 * client doing a Claim_DELEGATE_Prev, so I won't let 2464 * it match and return NFSERR_EXPIRED. Should I let it 2465 * match? 2466 */ 2467 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { 2468 if (!(stp->ls_flags & NFSLCK_OLDDELEG) && 2469 (((nd->nd_flag & ND_NFSV41) != 0 && 2470 stateidp->seqid == 0) || 2471 stateidp->seqid == stp->ls_stateid.seqid) && 2472 !NFSBCMP(stateidp->other, stp->ls_stateid.other, 2473 NFSX_STATEIDOTHER)) 2474 break; 2475 } 2476 if (stp == LIST_END(&lfp->lf_deleg) || 2477 ((new_stp->ls_flags & NFSLCK_WRITEACCESS) && 2478 (stp->ls_flags & NFSLCK_DELEGREAD))) { 2479 NFSUNLOCKSTATE(); 2480 if (haslock) { 2481 NFSLOCKV4ROOTMUTEX(); 2482 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2483 NFSUNLOCKV4ROOTMUTEX(); 2484 } 2485 error = NFSERR_EXPIRED; 2486 goto out; 2487 } 2488 } 2489 2490 /* 2491 * Check for access/deny bit conflicts. 
I check for the same 2492 * owner as well, in case the client didn't bother. 2493 */ 2494 LIST_FOREACH(stp, &lfp->lf_open, ls_file) { 2495 if (!(new_stp->ls_flags & NFSLCK_DELEGCUR) && 2496 (((new_stp->ls_flags & NFSLCK_ACCESSBITS) & 2497 ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))|| 2498 ((stp->ls_flags & NFSLCK_ACCESSBITS) & 2499 ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS)))){ 2500 ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p); 2501 if (ret == 1) { 2502 /* 2503 * nfsrv_clientconflict() unlocks 2504 * state when it returns non-zero. 2505 */ 2506 goto tryagain; 2507 } 2508 if (ret == 2) 2509 error = NFSERR_PERM; 2510 else if (new_stp->ls_flags & NFSLCK_RECLAIM) 2511 error = NFSERR_RECLAIMCONFLICT; 2512 else 2513 error = NFSERR_SHAREDENIED; 2514 if (ret == 0) 2515 NFSUNLOCKSTATE(); 2516 if (haslock) { 2517 NFSLOCKV4ROOTMUTEX(); 2518 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2519 NFSUNLOCKV4ROOTMUTEX(); 2520 } 2521 goto out; 2522 } 2523 } 2524 2525 /* 2526 * Check for a conflicting delegation. If one is found, call 2527 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't 2528 * been set yet, it will get the lock. Otherwise, it will recall 2529 * the delegation. Then, we try try again... 2530 * (If NFSLCK_DELEGCUR is set, it has a delegation, so there 2531 * isn't a conflict.) 2532 * I currently believe the conflict algorithm to be: 2533 * For Open with Read Access and Deny None 2534 * - there is a conflict iff a different client has a write delegation 2535 * For Open with other Write Access or any Deny except None 2536 * - there is a conflict if a different client has any delegation 2537 * - there is a conflict if the same client has a read delegation 2538 * (The current consensus is that this last case should be 2539 * considered a conflict since the client with a read delegation 2540 * could have done an Open with ReadAccess and WriteDeny 2541 * locally and then not have checked for the WriteDeny.) 2542 * Don't check for a Reclaim, since that will be dealt with 2543 * by nfsrv_openctrl(). 2544 */ 2545 if (!(new_stp->ls_flags & 2546 (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR | NFSLCK_RECLAIM))) { 2547 stp = LIST_FIRST(&lfp->lf_deleg); 2548 while (stp != LIST_END(&lfp->lf_deleg)) { 2549 nstp = LIST_NEXT(stp, ls_file); 2550 if ((readonly && stp->ls_clp != clp && 2551 (stp->ls_flags & NFSLCK_DELEGWRITE)) || 2552 (!readonly && (stp->ls_clp != clp || 2553 (stp->ls_flags & NFSLCK_DELEGREAD)))) { 2554 ret = nfsrv_delegconflict(stp, &haslock, p, vp); 2555 if (ret) { 2556 /* 2557 * nfsrv_delegconflict() unlocks state 2558 * when it returns non-zero. 2559 */ 2560 if (ret == -1) 2561 goto tryagain; 2562 error = ret; 2563 goto out; 2564 } 2565 } 2566 stp = nstp; 2567 } 2568 } 2569 NFSUNLOCKSTATE(); 2570 if (haslock) { 2571 NFSLOCKV4ROOTMUTEX(); 2572 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2573 NFSUNLOCKV4ROOTMUTEX(); 2574 } 2575 2576 out: 2577 NFSEXITCODE2(error, nd); 2578 return (error); 2579 } 2580 2581 /* 2582 * Open control function to create/update open state for an open. 
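 * This runs after nfsrv_opencheck() has vetted the request and the seqid#
 * has been advanced; it creates or updates the openowner, the open itself
 * and, when possible, a delegation, so unexpected errors here are mapped
 * to NFSERR_EXPIRED.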
2583 */ 2584 APPLESTATIC int 2585 nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp, 2586 struct nfsstate **new_stpp, nfsquad_t clientid, nfsv4stateid_t *stateidp, 2587 nfsv4stateid_t *delegstateidp, u_int32_t *rflagsp, struct nfsexstuff *exp, 2588 NFSPROC_T *p, u_quad_t filerev) 2589 { 2590 struct nfsstate *new_stp = *new_stpp; 2591 struct nfsstate *stp, *nstp; 2592 struct nfsstate *openstp = NULL, *new_open, *ownerstp, *new_deleg; 2593 struct nfslockfile *lfp, *new_lfp; 2594 struct nfsclient *clp; 2595 int error = 0, haslock = 0, ret, delegate = 1, writedeleg = 1; 2596 int readonly = 0, cbret = 1, getfhret = 0; 2597 int gotstate = 0, len = 0; 2598 u_char *clidp = NULL; 2599 2600 if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS) 2601 readonly = 1; 2602 /* 2603 * Check for restart conditions (client and server). 2604 * (Paranoia, should have been detected by nfsrv_opencheck().) 2605 * If an error does show up, return NFSERR_EXPIRED, since the 2606 * the seqid# has already been incremented. 2607 */ 2608 error = nfsrv_checkrestart(clientid, new_stp->ls_flags, 2609 &new_stp->ls_stateid, 0); 2610 if (error) { 2611 printf("Nfsd: openctrl unexpected restart err=%d\n", 2612 error); 2613 error = NFSERR_EXPIRED; 2614 goto out; 2615 } 2616 2617 clidp = malloc(NFSV4_OPAQUELIMIT, M_TEMP, M_WAITOK); 2618 tryagain: 2619 new_lfp = malloc(sizeof (struct nfslockfile), 2620 M_NFSDLOCKFILE, M_WAITOK); 2621 new_open = malloc(sizeof (struct nfsstate), 2622 M_NFSDSTATE, M_WAITOK); 2623 new_deleg = malloc(sizeof (struct nfsstate), 2624 M_NFSDSTATE, M_WAITOK); 2625 getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, new_lfp, 2626 NULL, p); 2627 NFSLOCKSTATE(); 2628 /* 2629 * Get the client structure. Since the linked lists could be changed 2630 * by other nfsd processes if this process does a tsleep(), one of 2631 * two things must be done. 2632 * 1 - don't tsleep() 2633 * or 2634 * 2 - get the nfsv4_lock() { indicated by haslock == 1 } 2635 * before using the lists, since this lock stops the other 2636 * nfsd. This should only be used for rare cases, since it 2637 * essentially single threads the nfsd. 2638 * At this time, it is only done for cases where the stable 2639 * storage file must be written prior to completion of state 2640 * expiration. 2641 */ 2642 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 2643 (nfsquad_t)((u_quad_t)0), 0, nd, p); 2644 if (!error && (clp->lc_flags & LCL_NEEDSCBNULL) && 2645 clp->lc_program) { 2646 /* 2647 * This happens on the first open for a client 2648 * that supports callbacks. 2649 */ 2650 NFSUNLOCKSTATE(); 2651 /* 2652 * Although nfsrv_docallback() will sleep, clp won't 2653 * go away, since they are only removed when the 2654 * nfsv4_lock() has blocked the nfsd threads. The 2655 * fields in clp can change, but having multiple 2656 * threads do this Null callback RPC should be 2657 * harmless. 2658 */ 2659 cbret = nfsrv_docallback(clp, NFSV4PROC_CBNULL, 2660 NULL, 0, NULL, NULL, NULL, 0, p); 2661 NFSLOCKSTATE(); 2662 clp->lc_flags &= ~LCL_NEEDSCBNULL; 2663 if (!cbret) 2664 clp->lc_flags |= LCL_CALLBACKSON; 2665 } 2666 2667 /* 2668 * Look up the open owner. See if it needs confirmation and 2669 * check the seq#, as required. 
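 * nfsrv_getowner() matches on the opaque owner string, so ownerstp will be
 * non-NULL when this open_owner already holds state for the client.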
2670 */ 2671 if (!error) 2672 nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp); 2673 2674 if (error) { 2675 NFSUNLOCKSTATE(); 2676 printf("Nfsd: openctrl unexpected state err=%d\n", 2677 error); 2678 free(new_lfp, M_NFSDLOCKFILE); 2679 free(new_open, M_NFSDSTATE); 2680 free(new_deleg, M_NFSDSTATE); 2681 if (haslock) { 2682 NFSLOCKV4ROOTMUTEX(); 2683 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2684 NFSUNLOCKV4ROOTMUTEX(); 2685 } 2686 error = NFSERR_EXPIRED; 2687 goto out; 2688 } 2689 2690 if (new_stp->ls_flags & NFSLCK_RECLAIM) 2691 nfsrv_markstable(clp); 2692 2693 /* 2694 * Get the structure for the underlying file. 2695 */ 2696 if (getfhret) 2697 error = getfhret; 2698 else 2699 error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp, 2700 NULL, 0); 2701 if (new_lfp) 2702 free(new_lfp, M_NFSDLOCKFILE); 2703 if (error) { 2704 NFSUNLOCKSTATE(); 2705 printf("Nfsd openctrl unexpected getlockfile err=%d\n", 2706 error); 2707 free(new_open, M_NFSDSTATE); 2708 free(new_deleg, M_NFSDSTATE); 2709 if (haslock) { 2710 NFSLOCKV4ROOTMUTEX(); 2711 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2712 NFSUNLOCKV4ROOTMUTEX(); 2713 } 2714 goto out; 2715 } 2716 2717 /* 2718 * Search for a conflicting open/share. 2719 */ 2720 if (new_stp->ls_flags & NFSLCK_DELEGCUR) { 2721 /* 2722 * For Delegate_Cur, search for the matching Delegation, 2723 * which indicates no conflict. 2724 * An old delegation should have been recovered by the 2725 * client doing a Claim_DELEGATE_Prev, so I won't let 2726 * it match and return NFSERR_EXPIRED. Should I let it 2727 * match? 2728 */ 2729 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { 2730 if (!(stp->ls_flags & NFSLCK_OLDDELEG) && 2731 (((nd->nd_flag & ND_NFSV41) != 0 && 2732 stateidp->seqid == 0) || 2733 stateidp->seqid == stp->ls_stateid.seqid) && 2734 !NFSBCMP(stateidp->other, stp->ls_stateid.other, 2735 NFSX_STATEIDOTHER)) 2736 break; 2737 } 2738 if (stp == LIST_END(&lfp->lf_deleg) || 2739 ((new_stp->ls_flags & NFSLCK_WRITEACCESS) && 2740 (stp->ls_flags & NFSLCK_DELEGREAD))) { 2741 NFSUNLOCKSTATE(); 2742 printf("Nfsd openctrl unexpected expiry\n"); 2743 free(new_open, M_NFSDSTATE); 2744 free(new_deleg, M_NFSDSTATE); 2745 if (haslock) { 2746 NFSLOCKV4ROOTMUTEX(); 2747 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2748 NFSUNLOCKV4ROOTMUTEX(); 2749 } 2750 error = NFSERR_EXPIRED; 2751 goto out; 2752 } 2753 2754 /* 2755 * Don't issue a Delegation, since one already exists and 2756 * delay delegation timeout, as required. 2757 */ 2758 delegate = 0; 2759 nfsrv_delaydelegtimeout(stp); 2760 } 2761 2762 /* 2763 * Check for access/deny bit conflicts. I also check for the 2764 * same owner, since the client might not have bothered to check. 2765 * Also, note an open for the same file and owner, if found, 2766 * which is all we do here for Delegate_Cur, since conflict 2767 * checking is already done. 2768 */ 2769 LIST_FOREACH(stp, &lfp->lf_open, ls_file) { 2770 if (ownerstp && stp->ls_openowner == ownerstp) 2771 openstp = stp; 2772 if (!(new_stp->ls_flags & NFSLCK_DELEGCUR)) { 2773 /* 2774 * If another client has the file open, the only 2775 * delegation that can be issued is a Read delegation 2776 * and only if it is a Read open with Deny none. 
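 * (writedeleg is cleared when only a write delegation must be withheld;
 * delegate is cleared when no delegation at all can be issued.)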
2777 */ 2778 if (clp != stp->ls_clp) { 2779 if ((stp->ls_flags & NFSLCK_SHAREBITS) == 2780 NFSLCK_READACCESS) 2781 writedeleg = 0; 2782 else 2783 delegate = 0; 2784 } 2785 if(((new_stp->ls_flags & NFSLCK_ACCESSBITS) & 2786 ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))|| 2787 ((stp->ls_flags & NFSLCK_ACCESSBITS) & 2788 ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS))){ 2789 ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p); 2790 if (ret == 1) { 2791 /* 2792 * nfsrv_clientconflict() unlocks state 2793 * when it returns non-zero. 2794 */ 2795 free(new_open, M_NFSDSTATE); 2796 free(new_deleg, M_NFSDSTATE); 2797 openstp = NULL; 2798 goto tryagain; 2799 } 2800 if (ret == 2) 2801 error = NFSERR_PERM; 2802 else if (new_stp->ls_flags & NFSLCK_RECLAIM) 2803 error = NFSERR_RECLAIMCONFLICT; 2804 else 2805 error = NFSERR_SHAREDENIED; 2806 if (ret == 0) 2807 NFSUNLOCKSTATE(); 2808 if (haslock) { 2809 NFSLOCKV4ROOTMUTEX(); 2810 nfsv4_unlock(&nfsv4rootfs_lock, 1); 2811 NFSUNLOCKV4ROOTMUTEX(); 2812 } 2813 free(new_open, M_NFSDSTATE); 2814 free(new_deleg, M_NFSDSTATE); 2815 printf("nfsd openctrl unexpected client cnfl\n"); 2816 goto out; 2817 } 2818 } 2819 } 2820 2821 /* 2822 * Check for a conflicting delegation. If one is found, call 2823 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't 2824 * been set yet, it will get the lock. Otherwise, it will recall 2825 * the delegation. Then, we try try again... 2826 * (If NFSLCK_DELEGCUR is set, it has a delegation, so there 2827 * isn't a conflict.) 2828 * I currently believe the conflict algorithm to be: 2829 * For Open with Read Access and Deny None 2830 * - there is a conflict iff a different client has a write delegation 2831 * For Open with other Write Access or any Deny except None 2832 * - there is a conflict if a different client has any delegation 2833 * - there is a conflict if the same client has a read delegation 2834 * (The current consensus is that this last case should be 2835 * considered a conflict since the client with a read delegation 2836 * could have done an Open with ReadAccess and WriteDeny 2837 * locally and then not have checked for the WriteDeny.) 2838 */ 2839 if (!(new_stp->ls_flags & (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR))) { 2840 stp = LIST_FIRST(&lfp->lf_deleg); 2841 while (stp != LIST_END(&lfp->lf_deleg)) { 2842 nstp = LIST_NEXT(stp, ls_file); 2843 if (stp->ls_clp != clp && (stp->ls_flags & NFSLCK_DELEGREAD)) 2844 writedeleg = 0; 2845 else 2846 delegate = 0; 2847 if ((readonly && stp->ls_clp != clp && 2848 (stp->ls_flags & NFSLCK_DELEGWRITE)) || 2849 (!readonly && (stp->ls_clp != clp || 2850 (stp->ls_flags & NFSLCK_DELEGREAD)))) { 2851 if (new_stp->ls_flags & NFSLCK_RECLAIM) { 2852 delegate = 2; 2853 } else { 2854 ret = nfsrv_delegconflict(stp, &haslock, p, vp); 2855 if (ret) { 2856 /* 2857 * nfsrv_delegconflict() unlocks state 2858 * when it returns non-zero. 2859 */ 2860 printf("Nfsd openctrl unexpected deleg cnfl\n"); 2861 free(new_open, M_NFSDSTATE); 2862 free(new_deleg, M_NFSDSTATE); 2863 if (ret == -1) { 2864 openstp = NULL; 2865 goto tryagain; 2866 } 2867 error = ret; 2868 goto out; 2869 } 2870 } 2871 } 2872 stp = nstp; 2873 } 2874 } 2875 2876 /* 2877 * We only get here if there was no open that conflicted. 2878 * If an open for the owner exists, or in the access/deny bits. 2879 * Otherwise it is a new open. If the open_owner hasn't been 2880 * confirmed, replace the open with the new one needing confirmation, 2881 * otherwise add the open. 
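 * The cases handled below, in order, are: Claim_Delegate_Prev
 * (NFSLCK_DELEGPREV), Claim_Previous with a delegation type
 * (NFSLCK_DELEGREAD/NFSLCK_DELEGWRITE), an open for an existing openowner
 * and, finally, an open for a brand new openowner.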
2882 */ 2883 if (new_stp->ls_flags & NFSLCK_DELEGPREV) { 2884 /* 2885 * Handle NFSLCK_DELEGPREV by searching the old delegations for 2886 * a match. If found, just move the old delegation to the current 2887 * delegation list and issue open. If not found, return 2888 * NFSERR_EXPIRED. 2889 */ 2890 LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) { 2891 if (stp->ls_lfp == lfp) { 2892 /* Found it */ 2893 if (stp->ls_clp != clp) 2894 panic("olddeleg clp"); 2895 LIST_REMOVE(stp, ls_list); 2896 LIST_REMOVE(stp, ls_hash); 2897 stp->ls_flags &= ~NFSLCK_OLDDELEG; 2898 stp->ls_stateid.seqid = delegstateidp->seqid = 1; 2899 stp->ls_stateid.other[0] = delegstateidp->other[0] = 2900 clp->lc_clientid.lval[0]; 2901 stp->ls_stateid.other[1] = delegstateidp->other[1] = 2902 clp->lc_clientid.lval[1]; 2903 stp->ls_stateid.other[2] = delegstateidp->other[2] = 2904 nfsrv_nextstateindex(clp); 2905 stp->ls_compref = nd->nd_compref; 2906 LIST_INSERT_HEAD(&clp->lc_deleg, stp, ls_list); 2907 LIST_INSERT_HEAD(NFSSTATEHASH(clp, 2908 stp->ls_stateid), stp, ls_hash); 2909 if (stp->ls_flags & NFSLCK_DELEGWRITE) 2910 *rflagsp |= NFSV4OPEN_WRITEDELEGATE; 2911 else 2912 *rflagsp |= NFSV4OPEN_READDELEGATE; 2913 clp->lc_delegtime = NFSD_MONOSEC + 2914 nfsrv_lease + NFSRV_LEASEDELTA; 2915 2916 /* 2917 * Now, do the associated open. 2918 */ 2919 new_open->ls_stateid.seqid = 1; 2920 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; 2921 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; 2922 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); 2923 new_open->ls_flags = (new_stp->ls_flags&NFSLCK_DENYBITS)| 2924 NFSLCK_OPEN; 2925 if (stp->ls_flags & NFSLCK_DELEGWRITE) 2926 new_open->ls_flags |= (NFSLCK_READACCESS | 2927 NFSLCK_WRITEACCESS); 2928 else 2929 new_open->ls_flags |= NFSLCK_READACCESS; 2930 new_open->ls_uid = new_stp->ls_uid; 2931 new_open->ls_lfp = lfp; 2932 new_open->ls_clp = clp; 2933 LIST_INIT(&new_open->ls_open); 2934 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); 2935 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), 2936 new_open, ls_hash); 2937 /* 2938 * and handle the open owner 2939 */ 2940 if (ownerstp) { 2941 new_open->ls_openowner = ownerstp; 2942 LIST_INSERT_HEAD(&ownerstp->ls_open,new_open,ls_list); 2943 } else { 2944 new_open->ls_openowner = new_stp; 2945 new_stp->ls_flags = 0; 2946 nfsrvd_refcache(new_stp->ls_op); 2947 new_stp->ls_noopens = 0; 2948 LIST_INIT(&new_stp->ls_open); 2949 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); 2950 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); 2951 *new_stpp = NULL; 2952 nfsstatsv1.srvopenowners++; 2953 nfsrv_openpluslock++; 2954 } 2955 openstp = new_open; 2956 new_open = NULL; 2957 nfsstatsv1.srvopens++; 2958 nfsrv_openpluslock++; 2959 break; 2960 } 2961 } 2962 if (stp == LIST_END(&clp->lc_olddeleg)) 2963 error = NFSERR_EXPIRED; 2964 } else if (new_stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) { 2965 /* 2966 * Scan to see that no delegation for this client and file 2967 * doesn't already exist. 2968 * There also shouldn't yet be an Open for this file and 2969 * openowner. 2970 */ 2971 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { 2972 if (stp->ls_clp == clp) 2973 break; 2974 } 2975 if (stp == LIST_END(&lfp->lf_deleg) && openstp == NULL) { 2976 /* 2977 * This is the Claim_Previous case with a delegation 2978 * type != Delegate_None. 2979 */ 2980 /* 2981 * First, add the delegation. (Although we must issue the 2982 * delegation, we can also ask for an immediate return.) 
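 * NFSV4OPEN_RECALL is set when delegations are disabled, the callback path
 * has not been confirmed, the delegation limit has been reached, the vnode
 * does not allow delegations or a reclaim conflict was noted above, so the
 * client will return the delegation right away.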
2983 */ 2984 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 1; 2985 new_deleg->ls_stateid.other[0] = delegstateidp->other[0] = 2986 clp->lc_clientid.lval[0]; 2987 new_deleg->ls_stateid.other[1] = delegstateidp->other[1] = 2988 clp->lc_clientid.lval[1]; 2989 new_deleg->ls_stateid.other[2] = delegstateidp->other[2] = 2990 nfsrv_nextstateindex(clp); 2991 if (new_stp->ls_flags & NFSLCK_DELEGWRITE) { 2992 new_deleg->ls_flags = (NFSLCK_DELEGWRITE | 2993 NFSLCK_READACCESS | NFSLCK_WRITEACCESS); 2994 *rflagsp |= NFSV4OPEN_WRITEDELEGATE; 2995 nfsrv_writedelegcnt++; 2996 } else { 2997 new_deleg->ls_flags = (NFSLCK_DELEGREAD | 2998 NFSLCK_READACCESS); 2999 *rflagsp |= NFSV4OPEN_READDELEGATE; 3000 } 3001 new_deleg->ls_uid = new_stp->ls_uid; 3002 new_deleg->ls_lfp = lfp; 3003 new_deleg->ls_clp = clp; 3004 new_deleg->ls_filerev = filerev; 3005 new_deleg->ls_compref = nd->nd_compref; 3006 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); 3007 LIST_INSERT_HEAD(NFSSTATEHASH(clp, 3008 new_deleg->ls_stateid), new_deleg, ls_hash); 3009 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); 3010 new_deleg = NULL; 3011 if (delegate == 2 || nfsrv_issuedelegs == 0 || 3012 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) != 3013 LCL_CALLBACKSON || 3014 NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) || 3015 !NFSVNO_DELEGOK(vp)) 3016 *rflagsp |= NFSV4OPEN_RECALL; 3017 nfsstatsv1.srvdelegates++; 3018 nfsrv_openpluslock++; 3019 nfsrv_delegatecnt++; 3020 3021 /* 3022 * Now, do the associated open. 3023 */ 3024 new_open->ls_stateid.seqid = 1; 3025 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; 3026 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; 3027 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); 3028 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_DENYBITS) | 3029 NFSLCK_OPEN; 3030 if (new_stp->ls_flags & NFSLCK_DELEGWRITE) 3031 new_open->ls_flags |= (NFSLCK_READACCESS | 3032 NFSLCK_WRITEACCESS); 3033 else 3034 new_open->ls_flags |= NFSLCK_READACCESS; 3035 new_open->ls_uid = new_stp->ls_uid; 3036 new_open->ls_lfp = lfp; 3037 new_open->ls_clp = clp; 3038 LIST_INIT(&new_open->ls_open); 3039 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); 3040 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), 3041 new_open, ls_hash); 3042 /* 3043 * and handle the open owner 3044 */ 3045 if (ownerstp) { 3046 new_open->ls_openowner = ownerstp; 3047 LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list); 3048 } else { 3049 new_open->ls_openowner = new_stp; 3050 new_stp->ls_flags = 0; 3051 nfsrvd_refcache(new_stp->ls_op); 3052 new_stp->ls_noopens = 0; 3053 LIST_INIT(&new_stp->ls_open); 3054 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); 3055 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); 3056 *new_stpp = NULL; 3057 nfsstatsv1.srvopenowners++; 3058 nfsrv_openpluslock++; 3059 } 3060 openstp = new_open; 3061 new_open = NULL; 3062 nfsstatsv1.srvopens++; 3063 nfsrv_openpluslock++; 3064 } else { 3065 error = NFSERR_RECLAIMCONFLICT; 3066 } 3067 } else if (ownerstp) { 3068 if (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM) { 3069 /* Replace the open */ 3070 if (ownerstp->ls_op) 3071 nfsrvd_derefcache(ownerstp->ls_op); 3072 ownerstp->ls_op = new_stp->ls_op; 3073 nfsrvd_refcache(ownerstp->ls_op); 3074 ownerstp->ls_seq = new_stp->ls_seq; 3075 *rflagsp |= NFSV4OPEN_RESULTCONFIRM; 3076 stp = LIST_FIRST(&ownerstp->ls_open); 3077 stp->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) | 3078 NFSLCK_OPEN; 3079 stp->ls_stateid.seqid = 1; 3080 stp->ls_uid = new_stp->ls_uid; 3081 if (lfp != stp->ls_lfp) { 
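			/*
			 * The unconfirmed open is being replaced by this Open,
			 * which may refer to a different file, so move it onto
			 * the new file's open list.
			 */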
3082 LIST_REMOVE(stp, ls_file); 3083 LIST_INSERT_HEAD(&lfp->lf_open, stp, ls_file); 3084 stp->ls_lfp = lfp; 3085 } 3086 openstp = stp; 3087 } else if (openstp) { 3088 openstp->ls_flags |= (new_stp->ls_flags & NFSLCK_SHAREBITS); 3089 openstp->ls_stateid.seqid++; 3090 if ((nd->nd_flag & ND_NFSV41) != 0 && 3091 openstp->ls_stateid.seqid == 0) 3092 openstp->ls_stateid.seqid = 1; 3093 3094 /* 3095 * This is where we can choose to issue a delegation. 3096 */ 3097 if (delegate == 0 || writedeleg == 0 || 3098 NFSVNO_EXRDONLY(exp) || (readonly != 0 && 3099 nfsrv_writedelegifpos == 0) || 3100 !NFSVNO_DELEGOK(vp) || 3101 (new_stp->ls_flags & NFSLCK_WANTRDELEG) != 0 || 3102 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) != 3103 LCL_CALLBACKSON) 3104 *rflagsp |= NFSV4OPEN_WDCONTENTION; 3105 else if (nfsrv_issuedelegs == 0 || 3106 NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt)) 3107 *rflagsp |= NFSV4OPEN_WDRESOURCE; 3108 else if ((new_stp->ls_flags & NFSLCK_WANTNODELEG) != 0) 3109 *rflagsp |= NFSV4OPEN_WDNOTWANTED; 3110 else { 3111 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 1; 3112 new_deleg->ls_stateid.other[0] = delegstateidp->other[0] 3113 = clp->lc_clientid.lval[0]; 3114 new_deleg->ls_stateid.other[1] = delegstateidp->other[1] 3115 = clp->lc_clientid.lval[1]; 3116 new_deleg->ls_stateid.other[2] = delegstateidp->other[2] 3117 = nfsrv_nextstateindex(clp); 3118 new_deleg->ls_flags = (NFSLCK_DELEGWRITE | 3119 NFSLCK_READACCESS | NFSLCK_WRITEACCESS); 3120 *rflagsp |= NFSV4OPEN_WRITEDELEGATE; 3121 new_deleg->ls_uid = new_stp->ls_uid; 3122 new_deleg->ls_lfp = lfp; 3123 new_deleg->ls_clp = clp; 3124 new_deleg->ls_filerev = filerev; 3125 new_deleg->ls_compref = nd->nd_compref; 3126 nfsrv_writedelegcnt++; 3127 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); 3128 LIST_INSERT_HEAD(NFSSTATEHASH(clp, 3129 new_deleg->ls_stateid), new_deleg, ls_hash); 3130 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); 3131 new_deleg = NULL; 3132 nfsstatsv1.srvdelegates++; 3133 nfsrv_openpluslock++; 3134 nfsrv_delegatecnt++; 3135 } 3136 } else { 3137 new_open->ls_stateid.seqid = 1; 3138 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; 3139 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; 3140 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); 3141 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS)| 3142 NFSLCK_OPEN; 3143 new_open->ls_uid = new_stp->ls_uid; 3144 new_open->ls_openowner = ownerstp; 3145 new_open->ls_lfp = lfp; 3146 new_open->ls_clp = clp; 3147 LIST_INIT(&new_open->ls_open); 3148 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); 3149 LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list); 3150 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), 3151 new_open, ls_hash); 3152 openstp = new_open; 3153 new_open = NULL; 3154 nfsstatsv1.srvopens++; 3155 nfsrv_openpluslock++; 3156 3157 /* 3158 * This is where we can choose to issue a delegation. 
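 * When no delegation is issued here, one of the NFSV4OPEN_WD* flags is set
 * in *rflagsp to record the reason (contention, a resource limit or the
 * client asking for none).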
3159 */ 3160 if (delegate == 0 || (writedeleg == 0 && readonly == 0) || 3161 !NFSVNO_DELEGOK(vp) || 3162 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) != 3163 LCL_CALLBACKSON) 3164 *rflagsp |= NFSV4OPEN_WDCONTENTION; 3165 else if (nfsrv_issuedelegs == 0 || 3166 NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt)) 3167 *rflagsp |= NFSV4OPEN_WDRESOURCE; 3168 else if ((new_stp->ls_flags & NFSLCK_WANTNODELEG) != 0) 3169 *rflagsp |= NFSV4OPEN_WDNOTWANTED; 3170 else { 3171 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 1; 3172 new_deleg->ls_stateid.other[0] = delegstateidp->other[0] 3173 = clp->lc_clientid.lval[0]; 3174 new_deleg->ls_stateid.other[1] = delegstateidp->other[1] 3175 = clp->lc_clientid.lval[1]; 3176 new_deleg->ls_stateid.other[2] = delegstateidp->other[2] 3177 = nfsrv_nextstateindex(clp); 3178 if (writedeleg && !NFSVNO_EXRDONLY(exp) && 3179 (nfsrv_writedelegifpos || !readonly) && 3180 (new_stp->ls_flags & NFSLCK_WANTRDELEG) == 0) { 3181 new_deleg->ls_flags = (NFSLCK_DELEGWRITE | 3182 NFSLCK_READACCESS | NFSLCK_WRITEACCESS); 3183 *rflagsp |= NFSV4OPEN_WRITEDELEGATE; 3184 nfsrv_writedelegcnt++; 3185 } else { 3186 new_deleg->ls_flags = (NFSLCK_DELEGREAD | 3187 NFSLCK_READACCESS); 3188 *rflagsp |= NFSV4OPEN_READDELEGATE; 3189 } 3190 new_deleg->ls_uid = new_stp->ls_uid; 3191 new_deleg->ls_lfp = lfp; 3192 new_deleg->ls_clp = clp; 3193 new_deleg->ls_filerev = filerev; 3194 new_deleg->ls_compref = nd->nd_compref; 3195 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); 3196 LIST_INSERT_HEAD(NFSSTATEHASH(clp, 3197 new_deleg->ls_stateid), new_deleg, ls_hash); 3198 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); 3199 new_deleg = NULL; 3200 nfsstatsv1.srvdelegates++; 3201 nfsrv_openpluslock++; 3202 nfsrv_delegatecnt++; 3203 } 3204 } 3205 } else { 3206 /* 3207 * New owner case. Start the open_owner sequence with a 3208 * Needs confirmation (unless a reclaim) and hang the 3209 * new open off it. 3210 */ 3211 new_open->ls_stateid.seqid = 1; 3212 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; 3213 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; 3214 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); 3215 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) | 3216 NFSLCK_OPEN; 3217 new_open->ls_uid = new_stp->ls_uid; 3218 LIST_INIT(&new_open->ls_open); 3219 new_open->ls_openowner = new_stp; 3220 new_open->ls_lfp = lfp; 3221 new_open->ls_clp = clp; 3222 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); 3223 if (new_stp->ls_flags & NFSLCK_RECLAIM) { 3224 new_stp->ls_flags = 0; 3225 } else if ((nd->nd_flag & ND_NFSV41) != 0) { 3226 /* NFSv4.1 never needs confirmation. */ 3227 new_stp->ls_flags = 0; 3228 3229 /* 3230 * This is where we can choose to issue a delegation. 
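 * Since an NFSv4.1 Open never needs confirmation, a delegation can be
 * issued right away; the client's NFSLCK_WANTNODELEG/NFSLCK_WANTRDELEG
 * hints are honoured when deciding whether and what type to issue.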
3231 */ 3232 if (delegate && nfsrv_issuedelegs && 3233 (writedeleg || readonly) && 3234 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) == 3235 LCL_CALLBACKSON && 3236 !NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) && 3237 NFSVNO_DELEGOK(vp) && 3238 ((nd->nd_flag & ND_NFSV41) == 0 || 3239 (new_stp->ls_flags & NFSLCK_WANTNODELEG) == 0)) { 3240 new_deleg->ls_stateid.seqid = 3241 delegstateidp->seqid = 1; 3242 new_deleg->ls_stateid.other[0] = 3243 delegstateidp->other[0] 3244 = clp->lc_clientid.lval[0]; 3245 new_deleg->ls_stateid.other[1] = 3246 delegstateidp->other[1] 3247 = clp->lc_clientid.lval[1]; 3248 new_deleg->ls_stateid.other[2] = 3249 delegstateidp->other[2] 3250 = nfsrv_nextstateindex(clp); 3251 if (writedeleg && !NFSVNO_EXRDONLY(exp) && 3252 (nfsrv_writedelegifpos || !readonly) && 3253 ((nd->nd_flag & ND_NFSV41) == 0 || 3254 (new_stp->ls_flags & NFSLCK_WANTRDELEG) == 3255 0)) { 3256 new_deleg->ls_flags = 3257 (NFSLCK_DELEGWRITE | 3258 NFSLCK_READACCESS | 3259 NFSLCK_WRITEACCESS); 3260 *rflagsp |= NFSV4OPEN_WRITEDELEGATE; 3261 nfsrv_writedelegcnt++; 3262 } else { 3263 new_deleg->ls_flags = 3264 (NFSLCK_DELEGREAD | 3265 NFSLCK_READACCESS); 3266 *rflagsp |= NFSV4OPEN_READDELEGATE; 3267 } 3268 new_deleg->ls_uid = new_stp->ls_uid; 3269 new_deleg->ls_lfp = lfp; 3270 new_deleg->ls_clp = clp; 3271 new_deleg->ls_filerev = filerev; 3272 new_deleg->ls_compref = nd->nd_compref; 3273 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, 3274 ls_file); 3275 LIST_INSERT_HEAD(NFSSTATEHASH(clp, 3276 new_deleg->ls_stateid), new_deleg, ls_hash); 3277 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, 3278 ls_list); 3279 new_deleg = NULL; 3280 nfsstatsv1.srvdelegates++; 3281 nfsrv_openpluslock++; 3282 nfsrv_delegatecnt++; 3283 } 3284 /* 3285 * Since NFSv4.1 never does an OpenConfirm, the first 3286 * open state will be acquired here. 3287 */ 3288 if (!(clp->lc_flags & LCL_STAMPEDSTABLE)) { 3289 clp->lc_flags |= LCL_STAMPEDSTABLE; 3290 len = clp->lc_idlen; 3291 NFSBCOPY(clp->lc_id, clidp, len); 3292 gotstate = 1; 3293 } 3294 } else { 3295 *rflagsp |= NFSV4OPEN_RESULTCONFIRM; 3296 new_stp->ls_flags = NFSLCK_NEEDSCONFIRM; 3297 } 3298 nfsrvd_refcache(new_stp->ls_op); 3299 new_stp->ls_noopens = 0; 3300 LIST_INIT(&new_stp->ls_open); 3301 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); 3302 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); 3303 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), 3304 new_open, ls_hash); 3305 openstp = new_open; 3306 new_open = NULL; 3307 *new_stpp = NULL; 3308 nfsstatsv1.srvopens++; 3309 nfsrv_openpluslock++; 3310 nfsstatsv1.srvopenowners++; 3311 nfsrv_openpluslock++; 3312 } 3313 if (!error) { 3314 stateidp->seqid = openstp->ls_stateid.seqid; 3315 stateidp->other[0] = openstp->ls_stateid.other[0]; 3316 stateidp->other[1] = openstp->ls_stateid.other[1]; 3317 stateidp->other[2] = openstp->ls_stateid.other[2]; 3318 } 3319 NFSUNLOCKSTATE(); 3320 if (haslock) { 3321 NFSLOCKV4ROOTMUTEX(); 3322 nfsv4_unlock(&nfsv4rootfs_lock, 1); 3323 NFSUNLOCKV4ROOTMUTEX(); 3324 } 3325 if (new_open) 3326 free(new_open, M_NFSDSTATE); 3327 if (new_deleg) 3328 free(new_deleg, M_NFSDSTATE); 3329 3330 /* 3331 * If the NFSv4.1 client just acquired its first open, write a timestamp 3332 * to the stable storage file. 3333 */ 3334 if (gotstate != 0) { 3335 nfsrv_writestable(clidp, len, NFSNST_NEWSTATE, p); 3336 nfsrv_backupstable(); 3337 } 3338 3339 out: 3340 free(clidp, M_TEMP); 3341 NFSEXITCODE2(error, nd); 3342 return (error); 3343 } 3344 3345 /* 3346 * Open update. Does the confirm, downgrade and close. 
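 * (Open_Confirm, Open_Downgrade and Close. For a Close, *retwriteaccessp
 * reports whether the open held write access.)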
3347 */ 3348 APPLESTATIC int 3349 nfsrv_openupdate(vnode_t vp, struct nfsstate *new_stp, nfsquad_t clientid, 3350 nfsv4stateid_t *stateidp, struct nfsrv_descript *nd, NFSPROC_T *p, 3351 int *retwriteaccessp) 3352 { 3353 struct nfsstate *stp; 3354 struct nfsclient *clp; 3355 struct nfslockfile *lfp; 3356 u_int32_t bits; 3357 int error = 0, gotstate = 0, len = 0; 3358 u_char *clidp = NULL; 3359 3360 /* 3361 * Check for restart conditions (client and server). 3362 */ 3363 error = nfsrv_checkrestart(clientid, new_stp->ls_flags, 3364 &new_stp->ls_stateid, 0); 3365 if (error) 3366 goto out; 3367 3368 clidp = malloc(NFSV4_OPAQUELIMIT, M_TEMP, M_WAITOK); 3369 NFSLOCKSTATE(); 3370 /* 3371 * Get the open structure via clientid and stateid. 3372 */ 3373 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 3374 (nfsquad_t)((u_quad_t)0), 0, nd, p); 3375 if (!error) 3376 error = nfsrv_getstate(clp, &new_stp->ls_stateid, 3377 new_stp->ls_flags, &stp); 3378 3379 /* 3380 * Sanity check the open. 3381 */ 3382 if (!error && (!(stp->ls_flags & NFSLCK_OPEN) || 3383 (!(new_stp->ls_flags & NFSLCK_CONFIRM) && 3384 (stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)) || 3385 ((new_stp->ls_flags & NFSLCK_CONFIRM) && 3386 (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM))))) 3387 error = NFSERR_BADSTATEID; 3388 3389 if (!error) 3390 error = nfsrv_checkseqid(nd, new_stp->ls_seq, 3391 stp->ls_openowner, new_stp->ls_op); 3392 if (!error && stp->ls_stateid.seqid != new_stp->ls_stateid.seqid && 3393 (((nd->nd_flag & ND_NFSV41) == 0 && 3394 !(new_stp->ls_flags & NFSLCK_CONFIRM)) || 3395 ((nd->nd_flag & ND_NFSV41) != 0 && 3396 new_stp->ls_stateid.seqid != 0))) 3397 error = NFSERR_OLDSTATEID; 3398 if (!error && vnode_vtype(vp) != VREG) { 3399 if (vnode_vtype(vp) == VDIR) 3400 error = NFSERR_ISDIR; 3401 else 3402 error = NFSERR_INVAL; 3403 } 3404 3405 if (error) { 3406 /* 3407 * If a client tries to confirm an Open with a bad 3408 * seqid# and there are no byte range locks or other Opens 3409 * on the openowner, just throw it away, so the next use of the 3410 * openowner will start a fresh seq#. 3411 */ 3412 if (error == NFSERR_BADSEQID && 3413 (new_stp->ls_flags & NFSLCK_CONFIRM) && 3414 nfsrv_nootherstate(stp)) 3415 nfsrv_freeopenowner(stp->ls_openowner, 0, p); 3416 NFSUNLOCKSTATE(); 3417 goto out; 3418 } 3419 3420 /* 3421 * Set the return stateid. 3422 */ 3423 stateidp->seqid = stp->ls_stateid.seqid + 1; 3424 if ((nd->nd_flag & ND_NFSV41) != 0 && stateidp->seqid == 0) 3425 stateidp->seqid = 1; 3426 stateidp->other[0] = stp->ls_stateid.other[0]; 3427 stateidp->other[1] = stp->ls_stateid.other[1]; 3428 stateidp->other[2] = stp->ls_stateid.other[2]; 3429 /* 3430 * Now, handle the three cases. 3431 */ 3432 if (new_stp->ls_flags & NFSLCK_CONFIRM) { 3433 /* 3434 * If the open doesn't need confirmation, it seems to me that 3435 * there is a client error, but I'll just log it and keep going? 
3436 */ 3437 if (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)) 3438 printf("Nfsv4d: stray open confirm\n"); 3439 stp->ls_openowner->ls_flags = 0; 3440 stp->ls_stateid.seqid++; 3441 if ((nd->nd_flag & ND_NFSV41) != 0 && 3442 stp->ls_stateid.seqid == 0) 3443 stp->ls_stateid.seqid = 1; 3444 if (!(clp->lc_flags & LCL_STAMPEDSTABLE)) { 3445 clp->lc_flags |= LCL_STAMPEDSTABLE; 3446 len = clp->lc_idlen; 3447 NFSBCOPY(clp->lc_id, clidp, len); 3448 gotstate = 1; 3449 } 3450 NFSUNLOCKSTATE(); 3451 } else if (new_stp->ls_flags & NFSLCK_CLOSE) { 3452 lfp = stp->ls_lfp; 3453 if (retwriteaccessp != NULL) { 3454 if ((stp->ls_flags & NFSLCK_WRITEACCESS) != 0) 3455 *retwriteaccessp = 1; 3456 else 3457 *retwriteaccessp = 0; 3458 } 3459 if (nfsrv_dolocallocks != 0 && !LIST_EMPTY(&stp->ls_open)) { 3460 /* Get the lf lock */ 3461 nfsrv_locklf(lfp); 3462 NFSUNLOCKSTATE(); 3463 ASSERT_VOP_ELOCKED(vp, "nfsrv_openupdate"); 3464 NFSVOPUNLOCK(vp, 0); 3465 if (nfsrv_freeopen(stp, vp, 1, p) == 0) { 3466 NFSLOCKSTATE(); 3467 nfsrv_unlocklf(lfp); 3468 NFSUNLOCKSTATE(); 3469 } 3470 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 3471 } else { 3472 (void) nfsrv_freeopen(stp, NULL, 0, p); 3473 NFSUNLOCKSTATE(); 3474 } 3475 } else { 3476 /* 3477 * Update the share bits, making sure that the new set are a 3478 * subset of the old ones. 3479 */ 3480 bits = (new_stp->ls_flags & NFSLCK_SHAREBITS); 3481 if (~(stp->ls_flags) & bits) { 3482 NFSUNLOCKSTATE(); 3483 error = NFSERR_INVAL; 3484 goto out; 3485 } 3486 stp->ls_flags = (bits | NFSLCK_OPEN); 3487 stp->ls_stateid.seqid++; 3488 if ((nd->nd_flag & ND_NFSV41) != 0 && 3489 stp->ls_stateid.seqid == 0) 3490 stp->ls_stateid.seqid = 1; 3491 NFSUNLOCKSTATE(); 3492 } 3493 3494 /* 3495 * If the client just confirmed its first open, write a timestamp 3496 * to the stable storage file. 3497 */ 3498 if (gotstate != 0) { 3499 nfsrv_writestable(clidp, len, NFSNST_NEWSTATE, p); 3500 nfsrv_backupstable(); 3501 } 3502 3503 out: 3504 free(clidp, M_TEMP); 3505 NFSEXITCODE2(error, nd); 3506 return (error); 3507 } 3508 3509 /* 3510 * Delegation update. Does the purge and return. 3511 */ 3512 APPLESTATIC int 3513 nfsrv_delegupdate(struct nfsrv_descript *nd, nfsquad_t clientid, 3514 nfsv4stateid_t *stateidp, vnode_t vp, int op, struct ucred *cred, 3515 NFSPROC_T *p, int *retwriteaccessp) 3516 { 3517 struct nfsstate *stp; 3518 struct nfsclient *clp; 3519 int error = 0; 3520 fhandle_t fh; 3521 3522 /* 3523 * Do a sanity check against the file handle for DelegReturn. 3524 */ 3525 if (vp) { 3526 error = nfsvno_getfh(vp, &fh, p); 3527 if (error) 3528 goto out; 3529 } 3530 /* 3531 * Check for restart conditions (client and server). 3532 */ 3533 if (op == NFSV4OP_DELEGRETURN) 3534 error = nfsrv_checkrestart(clientid, NFSLCK_DELEGRETURN, 3535 stateidp, 0); 3536 else 3537 error = nfsrv_checkrestart(clientid, NFSLCK_DELEGPURGE, 3538 stateidp, 0); 3539 3540 NFSLOCKSTATE(); 3541 /* 3542 * Get the open structure via clientid and stateid. 
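 * (For DelegReturn the stateid actually refers to a delegation rather
 * than an open.)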
3543 */ 3544 if (!error) 3545 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 3546 (nfsquad_t)((u_quad_t)0), 0, nd, p); 3547 if (error) { 3548 if (error == NFSERR_CBPATHDOWN) 3549 error = 0; 3550 if (error == NFSERR_STALECLIENTID && op == NFSV4OP_DELEGRETURN) 3551 error = NFSERR_STALESTATEID; 3552 } 3553 if (!error && op == NFSV4OP_DELEGRETURN) { 3554 error = nfsrv_getstate(clp, stateidp, NFSLCK_DELEGRETURN, &stp); 3555 if (!error && stp->ls_stateid.seqid != stateidp->seqid && 3556 ((nd->nd_flag & ND_NFSV41) == 0 || stateidp->seqid != 0)) 3557 error = NFSERR_OLDSTATEID; 3558 } 3559 /* 3560 * NFSERR_EXPIRED means that the state has gone away, 3561 * so Delegations have been purged. Just return ok. 3562 */ 3563 if (error == NFSERR_EXPIRED && op == NFSV4OP_DELEGPURGE) { 3564 NFSUNLOCKSTATE(); 3565 error = 0; 3566 goto out; 3567 } 3568 if (error) { 3569 NFSUNLOCKSTATE(); 3570 goto out; 3571 } 3572 3573 if (op == NFSV4OP_DELEGRETURN) { 3574 if (NFSBCMP((caddr_t)&fh, (caddr_t)&stp->ls_lfp->lf_fh, 3575 sizeof (fhandle_t))) { 3576 NFSUNLOCKSTATE(); 3577 error = NFSERR_BADSTATEID; 3578 goto out; 3579 } 3580 if (retwriteaccessp != NULL) { 3581 if ((stp->ls_flags & NFSLCK_DELEGWRITE) != 0) 3582 *retwriteaccessp = 1; 3583 else 3584 *retwriteaccessp = 0; 3585 } 3586 nfsrv_freedeleg(stp); 3587 } else { 3588 nfsrv_freedeleglist(&clp->lc_olddeleg); 3589 } 3590 NFSUNLOCKSTATE(); 3591 error = 0; 3592 3593 out: 3594 NFSEXITCODE(error); 3595 return (error); 3596 } 3597 3598 /* 3599 * Release lock owner. 3600 */ 3601 APPLESTATIC int 3602 nfsrv_releaselckown(struct nfsstate *new_stp, nfsquad_t clientid, 3603 NFSPROC_T *p) 3604 { 3605 struct nfsstate *stp, *nstp, *openstp, *ownstp; 3606 struct nfsclient *clp; 3607 int error = 0; 3608 3609 /* 3610 * Check for restart conditions (client and server). 3611 */ 3612 error = nfsrv_checkrestart(clientid, new_stp->ls_flags, 3613 &new_stp->ls_stateid, 0); 3614 if (error) 3615 goto out; 3616 3617 NFSLOCKSTATE(); 3618 /* 3619 * Get the lock owner by name. 3620 */ 3621 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, NULL, 3622 (nfsquad_t)((u_quad_t)0), 0, NULL, p); 3623 if (error) { 3624 NFSUNLOCKSTATE(); 3625 goto out; 3626 } 3627 LIST_FOREACH(ownstp, &clp->lc_open, ls_list) { 3628 LIST_FOREACH(openstp, &ownstp->ls_open, ls_list) { 3629 stp = LIST_FIRST(&openstp->ls_open); 3630 while (stp != LIST_END(&openstp->ls_open)) { 3631 nstp = LIST_NEXT(stp, ls_list); 3632 /* 3633 * If the owner matches, check for locks and 3634 * then free or return an error. 3635 */ 3636 if (stp->ls_ownerlen == new_stp->ls_ownerlen && 3637 !NFSBCMP(stp->ls_owner, new_stp->ls_owner, 3638 stp->ls_ownerlen)){ 3639 if (LIST_EMPTY(&stp->ls_lock)) { 3640 nfsrv_freelockowner(stp, NULL, 0, p); 3641 } else { 3642 NFSUNLOCKSTATE(); 3643 error = NFSERR_LOCKSHELD; 3644 goto out; 3645 } 3646 } 3647 stp = nstp; 3648 } 3649 } 3650 } 3651 NFSUNLOCKSTATE(); 3652 3653 out: 3654 NFSEXITCODE(error); 3655 return (error); 3656 } 3657 3658 /* 3659 * Get the file handle for a lock structure. 3660 */ 3661 static int 3662 nfsrv_getlockfh(vnode_t vp, u_short flags, struct nfslockfile *new_lfp, 3663 fhandle_t *nfhp, NFSPROC_T *p) 3664 { 3665 fhandle_t *fhp = NULL; 3666 int error; 3667 3668 /* 3669 * For lock, use the new nfslock structure, otherwise just 3670 * a fhandle_t on the stack. 
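 * (For illustration: an Open passes NFSLCK_OPEN together with a freshly
 * allocated new_lfp, so the handle lands in new_lfp->lf_fh where
 * nfsrv_getlockfile() can later hash and compare it, while callers such as
 * the Lock and Check paths supply their own nfhp on the stack instead.)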
3671 */ 3672 if (flags & NFSLCK_OPEN) { 3673 KASSERT(new_lfp != NULL, ("nfsrv_getlockfh: new_lfp NULL")); 3674 fhp = &new_lfp->lf_fh; 3675 } else if (nfhp) { 3676 fhp = nfhp; 3677 } else { 3678 panic("nfsrv_getlockfh"); 3679 } 3680 error = nfsvno_getfh(vp, fhp, p); 3681 NFSEXITCODE(error); 3682 return (error); 3683 } 3684 3685 /* 3686 * Get an nfs lock structure. Allocate one, as required, and return a 3687 * pointer to it. 3688 * Returns an NFSERR_xxx upon failure or -1 to indicate no current lock. 3689 */ 3690 static int 3691 nfsrv_getlockfile(u_short flags, struct nfslockfile **new_lfpp, 3692 struct nfslockfile **lfpp, fhandle_t *nfhp, int lockit) 3693 { 3694 struct nfslockfile *lfp; 3695 fhandle_t *fhp = NULL, *tfhp; 3696 struct nfslockhashhead *hp; 3697 struct nfslockfile *new_lfp = NULL; 3698 3699 /* 3700 * For lock, use the new nfslock structure, otherwise just 3701 * a fhandle_t on the stack. 3702 */ 3703 if (flags & NFSLCK_OPEN) { 3704 new_lfp = *new_lfpp; 3705 fhp = &new_lfp->lf_fh; 3706 } else if (nfhp) { 3707 fhp = nfhp; 3708 } else { 3709 panic("nfsrv_getlockfile"); 3710 } 3711 3712 hp = NFSLOCKHASH(fhp); 3713 LIST_FOREACH(lfp, hp, lf_hash) { 3714 tfhp = &lfp->lf_fh; 3715 if (NFSVNO_CMPFH(fhp, tfhp)) { 3716 if (lockit) 3717 nfsrv_locklf(lfp); 3718 *lfpp = lfp; 3719 return (0); 3720 } 3721 } 3722 if (!(flags & NFSLCK_OPEN)) 3723 return (-1); 3724 3725 /* 3726 * No match, so chain the new one into the list. 3727 */ 3728 LIST_INIT(&new_lfp->lf_open); 3729 LIST_INIT(&new_lfp->lf_lock); 3730 LIST_INIT(&new_lfp->lf_deleg); 3731 LIST_INIT(&new_lfp->lf_locallock); 3732 LIST_INIT(&new_lfp->lf_rollback); 3733 new_lfp->lf_locallock_lck.nfslock_usecnt = 0; 3734 new_lfp->lf_locallock_lck.nfslock_lock = 0; 3735 new_lfp->lf_usecount = 0; 3736 LIST_INSERT_HEAD(hp, new_lfp, lf_hash); 3737 *lfpp = new_lfp; 3738 *new_lfpp = NULL; 3739 return (0); 3740 } 3741 3742 /* 3743 * This function adds a nfslock lock structure to the list for the associated 3744 * nfsstate and nfslockfile structures. It will be inserted after the 3745 * entry pointed at by insert_lop. 3746 */ 3747 static void 3748 nfsrv_insertlock(struct nfslock *new_lop, struct nfslock *insert_lop, 3749 struct nfsstate *stp, struct nfslockfile *lfp) 3750 { 3751 struct nfslock *lop, *nlop; 3752 3753 new_lop->lo_stp = stp; 3754 new_lop->lo_lfp = lfp; 3755 3756 if (stp != NULL) { 3757 /* Insert in increasing lo_first order */ 3758 lop = LIST_FIRST(&lfp->lf_lock); 3759 if (lop == LIST_END(&lfp->lf_lock) || 3760 new_lop->lo_first <= lop->lo_first) { 3761 LIST_INSERT_HEAD(&lfp->lf_lock, new_lop, lo_lckfile); 3762 } else { 3763 nlop = LIST_NEXT(lop, lo_lckfile); 3764 while (nlop != LIST_END(&lfp->lf_lock) && 3765 nlop->lo_first < new_lop->lo_first) { 3766 lop = nlop; 3767 nlop = LIST_NEXT(lop, lo_lckfile); 3768 } 3769 LIST_INSERT_AFTER(lop, new_lop, lo_lckfile); 3770 } 3771 } else { 3772 new_lop->lo_lckfile.le_prev = NULL; /* list not used */ 3773 } 3774 3775 /* 3776 * Insert after insert_lop, which is overloaded as stp or lfp for 3777 * an empty list. 3778 */ 3779 if (stp == NULL && (struct nfslockfile *)insert_lop == lfp) 3780 LIST_INSERT_HEAD(&lfp->lf_locallock, new_lop, lo_lckowner); 3781 else if ((struct nfsstate *)insert_lop == stp) 3782 LIST_INSERT_HEAD(&stp->ls_lock, new_lop, lo_lckowner); 3783 else 3784 LIST_INSERT_AFTER(insert_lop, new_lop, lo_lckowner); 3785 if (stp != NULL) { 3786 nfsstatsv1.srvlocks++; 3787 nfsrv_openpluslock++; 3788 } 3789 } 3790 3791 /* 3792 * This function updates the locking for a lock owner and given file. 
It 3793 * maintains a list of lock ranges ordered on increasing file offset that 3794 * are NFSLCK_READ or NFSLCK_WRITE and non-overlapping (aka POSIX style). 3795 * It always adds new_lop to the list and sometimes uses the one pointed 3796 * at by other_lopp. 3797 */ 3798 static void 3799 nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp, 3800 struct nfslock **other_lopp, struct nfslockfile *lfp) 3801 { 3802 struct nfslock *new_lop = *new_lopp; 3803 struct nfslock *lop, *tlop, *ilop; 3804 struct nfslock *other_lop = *other_lopp; 3805 int unlock = 0, myfile = 0; 3806 u_int64_t tmp; 3807 3808 /* 3809 * Work down the list until the lock is merged. 3810 */ 3811 if (new_lop->lo_flags & NFSLCK_UNLOCK) 3812 unlock = 1; 3813 if (stp != NULL) { 3814 ilop = (struct nfslock *)stp; 3815 lop = LIST_FIRST(&stp->ls_lock); 3816 } else { 3817 ilop = (struct nfslock *)lfp; 3818 lop = LIST_FIRST(&lfp->lf_locallock); 3819 } 3820 while (lop != NULL) { 3821 /* 3822 * Only check locks for this file that aren't before the start of 3823 * new lock's range. 3824 */ 3825 if (lop->lo_lfp == lfp) { 3826 myfile = 1; 3827 if (lop->lo_end >= new_lop->lo_first) { 3828 if (new_lop->lo_end < lop->lo_first) { 3829 /* 3830 * If the new lock ends before the start of the 3831 * current lock's range, no merge, just insert 3832 * the new lock. 3833 */ 3834 break; 3835 } 3836 if (new_lop->lo_flags == lop->lo_flags || 3837 (new_lop->lo_first <= lop->lo_first && 3838 new_lop->lo_end >= lop->lo_end)) { 3839 /* 3840 * This lock can be absorbed by the new lock/unlock. 3841 * This happens when it covers the entire range 3842 * of the old lock or is contiguous 3843 * with the old lock and is of the same type or an 3844 * unlock. 3845 */ 3846 if (lop->lo_first < new_lop->lo_first) 3847 new_lop->lo_first = lop->lo_first; 3848 if (lop->lo_end > new_lop->lo_end) 3849 new_lop->lo_end = lop->lo_end; 3850 tlop = lop; 3851 lop = LIST_NEXT(lop, lo_lckowner); 3852 nfsrv_freenfslock(tlop); 3853 continue; 3854 } 3855 3856 /* 3857 * All these cases are for contiguous locks that are not the 3858 * same type, so they can't be merged. 3859 */ 3860 if (new_lop->lo_first <= lop->lo_first) { 3861 /* 3862 * This case is where the new lock overlaps with the 3863 * first part of the old lock. Move the start of the 3864 * old lock to just past the end of the new lock. The 3865 * new lock will be inserted in front of the old, since 3866 * ilop hasn't been updated. (We are done now.) 3867 */ 3868 lop->lo_first = new_lop->lo_end; 3869 break; 3870 } 3871 if (new_lop->lo_end >= lop->lo_end) { 3872 /* 3873 * This case is where the new lock overlaps with the 3874 * end of the old lock's range. Move the old lock's 3875 * end to just before the new lock's first and insert 3876 * the new lock after the old lock. 3877 * Might not be done yet, since the new lock could 3878 * overlap further locks with higher ranges. 3879 */ 3880 lop->lo_end = new_lop->lo_first; 3881 ilop = lop; 3882 lop = LIST_NEXT(lop, lo_lckowner); 3883 continue; 3884 } 3885 /* 3886 * The final case is where the new lock's range is in the 3887 * middle of the current lock's and splits the current lock 3888 * up. Use *other_lopp to handle the second part of the 3889 * split old lock range. (We are done now.) 3890 * For unlock, we use new_lop as other_lop and tmp, since 3891 * other_lop and new_lop are the same for this case. 3892 * We noted the unlock case above, so we don't need 3893 * new_lop->lo_flags any longer. 
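 * (A concrete, illustrative instance of this split: with an existing
 * write lock covering bytes 0-99 and a new unlock of bytes 40-59, the old
 * lock is trimmed to 0-39, other_lop is filled in as a lock of 60-99 that
 * keeps the old lock's flags, and nothing is inserted for the unlocked
 * range itself.)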
3894 */ 3895 tmp = new_lop->lo_first; 3896 if (other_lop == NULL) { 3897 if (!unlock) 3898 panic("nfsd srv update unlock"); 3899 other_lop = new_lop; 3900 *new_lopp = NULL; 3901 } 3902 other_lop->lo_first = new_lop->lo_end; 3903 other_lop->lo_end = lop->lo_end; 3904 other_lop->lo_flags = lop->lo_flags; 3905 other_lop->lo_stp = stp; 3906 other_lop->lo_lfp = lfp; 3907 lop->lo_end = tmp; 3908 nfsrv_insertlock(other_lop, lop, stp, lfp); 3909 *other_lopp = NULL; 3910 ilop = lop; 3911 break; 3912 } 3913 } 3914 ilop = lop; 3915 lop = LIST_NEXT(lop, lo_lckowner); 3916 if (myfile && (lop == NULL || lop->lo_lfp != lfp)) 3917 break; 3918 } 3919 3920 /* 3921 * Insert the new lock in the list at the appropriate place. 3922 */ 3923 if (!unlock) { 3924 nfsrv_insertlock(new_lop, ilop, stp, lfp); 3925 *new_lopp = NULL; 3926 } 3927 } 3928 3929 /* 3930 * This function handles sequencing of locks, etc. 3931 * It returns an error that indicates what the caller should do. 3932 */ 3933 static int 3934 nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid, 3935 struct nfsstate *stp, struct nfsrvcache *op) 3936 { 3937 int error = 0; 3938 3939 if ((nd->nd_flag & ND_NFSV41) != 0) 3940 /* NFSv4.1 ignores the open_seqid and lock_seqid. */ 3941 goto out; 3942 if (op != nd->nd_rp) 3943 panic("nfsrvstate checkseqid"); 3944 if (!(op->rc_flag & RC_INPROG)) 3945 panic("nfsrvstate not inprog"); 3946 if (stp->ls_op && stp->ls_op->rc_refcnt <= 0) { 3947 printf("refcnt=%d\n", stp->ls_op->rc_refcnt); 3948 panic("nfsrvstate op refcnt"); 3949 } 3950 if ((stp->ls_seq + 1) == seqid) { 3951 if (stp->ls_op) 3952 nfsrvd_derefcache(stp->ls_op); 3953 stp->ls_op = op; 3954 nfsrvd_refcache(op); 3955 stp->ls_seq = seqid; 3956 goto out; 3957 } else if (stp->ls_seq == seqid && stp->ls_op && 3958 op->rc_xid == stp->ls_op->rc_xid && 3959 op->rc_refcnt == 0 && 3960 op->rc_reqlen == stp->ls_op->rc_reqlen && 3961 op->rc_cksum == stp->ls_op->rc_cksum) { 3962 if (stp->ls_op->rc_flag & RC_INPROG) { 3963 error = NFSERR_DONTREPLY; 3964 goto out; 3965 } 3966 nd->nd_rp = stp->ls_op; 3967 nd->nd_rp->rc_flag |= RC_INPROG; 3968 nfsrvd_delcache(op); 3969 error = NFSERR_REPLYFROMCACHE; 3970 goto out; 3971 } 3972 error = NFSERR_BADSEQID; 3973 3974 out: 3975 NFSEXITCODE2(error, nd); 3976 return (error); 3977 } 3978 3979 /* 3980 * Get the client ip address for callbacks. If the strings can't be parsed, 3981 * just set lc_program to 0 to indicate no callbacks are possible. 3982 * (For cases where the address can't be parsed or is 0.0.0.0.0.0, set 3983 * the address to the client's transport address. This won't be used 3984 * for callbacks, but can be printed out by nfsstats for info.) 3985 * Return error if the xdr can't be parsed, 0 otherwise. 
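 * (Illustrative example, assuming the usual h1.h2.h3.h4.p1.p2 universal
 * address form: a client advertising r_netid "tcp" and r_addr
 * "192.0.2.7.8.1" would be parsed as IPv4 address 192.0.2.7 with
 * callback port 8 * 256 + 1 = 2049.)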
3986 */ 3987 APPLESTATIC int 3988 nfsrv_getclientipaddr(struct nfsrv_descript *nd, struct nfsclient *clp) 3989 { 3990 u_int32_t *tl; 3991 u_char *cp, *cp2; 3992 int i, j; 3993 struct sockaddr_in *rad, *sad; 3994 u_char protocol[5], addr[24]; 3995 int error = 0, cantparse = 0; 3996 union { 3997 in_addr_t ival; 3998 u_char cval[4]; 3999 } ip; 4000 union { 4001 in_port_t sval; 4002 u_char cval[2]; 4003 } port; 4004 4005 rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *); 4006 rad->sin_family = AF_INET; 4007 rad->sin_len = sizeof (struct sockaddr_in); 4008 rad->sin_addr.s_addr = 0; 4009 rad->sin_port = 0; 4010 clp->lc_req.nr_client = NULL; 4011 clp->lc_req.nr_lock = 0; 4012 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 4013 i = fxdr_unsigned(int, *tl); 4014 if (i >= 3 && i <= 4) { 4015 error = nfsrv_mtostr(nd, protocol, i); 4016 if (error) 4017 goto nfsmout; 4018 if (!strcmp(protocol, "tcp")) { 4019 clp->lc_flags |= LCL_TCPCALLBACK; 4020 clp->lc_req.nr_sotype = SOCK_STREAM; 4021 clp->lc_req.nr_soproto = IPPROTO_TCP; 4022 } else if (!strcmp(protocol, "udp")) { 4023 clp->lc_req.nr_sotype = SOCK_DGRAM; 4024 clp->lc_req.nr_soproto = IPPROTO_UDP; 4025 } else { 4026 cantparse = 1; 4027 } 4028 } else { 4029 cantparse = 1; 4030 if (i > 0) { 4031 error = nfsm_advance(nd, NFSM_RNDUP(i), -1); 4032 if (error) 4033 goto nfsmout; 4034 } 4035 } 4036 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 4037 i = fxdr_unsigned(int, *tl); 4038 if (i < 0) { 4039 error = NFSERR_BADXDR; 4040 goto nfsmout; 4041 } else if (i == 0) { 4042 cantparse = 1; 4043 } else if (!cantparse && i <= 23 && i >= 11) { 4044 error = nfsrv_mtostr(nd, addr, i); 4045 if (error) 4046 goto nfsmout; 4047 4048 /* 4049 * Parse out the address fields. We expect 6 decimal numbers 4050 * separated by '.'s. 4051 */ 4052 cp = addr; 4053 i = 0; 4054 while (*cp && i < 6) { 4055 cp2 = cp; 4056 while (*cp2 && *cp2 != '.') 4057 cp2++; 4058 if (*cp2) 4059 *cp2++ = '\0'; 4060 else if (i != 5) { 4061 cantparse = 1; 4062 break; 4063 } 4064 j = nfsrv_getipnumber(cp); 4065 if (j >= 0) { 4066 if (i < 4) 4067 ip.cval[3 - i] = j; 4068 else 4069 port.cval[5 - i] = j; 4070 } else { 4071 cantparse = 1; 4072 break; 4073 } 4074 cp = cp2; 4075 i++; 4076 } 4077 if (!cantparse) { 4078 if (ip.ival != 0x0) { 4079 rad->sin_addr.s_addr = htonl(ip.ival); 4080 rad->sin_port = htons(port.sval); 4081 } else { 4082 cantparse = 1; 4083 } 4084 } 4085 } else { 4086 cantparse = 1; 4087 if (i > 0) { 4088 error = nfsm_advance(nd, NFSM_RNDUP(i), -1); 4089 if (error) 4090 goto nfsmout; 4091 } 4092 } 4093 if (cantparse) { 4094 sad = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in *); 4095 if (sad->sin_family == AF_INET) { 4096 rad->sin_addr.s_addr = sad->sin_addr.s_addr; 4097 rad->sin_port = 0x0; 4098 } 4099 clp->lc_program = 0; 4100 } 4101 nfsmout: 4102 NFSEXITCODE2(error, nd); 4103 return (error); 4104 } 4105 4106 /* 4107 * Turn a string of up to three decimal digits into a number. Return -1 upon 4108 * error. 4109 */ 4110 static int 4111 nfsrv_getipnumber(u_char *cp) 4112 { 4113 int i = 0, j = 0; 4114 4115 while (*cp) { 4116 if (j > 2 || *cp < '0' || *cp > '9') 4117 return (-1); 4118 i *= 10; 4119 i += (*cp - '0'); 4120 cp++; 4121 j++; 4122 } 4123 if (i < 256) 4124 return (i); 4125 return (-1); 4126 } 4127 4128 /* 4129 * This function checks for restart conditions. 4130 */ 4131 static int 4132 nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags, 4133 nfsv4stateid_t *stateidp, int specialid) 4134 { 4135 int ret = 0; 4136 4137 /* 4138 * First check for a server restart. 
Open, LockT, ReleaseLockOwner 4139 * and DelegPurge have a clientid, the rest a stateid. 4140 */ 4141 if (flags & 4142 (NFSLCK_OPEN | NFSLCK_TEST | NFSLCK_RELEASE | NFSLCK_DELEGPURGE)) { 4143 if (clientid.lval[0] != nfsrvboottime) { 4144 ret = NFSERR_STALECLIENTID; 4145 goto out; 4146 } 4147 } else if (stateidp->other[0] != nfsrvboottime && 4148 specialid == 0) { 4149 ret = NFSERR_STALESTATEID; 4150 goto out; 4151 } 4152 4153 /* 4154 * Read, Write, Setattr and LockT can return NFSERR_GRACE and do 4155 * not use a lock/open owner seqid#, so the check can be done now. 4156 * (The others will be checked, as required, later.) 4157 */ 4158 if (!(flags & (NFSLCK_CHECK | NFSLCK_TEST))) 4159 goto out; 4160 4161 NFSLOCKSTATE(); 4162 ret = nfsrv_checkgrace(NULL, NULL, flags); 4163 NFSUNLOCKSTATE(); 4164 4165 out: 4166 NFSEXITCODE(ret); 4167 return (ret); 4168 } 4169 4170 /* 4171 * Check for grace. 4172 */ 4173 static int 4174 nfsrv_checkgrace(struct nfsrv_descript *nd, struct nfsclient *clp, 4175 u_int32_t flags) 4176 { 4177 int error = 0, notreclaimed; 4178 struct nfsrv_stable *sp; 4179 4180 if ((nfsrv_stablefirst.nsf_flags & (NFSNSF_UPDATEDONE | 4181 NFSNSF_GRACEOVER)) == 0) { 4182 /* 4183 * First, check to see if all of the clients have done a 4184 * ReclaimComplete. If so, grace can end now. 4185 */ 4186 notreclaimed = 0; 4187 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { 4188 if ((sp->nst_flag & NFSNST_RECLAIMED) == 0) { 4189 notreclaimed = 1; 4190 break; 4191 } 4192 } 4193 if (notreclaimed == 0) 4194 nfsrv_stablefirst.nsf_flags |= (NFSNSF_GRACEOVER | 4195 NFSNSF_NEEDLOCK); 4196 } 4197 4198 if ((nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) != 0) { 4199 if (flags & NFSLCK_RECLAIM) { 4200 error = NFSERR_NOGRACE; 4201 goto out; 4202 } 4203 } else { 4204 if (!(flags & NFSLCK_RECLAIM)) { 4205 error = NFSERR_GRACE; 4206 goto out; 4207 } 4208 if (nd != NULL && clp != NULL && 4209 (nd->nd_flag & ND_NFSV41) != 0 && 4210 (clp->lc_flags & LCL_RECLAIMCOMPLETE) != 0) { 4211 error = NFSERR_NOGRACE; 4212 goto out; 4213 } 4214 4215 /* 4216 * If grace is almost over and we are still getting Reclaims, 4217 * extend grace a bit. 4218 */ 4219 if ((NFSD_MONOSEC + NFSRV_LEASEDELTA) > 4220 nfsrv_stablefirst.nsf_eograce) 4221 nfsrv_stablefirst.nsf_eograce = NFSD_MONOSEC + 4222 NFSRV_LEASEDELTA; 4223 } 4224 4225 out: 4226 NFSEXITCODE(error); 4227 return (error); 4228 } 4229 4230 /* 4231 * Do a server callback. 4232 */ 4233 static int 4234 nfsrv_docallback(struct nfsclient *clp, int procnum, nfsv4stateid_t *stateidp, 4235 int trunc, fhandle_t *fhp, struct nfsvattr *nap, nfsattrbit_t *attrbitp, 4236 int laytype, NFSPROC_T *p) 4237 { 4238 mbuf_t m; 4239 u_int32_t *tl; 4240 struct nfsrv_descript *nd; 4241 struct ucred *cred; 4242 int error = 0; 4243 u_int32_t callback; 4244 struct nfsdsession *sep = NULL; 4245 uint64_t tval; 4246 4247 nd = malloc(sizeof(*nd), M_TEMP, M_WAITOK | M_ZERO); 4248 cred = newnfs_getcred(); 4249 NFSLOCKSTATE(); /* mostly for lc_cbref++ */ 4250 if (clp->lc_flags & LCL_NEEDSCONFIRM) { 4251 NFSUNLOCKSTATE(); 4252 panic("docallb"); 4253 } 4254 clp->lc_cbref++; 4255 4256 /* 4257 * Fill the callback program# and version into the request 4258 * structure for newnfs_connect() to use. 4259 */ 4260 clp->lc_req.nr_prog = clp->lc_program; 4261 #ifdef notnow 4262 if ((clp->lc_flags & LCL_NFSV41) != 0) 4263 clp->lc_req.nr_vers = NFSV41_CBVERS; 4264 else 4265 #endif 4266 clp->lc_req.nr_vers = NFSV4_CBVERS; 4267 4268 /* 4269 * First, fill in some of the fields of nd and cr. 
4270 */ 4271 nd->nd_flag = ND_NFSV4; 4272 if (clp->lc_flags & LCL_GSS) 4273 nd->nd_flag |= ND_KERBV; 4274 if ((clp->lc_flags & LCL_NFSV41) != 0) 4275 nd->nd_flag |= ND_NFSV41; 4276 nd->nd_repstat = 0; 4277 cred->cr_uid = clp->lc_uid; 4278 cred->cr_gid = clp->lc_gid; 4279 callback = clp->lc_callback; 4280 NFSUNLOCKSTATE(); 4281 cred->cr_ngroups = 1; 4282 4283 /* 4284 * Get the first mbuf for the request. 4285 */ 4286 MGET(m, M_WAITOK, MT_DATA); 4287 mbuf_setlen(m, 0); 4288 nd->nd_mreq = nd->nd_mb = m; 4289 nd->nd_bpos = NFSMTOD(m, caddr_t); 4290 4291 /* 4292 * and build the callback request. 4293 */ 4294 if (procnum == NFSV4OP_CBGETATTR) { 4295 nd->nd_procnum = NFSV4PROC_CBCOMPOUND; 4296 error = nfsrv_cbcallargs(nd, clp, callback, NFSV4OP_CBGETATTR, 4297 "CB Getattr", &sep); 4298 if (error != 0) { 4299 mbuf_freem(nd->nd_mreq); 4300 goto errout; 4301 } 4302 (void)nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0); 4303 (void)nfsrv_putattrbit(nd, attrbitp); 4304 } else if (procnum == NFSV4OP_CBRECALL) { 4305 nd->nd_procnum = NFSV4PROC_CBCOMPOUND; 4306 error = nfsrv_cbcallargs(nd, clp, callback, NFSV4OP_CBRECALL, 4307 "CB Recall", &sep); 4308 if (error != 0) { 4309 mbuf_freem(nd->nd_mreq); 4310 goto errout; 4311 } 4312 NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID); 4313 *tl++ = txdr_unsigned(stateidp->seqid); 4314 NFSBCOPY((caddr_t)stateidp->other, (caddr_t)tl, 4315 NFSX_STATEIDOTHER); 4316 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 4317 if (trunc) 4318 *tl = newnfs_true; 4319 else 4320 *tl = newnfs_false; 4321 (void)nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0); 4322 } else if (procnum == NFSV4OP_CBLAYOUTRECALL) { 4323 NFSD_DEBUG(4, "docallback layout recall\n"); 4324 nd->nd_procnum = NFSV4PROC_CBCOMPOUND; 4325 error = nfsrv_cbcallargs(nd, clp, callback, 4326 NFSV4OP_CBLAYOUTRECALL, "CB Reclayout", &sep); 4327 NFSD_DEBUG(4, "aft cbcallargs=%d\n", error); 4328 if (error != 0) { 4329 mbuf_freem(nd->nd_mreq); 4330 goto errout; 4331 } 4332 NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); 4333 *tl++ = txdr_unsigned(laytype); 4334 *tl++ = txdr_unsigned(NFSLAYOUTIOMODE_ANY); 4335 *tl++ = newnfs_true; 4336 *tl = txdr_unsigned(NFSV4LAYOUTRET_FILE); 4337 nfsm_fhtom(nd, (uint8_t *)fhp, NFSX_MYFH, 0); 4338 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_HYPER + NFSX_STATEID); 4339 tval = 0; 4340 txdr_hyper(tval, tl); tl += 2; 4341 tval = UINT64_MAX; 4342 txdr_hyper(tval, tl); tl += 2; 4343 *tl++ = txdr_unsigned(stateidp->seqid); 4344 NFSBCOPY(stateidp->other, tl, NFSX_STATEIDOTHER); 4345 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 4346 NFSD_DEBUG(4, "aft args\n"); 4347 } else if (procnum == NFSV4PROC_CBNULL) { 4348 nd->nd_procnum = NFSV4PROC_CBNULL; 4349 if ((clp->lc_flags & LCL_NFSV41) != 0) { 4350 error = nfsv4_getcbsession(clp, &sep); 4351 if (error != 0) { 4352 mbuf_freem(nd->nd_mreq); 4353 goto errout; 4354 } 4355 } 4356 } else { 4357 error = NFSERR_SERVERFAULT; 4358 mbuf_freem(nd->nd_mreq); 4359 goto errout; 4360 } 4361 4362 /* 4363 * Call newnfs_connect(), as required, and then newnfs_request(). 
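 * (Roughly: for an NFSv4.0 client the server connects out to the callback
 * address the client advertised, while for NFSv4.1 the reply is expected
 * to travel over the session's backchannel on a connection the client
 * already holds, so no new connect is attempted and the callback simply
 * fails if no backchannel transport is available.)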
4364 */ 4365 (void) newnfs_sndlock(&clp->lc_req.nr_lock); 4366 if (clp->lc_req.nr_client == NULL) { 4367 if ((clp->lc_flags & LCL_NFSV41) != 0) { 4368 error = ECONNREFUSED; 4369 nfsrv_freesession(sep, NULL); 4370 } else if (nd->nd_procnum == NFSV4PROC_CBNULL) 4371 error = newnfs_connect(NULL, &clp->lc_req, cred, 4372 NULL, 1); 4373 else 4374 error = newnfs_connect(NULL, &clp->lc_req, cred, 4375 NULL, 3); 4376 } 4377 newnfs_sndunlock(&clp->lc_req.nr_lock); 4378 NFSD_DEBUG(4, "aft sndunlock=%d\n", error); 4379 if (!error) { 4380 if ((nd->nd_flag & ND_NFSV41) != 0) { 4381 KASSERT(sep != NULL, ("sep NULL")); 4382 if (sep->sess_cbsess.nfsess_xprt != NULL) 4383 error = newnfs_request(nd, NULL, clp, 4384 &clp->lc_req, NULL, NULL, cred, 4385 clp->lc_program, clp->lc_req.nr_vers, NULL, 4386 1, NULL, &sep->sess_cbsess); 4387 else { 4388 /* 4389 * This should probably never occur, but if a 4390 * client somehow does an RPC without a 4391 * SequenceID Op that causes a callback just 4392 * after the nfsd threads have been terminated 4393 * and restared we could conceivably get here 4394 * without a backchannel xprt. 4395 */ 4396 printf("nfsrv_docallback: no xprt\n"); 4397 error = ECONNREFUSED; 4398 } 4399 NFSD_DEBUG(4, "aft newnfs_request=%d\n", error); 4400 nfsrv_freesession(sep, NULL); 4401 } else 4402 error = newnfs_request(nd, NULL, clp, &clp->lc_req, 4403 NULL, NULL, cred, clp->lc_program, 4404 clp->lc_req.nr_vers, NULL, 1, NULL, NULL); 4405 } 4406 errout: 4407 NFSFREECRED(cred); 4408 4409 /* 4410 * If error is set here, the Callback path isn't working 4411 * properly, so twiddle the appropriate LCL_ flags. 4412 * (nd_repstat != 0 indicates the Callback path is working, 4413 * but the callback failed on the client.) 4414 */ 4415 if (error) { 4416 /* 4417 * Mark the callback pathway down, which disabled issuing 4418 * of delegations and gets Renew to return NFSERR_CBPATHDOWN. 4419 */ 4420 NFSLOCKSTATE(); 4421 clp->lc_flags |= LCL_CBDOWN; 4422 NFSUNLOCKSTATE(); 4423 } else { 4424 /* 4425 * Callback worked. If the callback path was down, disable 4426 * callbacks, so no more delegations will be issued. (This 4427 * is done on the assumption that the callback pathway is 4428 * flakey.) 4429 */ 4430 NFSLOCKSTATE(); 4431 if (clp->lc_flags & LCL_CBDOWN) 4432 clp->lc_flags &= ~(LCL_CBDOWN | LCL_CALLBACKSON); 4433 NFSUNLOCKSTATE(); 4434 if (nd->nd_repstat) { 4435 error = nd->nd_repstat; 4436 NFSD_DEBUG(1, "nfsrv_docallback op=%d err=%d\n", 4437 procnum, error); 4438 } else if (error == 0 && procnum == NFSV4OP_CBGETATTR) 4439 error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, 4440 NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, 4441 p, NULL); 4442 mbuf_freem(nd->nd_mrep); 4443 } 4444 NFSLOCKSTATE(); 4445 clp->lc_cbref--; 4446 if ((clp->lc_flags & LCL_WAKEUPWANTED) && clp->lc_cbref == 0) { 4447 clp->lc_flags &= ~LCL_WAKEUPWANTED; 4448 wakeup(clp); 4449 } 4450 NFSUNLOCKSTATE(); 4451 4452 free(nd, M_TEMP); 4453 NFSEXITCODE(error); 4454 return (error); 4455 } 4456 4457 /* 4458 * Set up the compound RPC for the callback. 
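 * (Sketch of what is emitted below: the compound starts with the optag
 * string, then the minor version, the callback ident and an operation
 * count.  For NFSv4.1 the count is 2 and a CB_SEQUENCE op is built before
 * the requested op; for NFSv4.0 the count is 1 and the requested op
 * follows directly.)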
4459 */ 4460 static int 4461 nfsrv_cbcallargs(struct nfsrv_descript *nd, struct nfsclient *clp, 4462 uint32_t callback, int op, const char *optag, struct nfsdsession **sepp) 4463 { 4464 uint32_t *tl; 4465 int error, len; 4466 4467 len = strlen(optag); 4468 (void)nfsm_strtom(nd, optag, len); 4469 NFSM_BUILD(tl, uint32_t *, 4 * NFSX_UNSIGNED); 4470 if ((nd->nd_flag & ND_NFSV41) != 0) { 4471 *tl++ = txdr_unsigned(NFSV41_MINORVERSION); 4472 *tl++ = txdr_unsigned(callback); 4473 *tl++ = txdr_unsigned(2); 4474 *tl = txdr_unsigned(NFSV4OP_CBSEQUENCE); 4475 error = nfsv4_setcbsequence(nd, clp, 1, sepp); 4476 if (error != 0) 4477 return (error); 4478 NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); 4479 *tl = txdr_unsigned(op); 4480 } else { 4481 *tl++ = txdr_unsigned(NFSV4_MINORVERSION); 4482 *tl++ = txdr_unsigned(callback); 4483 *tl++ = txdr_unsigned(1); 4484 *tl = txdr_unsigned(op); 4485 } 4486 return (0); 4487 } 4488 4489 /* 4490 * Return the next index# for a clientid. Mostly just increment and return 4491 * the next one, but... if the 32bit unsigned does actually wrap around, 4492 * it should be rebooted. 4493 * At an average rate of one new client per second, it will wrap around in 4494 * approximately 136 years. (I think the server will have been shut 4495 * down or rebooted before then.) 4496 */ 4497 static u_int32_t 4498 nfsrv_nextclientindex(void) 4499 { 4500 static u_int32_t client_index = 0; 4501 4502 client_index++; 4503 if (client_index != 0) 4504 return (client_index); 4505 4506 printf("%s: out of clientids\n", __func__); 4507 return (client_index); 4508 } 4509 4510 /* 4511 * Return the next index# for a stateid. Mostly just increment and return 4512 * the next one, but... if the 32bit unsigned does actually wrap around 4513 * (will a BSD server stay up that long?), find 4514 * new start and end values. 4515 */ 4516 static u_int32_t 4517 nfsrv_nextstateindex(struct nfsclient *clp) 4518 { 4519 struct nfsstate *stp; 4520 int i; 4521 u_int32_t canuse, min_index, max_index; 4522 4523 if (!(clp->lc_flags & LCL_INDEXNOTOK)) { 4524 clp->lc_stateindex++; 4525 if (clp->lc_stateindex != clp->lc_statemaxindex) 4526 return (clp->lc_stateindex); 4527 } 4528 4529 /* 4530 * Yuck, we've hit the end. 4531 * Look for a new min and max. 4532 */ 4533 min_index = 0; 4534 max_index = 0xffffffff; 4535 for (i = 0; i < nfsrv_statehashsize; i++) { 4536 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { 4537 if (stp->ls_stateid.other[2] > 0x80000000) { 4538 if (stp->ls_stateid.other[2] < max_index) 4539 max_index = stp->ls_stateid.other[2]; 4540 } else { 4541 if (stp->ls_stateid.other[2] > min_index) 4542 min_index = stp->ls_stateid.other[2]; 4543 } 4544 } 4545 } 4546 4547 /* 4548 * Yikes, highly unlikely, but I'll handle it anyhow. 4549 */ 4550 if (min_index == 0x80000000 && max_index == 0x80000001) { 4551 canuse = 0; 4552 /* 4553 * Loop around until we find an unused entry. Return that 4554 * and set LCL_INDEXNOTOK, so the search will continue next time. 4555 * (This is one of those rare cases where a goto is the 4556 * cleanest way to code the loop.) 4557 */ 4558 tryagain: 4559 for (i = 0; i < nfsrv_statehashsize; i++) { 4560 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { 4561 if (stp->ls_stateid.other[2] == canuse) { 4562 canuse++; 4563 goto tryagain; 4564 } 4565 } 4566 } 4567 clp->lc_flags |= LCL_INDEXNOTOK; 4568 return (canuse); 4569 } 4570 4571 /* 4572 * Ok to start again from min + 1. 
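 * (Worked example: if the stateids still in use have other[2] values of
 * 5, 7 and 0x90000000, the scan above leaves min_index at 7 and max_index
 * at 0x90000000, so new stateids are handed out starting at 8 and another
 * rescan is forced once the counter reaches 0x90000000 again.)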
4573 */ 4574 clp->lc_stateindex = min_index + 1; 4575 clp->lc_statemaxindex = max_index; 4576 clp->lc_flags &= ~LCL_INDEXNOTOK; 4577 return (clp->lc_stateindex); 4578 } 4579 4580 /* 4581 * The following functions handle the stable storage file that deals with 4582 * the edge conditions described in RFC3530 Sec. 8.6.3. 4583 * The file is as follows: 4584 * - a single record at the beginning that has the lease time of the 4585 * previous server instance (before the last reboot) and the nfsrvboottime 4586 * values for the previous server boots. 4587 * These previous boot times are used to ensure that the current 4588 * nfsrvboottime does not, somehow, get set to a previous one. 4589 * (This is important so that Stale ClientIDs and StateIDs can 4590 * be recognized.) 4591 * The number of previous nfsrvboottime values precedes the list. 4592 * - followed by some number of appended records with: 4593 * - client id string 4594 * - flag that indicates it is a record revoking state via lease 4595 * expiration or similar 4596 * OR has successfully acquired state. 4597 * These structures vary in length, with the client string at the end, up 4598 * to NFSV4_OPAQUELIMIT in size. 4599 * 4600 * At the end of the grace period, the file is truncated, the first 4601 * record is rewritten with updated information and any acquired state 4602 * records for successful reclaims of state are written. 4603 * 4604 * Subsequent records are appended when the first state is issued to 4605 * a client and when state is revoked for a client. 4606 * 4607 * When reading the file in, state issued records that come later in 4608 * the file override older ones, since the append log is in chronological order. 4609 * If, for some reason, the file can't be read, the grace period is 4610 * immediately terminated and all reclaims get NFSERR_NOGRACE. 4611 */ 4612 4613 /* 4614 * Read in the stable storage file. Called by nfssvc() before the nfsd 4615 * processes start servicing requests. 4616 */ 4617 APPLESTATIC void 4618 nfsrv_setupstable(NFSPROC_T *p) 4619 { 4620 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; 4621 struct nfsrv_stable *sp, *nsp; 4622 struct nfst_rec *tsp; 4623 int error, i, tryagain; 4624 off_t off = 0; 4625 ssize_t aresid, len; 4626 4627 /* 4628 * If NFSNSF_UPDATEDONE is set, this is a restart of the nfsds without 4629 * a reboot, so state has not been lost. 4630 */ 4631 if (sf->nsf_flags & NFSNSF_UPDATEDONE) 4632 return; 4633 /* 4634 * Set Grace over just until the file reads successfully. 4635 */ 4636 nfsrvboottime = time_second; 4637 LIST_INIT(&sf->nsf_head); 4638 sf->nsf_flags = (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK); 4639 sf->nsf_eograce = NFSD_MONOSEC + NFSRV_LEASEDELTA; 4640 if (sf->nsf_fp == NULL) 4641 return; 4642 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), 4643 (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), off, UIO_SYSSPACE, 4644 0, NFSFPCRED(sf->nsf_fp), &aresid, p); 4645 if (error || aresid || sf->nsf_numboots == 0 || 4646 sf->nsf_numboots > NFSNSF_MAXNUMBOOTS) 4647 return; 4648 4649 /* 4650 * Now, read in the boottimes.
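 * (The on-disk layout is, roughly: a struct nfsf_rec header holding the
 * previous lease duration and the count of recorded boot times, followed
 * by that many time_t boot values, followed by the chronological append
 * log of variable length struct nfst_rec entries, each ending with the
 * client id string.)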
4651 */ 4652 sf->nsf_bootvals = (time_t *)malloc((sf->nsf_numboots + 1) * 4653 sizeof (time_t), M_TEMP, M_WAITOK); 4654 off = sizeof (struct nfsf_rec); 4655 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), 4656 (caddr_t)sf->nsf_bootvals, sf->nsf_numboots * sizeof (time_t), off, 4657 UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p); 4658 if (error || aresid) { 4659 free(sf->nsf_bootvals, M_TEMP); 4660 sf->nsf_bootvals = NULL; 4661 return; 4662 } 4663 4664 /* 4665 * Make sure this nfsrvboottime is different from all recorded 4666 * previous ones. 4667 */ 4668 do { 4669 tryagain = 0; 4670 for (i = 0; i < sf->nsf_numboots; i++) { 4671 if (nfsrvboottime == sf->nsf_bootvals[i]) { 4672 nfsrvboottime++; 4673 tryagain = 1; 4674 break; 4675 } 4676 } 4677 } while (tryagain); 4678 4679 sf->nsf_flags |= NFSNSF_OK; 4680 off += (sf->nsf_numboots * sizeof (time_t)); 4681 4682 /* 4683 * Read through the file, building a list of records for grace 4684 * checking. 4685 * Each record is between sizeof (struct nfst_rec) and 4686 * sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1 4687 * and is actually sizeof (struct nfst_rec) + nst_len - 1. 4688 */ 4689 tsp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) + 4690 NFSV4_OPAQUELIMIT - 1, M_TEMP, M_WAITOK); 4691 do { 4692 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), 4693 (caddr_t)tsp, sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1, 4694 off, UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p); 4695 len = (sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1) - aresid; 4696 if (error || (len > 0 && (len < sizeof (struct nfst_rec) || 4697 len < (sizeof (struct nfst_rec) + tsp->len - 1)))) { 4698 /* 4699 * Yuck, the file has been corrupted, so just return 4700 * after clearing out any restart state, so the grace period 4701 * is over. 4702 */ 4703 LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) { 4704 LIST_REMOVE(sp, nst_list); 4705 free(sp, M_TEMP); 4706 } 4707 free(tsp, M_TEMP); 4708 sf->nsf_flags &= ~NFSNSF_OK; 4709 free(sf->nsf_bootvals, M_TEMP); 4710 sf->nsf_bootvals = NULL; 4711 return; 4712 } 4713 if (len > 0) { 4714 off += sizeof (struct nfst_rec) + tsp->len - 1; 4715 /* 4716 * Search the list for a matching client. 4717 */ 4718 LIST_FOREACH(sp, &sf->nsf_head, nst_list) { 4719 if (tsp->len == sp->nst_len && 4720 !NFSBCMP(tsp->client, sp->nst_client, tsp->len)) 4721 break; 4722 } 4723 if (sp == LIST_END(&sf->nsf_head)) { 4724 sp = (struct nfsrv_stable *)malloc(tsp->len + 4725 sizeof (struct nfsrv_stable) - 1, M_TEMP, 4726 M_WAITOK); 4727 NFSBCOPY((caddr_t)tsp, (caddr_t)&sp->nst_rec, 4728 sizeof (struct nfst_rec) + tsp->len - 1); 4729 LIST_INSERT_HEAD(&sf->nsf_head, sp, nst_list); 4730 } else { 4731 if (tsp->flag == NFSNST_REVOKE) 4732 sp->nst_flag |= NFSNST_REVOKE; 4733 else 4734 /* 4735 * A subsequent timestamp indicates the client 4736 * did a setclientid/confirm and any previous 4737 * revoke is no longer relevant. 4738 */ 4739 sp->nst_flag &= ~NFSNST_REVOKE; 4740 } 4741 } 4742 } while (len > 0); 4743 free(tsp, M_TEMP); 4744 sf->nsf_flags = NFSNSF_OK; 4745 sf->nsf_eograce = NFSD_MONOSEC + sf->nsf_lease + 4746 NFSRV_LEASEDELTA; 4747 } 4748 4749 /* 4750 * Update the stable storage file, now that the grace period is over. 
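 * (For example, if the old header recorded boot times { T1, T2 }, the
 * rewritten header records { nfsrvboottime, T1, T2 }: the current boot
 * time is shifted into slot 0 below, so a later nfsrv_setupstable() can
 * still recognize every boot time that has already been used.)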
4751 */ 4752 APPLESTATIC void 4753 nfsrv_updatestable(NFSPROC_T *p) 4754 { 4755 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; 4756 struct nfsrv_stable *sp, *nsp; 4757 int i; 4758 struct nfsvattr nva; 4759 vnode_t vp; 4760 #if defined(__FreeBSD_version) && (__FreeBSD_version >= 500000) 4761 mount_t mp = NULL; 4762 #endif 4763 int error; 4764 4765 if (sf->nsf_fp == NULL || (sf->nsf_flags & NFSNSF_UPDATEDONE)) 4766 return; 4767 sf->nsf_flags |= NFSNSF_UPDATEDONE; 4768 /* 4769 * Ok, we need to rewrite the stable storage file. 4770 * - truncate to 0 length 4771 * - write the new first structure 4772 * - loop through the data structures, writing out any that 4773 * have timestamps older than the old boot 4774 */ 4775 if (sf->nsf_bootvals) { 4776 sf->nsf_numboots++; 4777 for (i = sf->nsf_numboots - 2; i >= 0; i--) 4778 sf->nsf_bootvals[i + 1] = sf->nsf_bootvals[i]; 4779 } else { 4780 sf->nsf_numboots = 1; 4781 sf->nsf_bootvals = (time_t *)malloc(sizeof (time_t), 4782 M_TEMP, M_WAITOK); 4783 } 4784 sf->nsf_bootvals[0] = nfsrvboottime; 4785 sf->nsf_lease = nfsrv_lease; 4786 NFSVNO_ATTRINIT(&nva); 4787 NFSVNO_SETATTRVAL(&nva, size, 0); 4788 vp = NFSFPVNODE(sf->nsf_fp); 4789 vn_start_write(vp, &mp, V_WAIT); 4790 if (NFSVOPLOCK(vp, LK_EXCLUSIVE) == 0) { 4791 error = nfsvno_setattr(vp, &nva, NFSFPCRED(sf->nsf_fp), p, 4792 NULL); 4793 NFSVOPUNLOCK(vp, 0); 4794 } else 4795 error = EPERM; 4796 vn_finished_write(mp); 4797 if (!error) 4798 error = NFSD_RDWR(UIO_WRITE, vp, 4799 (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), (off_t)0, 4800 UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p); 4801 if (!error) 4802 error = NFSD_RDWR(UIO_WRITE, vp, 4803 (caddr_t)sf->nsf_bootvals, 4804 sf->nsf_numboots * sizeof (time_t), 4805 (off_t)(sizeof (struct nfsf_rec)), 4806 UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p); 4807 free(sf->nsf_bootvals, M_TEMP); 4808 sf->nsf_bootvals = NULL; 4809 if (error) { 4810 sf->nsf_flags &= ~NFSNSF_OK; 4811 printf("EEK! Can't write NfsV4 stable storage file\n"); 4812 return; 4813 } 4814 sf->nsf_flags |= NFSNSF_OK; 4815 4816 /* 4817 * Loop through the list and write out timestamp records for 4818 * any clients that successfully reclaimed state. 4819 */ 4820 LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) { 4821 if (sp->nst_flag & NFSNST_GOTSTATE) { 4822 nfsrv_writestable(sp->nst_client, sp->nst_len, 4823 NFSNST_NEWSTATE, p); 4824 sp->nst_clp->lc_flags |= LCL_STAMPEDSTABLE; 4825 } 4826 LIST_REMOVE(sp, nst_list); 4827 free(sp, M_TEMP); 4828 } 4829 nfsrv_backupstable(); 4830 } 4831 4832 /* 4833 * Append a record to the stable storage file. 4834 */ 4835 APPLESTATIC void 4836 nfsrv_writestable(u_char *client, int len, int flag, NFSPROC_T *p) 4837 { 4838 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; 4839 struct nfst_rec *sp; 4840 int error; 4841 4842 if (!(sf->nsf_flags & NFSNSF_OK) || sf->nsf_fp == NULL) 4843 return; 4844 sp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) + 4845 len - 1, M_TEMP, M_WAITOK); 4846 sp->len = len; 4847 NFSBCOPY(client, sp->client, len); 4848 sp->flag = flag; 4849 error = NFSD_RDWR(UIO_WRITE, NFSFPVNODE(sf->nsf_fp), 4850 (caddr_t)sp, sizeof (struct nfst_rec) + len - 1, (off_t)0, 4851 UIO_SYSSPACE, (IO_SYNC | IO_APPEND), NFSFPCRED(sf->nsf_fp), NULL, p); 4852 free(sp, M_TEMP); 4853 if (error) { 4854 sf->nsf_flags &= ~NFSNSF_OK; 4855 printf("EEK! Can't write NfsV4 stable storage file\n"); 4856 } 4857 } 4858 4859 /* 4860 * This function is called during the grace period to mark a client 4861 * that successfully reclaimed state. 
4862 */ 4863 static void 4864 nfsrv_markstable(struct nfsclient *clp) 4865 { 4866 struct nfsrv_stable *sp; 4867 4868 /* 4869 * First find the client structure. 4870 */ 4871 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { 4872 if (sp->nst_len == clp->lc_idlen && 4873 !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len)) 4874 break; 4875 } 4876 if (sp == LIST_END(&nfsrv_stablefirst.nsf_head)) 4877 return; 4878 4879 /* 4880 * Now, just mark it and set the nfsclient back pointer. 4881 */ 4882 sp->nst_flag |= NFSNST_GOTSTATE; 4883 sp->nst_clp = clp; 4884 } 4885 4886 /* 4887 * This function is called when a NFSv4.1 client does a ReclaimComplete. 4888 * Very similar to nfsrv_markstable(), except for the flag being set. 4889 */ 4890 static void 4891 nfsrv_markreclaim(struct nfsclient *clp) 4892 { 4893 struct nfsrv_stable *sp; 4894 4895 /* 4896 * First find the client structure. 4897 */ 4898 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { 4899 if (sp->nst_len == clp->lc_idlen && 4900 !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len)) 4901 break; 4902 } 4903 if (sp == LIST_END(&nfsrv_stablefirst.nsf_head)) 4904 return; 4905 4906 /* 4907 * Now, just set the flag. 4908 */ 4909 sp->nst_flag |= NFSNST_RECLAIMED; 4910 } 4911 4912 /* 4913 * This function is called for a reclaim, to see if it gets grace. 4914 * It returns 0 if a reclaim is allowed, 1 otherwise. 4915 */ 4916 static int 4917 nfsrv_checkstable(struct nfsclient *clp) 4918 { 4919 struct nfsrv_stable *sp; 4920 4921 /* 4922 * First, find the entry for the client. 4923 */ 4924 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { 4925 if (sp->nst_len == clp->lc_idlen && 4926 !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len)) 4927 break; 4928 } 4929 4930 /* 4931 * If not in the list, state was revoked or no state was issued 4932 * since the previous reboot, a reclaim is denied. 4933 */ 4934 if (sp == LIST_END(&nfsrv_stablefirst.nsf_head) || 4935 (sp->nst_flag & NFSNST_REVOKE) || 4936 !(nfsrv_stablefirst.nsf_flags & NFSNSF_OK)) 4937 return (1); 4938 return (0); 4939 } 4940 4941 /* 4942 * Test for and try to clear out a conflicting client. This is called by 4943 * nfsrv_lockctrl() and nfsrv_openctrl() when conflicts with other clients 4944 * a found. 4945 * The trick here is that it can't revoke a conflicting client with an 4946 * expired lease unless it holds the v4root lock, so... 4947 * If no v4root lock, get the lock and return 1 to indicate "try again". 4948 * Return 0 to indicate the conflict can't be revoked and 1 to indicate 4949 * the revocation worked and the conflicting client is "bye, bye", so it 4950 * can be tried again. 4951 * Return 2 to indicate that the vnode is VI_DOOMED after NFSVOPLOCK(). 4952 * Unlocks State before a non-zero value is returned. 4953 */ 4954 static int 4955 nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, vnode_t vp, 4956 NFSPROC_T *p) 4957 { 4958 int gotlock, lktype = 0; 4959 4960 /* 4961 * If lease hasn't expired, we can't fix it. 
4962 */ 4963 if (clp->lc_expiry >= NFSD_MONOSEC || 4964 !(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE)) 4965 return (0); 4966 if (*haslockp == 0) { 4967 NFSUNLOCKSTATE(); 4968 if (vp != NULL) { 4969 lktype = NFSVOPISLOCKED(vp); 4970 NFSVOPUNLOCK(vp, 0); 4971 } 4972 NFSLOCKV4ROOTMUTEX(); 4973 nfsv4_relref(&nfsv4rootfs_lock); 4974 do { 4975 gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 4976 NFSV4ROOTLOCKMUTEXPTR, NULL); 4977 } while (!gotlock); 4978 NFSUNLOCKV4ROOTMUTEX(); 4979 *haslockp = 1; 4980 if (vp != NULL) { 4981 NFSVOPLOCK(vp, lktype | LK_RETRY); 4982 if ((vp->v_iflag & VI_DOOMED) != 0) 4983 return (2); 4984 } 4985 return (1); 4986 } 4987 NFSUNLOCKSTATE(); 4988 4989 /* 4990 * Ok, we can expire the conflicting client. 4991 */ 4992 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); 4993 nfsrv_backupstable(); 4994 nfsrv_cleanclient(clp, p); 4995 nfsrv_freedeleglist(&clp->lc_deleg); 4996 nfsrv_freedeleglist(&clp->lc_olddeleg); 4997 LIST_REMOVE(clp, lc_hash); 4998 nfsrv_zapclient(clp, p); 4999 return (1); 5000 } 5001 5002 /* 5003 * Resolve a delegation conflict. 5004 * Returns 0 to indicate the conflict was resolved without sleeping. 5005 * Return -1 to indicate that the caller should check for conflicts again. 5006 * Return > 0 for an error that should be returned, normally NFSERR_DELAY. 5007 * 5008 * Also, manipulate the nfsv4root_lock, as required. It isn't changed 5009 * for a return of 0, since there was no sleep and it could be required 5010 * later. It is released for a return of NFSERR_DELAY, since the caller 5011 * will return that error. It is released when a sleep was done waiting 5012 * for the delegation to be returned or expire (so that other nfsds can 5013 * handle ops). Then, it must be acquired for the write to stable storage. 5014 * (This function is somewhat similar to nfsrv_clientconflict(), but 5015 * the semantics differ in a couple of subtle ways. The return of 0 5016 * indicates the conflict was resolved without sleeping here, not 5017 * that the conflict can't be resolved and the handling of nfsv4root_lock 5018 * differs, as noted above.) 5019 * Unlocks State before returning a non-zero value. 5020 */ 5021 static int 5022 nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p, 5023 vnode_t vp) 5024 { 5025 struct nfsclient *clp = stp->ls_clp; 5026 int gotlock, error, lktype = 0, retrycnt, zapped_clp; 5027 nfsv4stateid_t tstateid; 5028 fhandle_t tfh; 5029 5030 /* 5031 * If the conflict is with an old delegation... 5032 */ 5033 if (stp->ls_flags & NFSLCK_OLDDELEG) { 5034 /* 5035 * You can delete it, if it has expired. 5036 */ 5037 if (clp->lc_delegtime < NFSD_MONOSEC) { 5038 nfsrv_freedeleg(stp); 5039 NFSUNLOCKSTATE(); 5040 error = -1; 5041 goto out; 5042 } 5043 NFSUNLOCKSTATE(); 5044 /* 5045 * During this delay, the old delegation could expire or it 5046 * could be recovered by the client via an Open with 5047 * CLAIM_DELEGATE_PREV. 5048 * Release the nfsv4root_lock, if held. 5049 */ 5050 if (*haslockp) { 5051 *haslockp = 0; 5052 NFSLOCKV4ROOTMUTEX(); 5053 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5054 NFSUNLOCKV4ROOTMUTEX(); 5055 } 5056 error = NFSERR_DELAY; 5057 goto out; 5058 } 5059 5060 /* 5061 * It's a current delegation, so: 5062 * - check to see if the delegation has expired 5063 * - if so, get the v4root lock and then expire it 5064 */ 5065 if (!(stp->ls_flags & NFSLCK_DELEGRECALL)) { 5066 /* 5067 * - do a recall callback, since not yet done 5068 * For now, never allow truncate to be set. 
To use 5069 * truncate safely, it must be guaranteed that the 5070 * Remove, Rename or Setattr with size of 0 will 5071 * succeed and that would require major changes to 5072 * the VFS/Vnode OPs. 5073 * Set the expiry time large enough so that it won't expire 5074 * until after the callback, then set it correctly, once 5075 * the callback is done. (The delegation will now time 5076 * out whether or not the Recall worked ok. The timeout 5077 * will be extended when ops are done on the delegation 5078 * stateid, up to the timelimit.) 5079 */ 5080 stp->ls_delegtime = NFSD_MONOSEC + (2 * nfsrv_lease) + 5081 NFSRV_LEASEDELTA; 5082 stp->ls_delegtimelimit = NFSD_MONOSEC + (6 * nfsrv_lease) + 5083 NFSRV_LEASEDELTA; 5084 stp->ls_flags |= NFSLCK_DELEGRECALL; 5085 5086 /* 5087 * Loop NFSRV_CBRETRYCNT times while the CBRecall replies 5088 * NFSERR_BADSTATEID or NFSERR_BADHANDLE. This is done 5089 * in order to try and avoid a race that could happen 5090 * when a CBRecall request passed the Open reply with 5091 * the delegation in it when transitting the network. 5092 * Since nfsrv_docallback will sleep, don't use stp after 5093 * the call. 5094 */ 5095 NFSBCOPY((caddr_t)&stp->ls_stateid, (caddr_t)&tstateid, 5096 sizeof (tstateid)); 5097 NFSBCOPY((caddr_t)&stp->ls_lfp->lf_fh, (caddr_t)&tfh, 5098 sizeof (tfh)); 5099 NFSUNLOCKSTATE(); 5100 if (*haslockp) { 5101 *haslockp = 0; 5102 NFSLOCKV4ROOTMUTEX(); 5103 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5104 NFSUNLOCKV4ROOTMUTEX(); 5105 } 5106 retrycnt = 0; 5107 do { 5108 error = nfsrv_docallback(clp, NFSV4OP_CBRECALL, 5109 &tstateid, 0, &tfh, NULL, NULL, 0, p); 5110 retrycnt++; 5111 } while ((error == NFSERR_BADSTATEID || 5112 error == NFSERR_BADHANDLE) && retrycnt < NFSV4_CBRETRYCNT); 5113 error = NFSERR_DELAY; 5114 goto out; 5115 } 5116 5117 if (clp->lc_expiry >= NFSD_MONOSEC && 5118 stp->ls_delegtime >= NFSD_MONOSEC) { 5119 NFSUNLOCKSTATE(); 5120 /* 5121 * A recall has been done, but it has not yet expired. 5122 * So, RETURN_DELAY. 5123 */ 5124 if (*haslockp) { 5125 *haslockp = 0; 5126 NFSLOCKV4ROOTMUTEX(); 5127 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5128 NFSUNLOCKV4ROOTMUTEX(); 5129 } 5130 error = NFSERR_DELAY; 5131 goto out; 5132 } 5133 5134 /* 5135 * If we don't yet have the lock, just get it and then return, 5136 * since we need that before deleting expired state, such as 5137 * this delegation. 5138 * When getting the lock, unlock the vnode, so other nfsds that 5139 * are in progress, won't get stuck waiting for the vnode lock. 5140 */ 5141 if (*haslockp == 0) { 5142 NFSUNLOCKSTATE(); 5143 if (vp != NULL) { 5144 lktype = NFSVOPISLOCKED(vp); 5145 NFSVOPUNLOCK(vp, 0); 5146 } 5147 NFSLOCKV4ROOTMUTEX(); 5148 nfsv4_relref(&nfsv4rootfs_lock); 5149 do { 5150 gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 5151 NFSV4ROOTLOCKMUTEXPTR, NULL); 5152 } while (!gotlock); 5153 NFSUNLOCKV4ROOTMUTEX(); 5154 *haslockp = 1; 5155 if (vp != NULL) { 5156 NFSVOPLOCK(vp, lktype | LK_RETRY); 5157 if ((vp->v_iflag & VI_DOOMED) != 0) { 5158 *haslockp = 0; 5159 NFSLOCKV4ROOTMUTEX(); 5160 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5161 NFSUNLOCKV4ROOTMUTEX(); 5162 error = NFSERR_PERM; 5163 goto out; 5164 } 5165 } 5166 error = -1; 5167 goto out; 5168 } 5169 5170 NFSUNLOCKSTATE(); 5171 /* 5172 * Ok, we can delete the expired delegation. 5173 * First, write the Revoke record to stable storage and then 5174 * clear out the conflict. 5175 * Since all other nfsd threads are now blocked, we can safely 5176 * sleep without the state changing. 
5177 */ 5178 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); 5179 nfsrv_backupstable(); 5180 if (clp->lc_expiry < NFSD_MONOSEC) { 5181 nfsrv_cleanclient(clp, p); 5182 nfsrv_freedeleglist(&clp->lc_deleg); 5183 nfsrv_freedeleglist(&clp->lc_olddeleg); 5184 LIST_REMOVE(clp, lc_hash); 5185 zapped_clp = 1; 5186 } else { 5187 nfsrv_freedeleg(stp); 5188 zapped_clp = 0; 5189 } 5190 if (zapped_clp) 5191 nfsrv_zapclient(clp, p); 5192 error = -1; 5193 5194 out: 5195 NFSEXITCODE(error); 5196 return (error); 5197 } 5198 5199 /* 5200 * Check for a remove allowed, if remove is set to 1 and get rid of 5201 * delegations. 5202 */ 5203 APPLESTATIC int 5204 nfsrv_checkremove(vnode_t vp, int remove, NFSPROC_T *p) 5205 { 5206 struct nfsstate *stp; 5207 struct nfslockfile *lfp; 5208 int error, haslock = 0; 5209 fhandle_t nfh; 5210 5211 /* 5212 * First, get the lock file structure. 5213 * (A return of -1 means no associated state, so remove ok.) 5214 */ 5215 error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p); 5216 tryagain: 5217 NFSLOCKSTATE(); 5218 if (!error) 5219 error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh, 0); 5220 if (error) { 5221 NFSUNLOCKSTATE(); 5222 if (haslock) { 5223 NFSLOCKV4ROOTMUTEX(); 5224 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5225 NFSUNLOCKV4ROOTMUTEX(); 5226 } 5227 if (error == -1) 5228 error = 0; 5229 goto out; 5230 } 5231 5232 /* 5233 * Now, we must Recall any delegations. 5234 */ 5235 error = nfsrv_cleandeleg(vp, lfp, NULL, &haslock, p); 5236 if (error) { 5237 /* 5238 * nfsrv_cleandeleg() unlocks state for non-zero 5239 * return. 5240 */ 5241 if (error == -1) 5242 goto tryagain; 5243 if (haslock) { 5244 NFSLOCKV4ROOTMUTEX(); 5245 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5246 NFSUNLOCKV4ROOTMUTEX(); 5247 } 5248 goto out; 5249 } 5250 5251 /* 5252 * Now, look for a conflicting open share. 5253 */ 5254 if (remove) { 5255 /* 5256 * If the entry in the directory was the last reference to the 5257 * corresponding filesystem object, the object can be destroyed 5258 * */ 5259 if(lfp->lf_usecount>1) 5260 LIST_FOREACH(stp, &lfp->lf_open, ls_file) { 5261 if (stp->ls_flags & NFSLCK_WRITEDENY) { 5262 error = NFSERR_FILEOPEN; 5263 break; 5264 } 5265 } 5266 } 5267 5268 NFSUNLOCKSTATE(); 5269 if (haslock) { 5270 NFSLOCKV4ROOTMUTEX(); 5271 nfsv4_unlock(&nfsv4rootfs_lock, 1); 5272 NFSUNLOCKV4ROOTMUTEX(); 5273 } 5274 5275 out: 5276 NFSEXITCODE(error); 5277 return (error); 5278 } 5279 5280 /* 5281 * Clear out all delegations for the file referred to by lfp. 5282 * May return NFSERR_DELAY, if there will be a delay waiting for 5283 * delegations to expire. 5284 * Returns -1 to indicate it slept while recalling a delegation. 5285 * This function has the side effect of deleting the nfslockfile structure, 5286 * if it no longer has associated state and didn't have to sleep. 5287 * Unlocks State before a non-zero value is returned. 5288 */ 5289 static int 5290 nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp, 5291 struct nfsclient *clp, int *haslockp, NFSPROC_T *p) 5292 { 5293 struct nfsstate *stp, *nstp; 5294 int ret = 0; 5295 5296 stp = LIST_FIRST(&lfp->lf_deleg); 5297 while (stp != LIST_END(&lfp->lf_deleg)) { 5298 nstp = LIST_NEXT(stp, ls_file); 5299 if (stp->ls_clp != clp) { 5300 ret = nfsrv_delegconflict(stp, haslockp, p, vp); 5301 if (ret) { 5302 /* 5303 * nfsrv_delegconflict() unlocks state 5304 * when it returns non-zero. 
5305 */ 5306 goto out; 5307 } 5308 } 5309 stp = nstp; 5310 } 5311 out: 5312 NFSEXITCODE(ret); 5313 return (ret); 5314 } 5315 5316 /* 5317 * There are certain operations that, when being done outside of NFSv4, 5318 * require that any NFSv4 delegation for the file be recalled. 5319 * This function is to be called for those cases: 5320 * VOP_RENAME() - When a delegation is being recalled for any reason, 5321 * the client may have to do Opens against the server, using the file's 5322 * final component name. If the file has been renamed on the server, 5323 * that component name will be incorrect and the Open will fail. 5324 * VOP_REMOVE() - Theoretically, a client could Open a file after it has 5325 * been removed on the server, if there is a delegation issued to 5326 * that client for the file. I say "theoretically" since clients 5327 * normally do an Access Op before the Open and that Access Op will 5328 * fail with ESTALE. Note that NFSv2 and 3 don't even do Opens, so 5329 * they will detect the file's removal in the same manner. (There is 5330 * one case where RFC3530 allows a client to do an Open without first 5331 * doing an Access Op, which is passage of a check against the ACE 5332 * returned with a Write delegation, but current practice is to ignore 5333 * the ACE and always do an Access Op.) 5334 * Since the functions can only be called with an unlocked vnode, this 5335 * can't be done at this time. 5336 * VOP_ADVLOCK() - When a client holds a delegation, it can issue byte range 5337 * locks locally in the client, which are not visible to the server. To 5338 * deal with this, issuing of delegations for a vnode must be disabled 5339 * and all delegations for the vnode recalled. This is done via the 5340 * second function, using the VV_DISABLEDELEG vflag on the vnode. 5341 */ 5342 APPLESTATIC void 5343 nfsd_recalldelegation(vnode_t vp, NFSPROC_T *p) 5344 { 5345 time_t starttime; 5346 int error; 5347 5348 /* 5349 * First, check to see if the server is currently running and it has 5350 * been called for a regular file when issuing delegations. 5351 */ 5352 if (newnfs_numnfsd == 0 || vp->v_type != VREG || 5353 nfsrv_issuedelegs == 0) 5354 return; 5355 5356 KASSERT((NFSVOPISLOCKED(vp) != LK_EXCLUSIVE), ("vp %p is locked", vp)); 5357 /* 5358 * First, get a reference on the nfsv4rootfs_lock so that an 5359 * exclusive lock cannot be acquired by another thread. 5360 */ 5361 NFSLOCKV4ROOTMUTEX(); 5362 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL); 5363 NFSUNLOCKV4ROOTMUTEX(); 5364 5365 /* 5366 * Now, call nfsrv_checkremove() in a loop while it returns 5367 * NFSERR_DELAY. Return upon any other error or when timed out. 5368 */ 5369 starttime = NFSD_MONOSEC; 5370 do { 5371 if (NFSVOPLOCK(vp, LK_EXCLUSIVE) == 0) { 5372 error = nfsrv_checkremove(vp, 0, p); 5373 NFSVOPUNLOCK(vp, 0); 5374 } else 5375 error = EPERM; 5376 if (error == NFSERR_DELAY) { 5377 if (NFSD_MONOSEC - starttime > NFS_REMOVETIMEO) 5378 break; 5379 /* Sleep for a short period of time */ 5380 (void) nfs_catnap(PZERO, 0, "nfsremove"); 5381 } 5382 } while (error == NFSERR_DELAY); 5383 NFSLOCKV4ROOTMUTEX(); 5384 nfsv4_relref(&nfsv4rootfs_lock); 5385 NFSUNLOCKV4ROOTMUTEX(); 5386 } 5387 5388 APPLESTATIC void 5389 nfsd_disabledelegation(vnode_t vp, NFSPROC_T *p) 5390 { 5391 5392 #ifdef VV_DISABLEDELEG 5393 /* 5394 * First, flag issuance of delegations disabled. 
5395 */ 5396 atomic_set_long(&vp->v_vflag, VV_DISABLEDELEG); 5397 #endif 5398 5399 /* 5400 * Then call nfsd_recalldelegation() to get rid of all extant 5401 * delegations. 5402 */ 5403 nfsd_recalldelegation(vp, p); 5404 } 5405 5406 /* 5407 * Check for conflicting locks, etc. and then get rid of delegations. 5408 * (At one point I thought that I should get rid of delegations for any 5409 * Setattr, since it could potentially disallow the I/O op (read or write) 5410 * allowed by the delegation. However, Setattr Ops that aren't changing 5411 * the size get a stateid of all 0s, so you can't tell if it is a delegation 5412 * for the same client or a different one, so I decided to only get rid 5413 * of delegations for other clients when the size is being changed.) 5414 * In general, a Setattr can disable NFS I/O Ops that are outstanding, such 5415 * as Write backs, even if there is no delegation, so it really isn't any 5416 * different?) 5417 */ 5418 APPLESTATIC int 5419 nfsrv_checksetattr(vnode_t vp, struct nfsrv_descript *nd, 5420 nfsv4stateid_t *stateidp, struct nfsvattr *nvap, nfsattrbit_t *attrbitp, 5421 struct nfsexstuff *exp, NFSPROC_T *p) 5422 { 5423 struct nfsstate st, *stp = &st; 5424 struct nfslock lo, *lop = &lo; 5425 int error = 0; 5426 nfsquad_t clientid; 5427 5428 if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_SIZE)) { 5429 stp->ls_flags = (NFSLCK_CHECK | NFSLCK_WRITEACCESS); 5430 lop->lo_first = nvap->na_size; 5431 } else { 5432 stp->ls_flags = 0; 5433 lop->lo_first = 0; 5434 } 5435 if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNER) || 5436 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP) || 5437 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_MODE) || 5438 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_ACL)) 5439 stp->ls_flags |= NFSLCK_SETATTR; 5440 if (stp->ls_flags == 0) 5441 goto out; 5442 lop->lo_end = NFS64BITSSET; 5443 lop->lo_flags = NFSLCK_WRITE; 5444 stp->ls_ownerlen = 0; 5445 stp->ls_op = NULL; 5446 stp->ls_uid = nd->nd_cred->cr_uid; 5447 stp->ls_stateid.seqid = stateidp->seqid; 5448 clientid.lval[0] = stp->ls_stateid.other[0] = stateidp->other[0]; 5449 clientid.lval[1] = stp->ls_stateid.other[1] = stateidp->other[1]; 5450 stp->ls_stateid.other[2] = stateidp->other[2]; 5451 error = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid, 5452 stateidp, exp, nd, p); 5453 5454 out: 5455 NFSEXITCODE2(error, nd); 5456 return (error); 5457 } 5458 5459 /* 5460 * Check for a write delegation and do a CBGETATTR if there is one, updating 5461 * the attributes, as required. 5462 * Should I return an error if I can't get the attributes? (For now, I'll 5463 * just return ok. 5464 */ 5465 APPLESTATIC int 5466 nfsrv_checkgetattr(struct nfsrv_descript *nd, vnode_t vp, 5467 struct nfsvattr *nvap, nfsattrbit_t *attrbitp, NFSPROC_T *p) 5468 { 5469 struct nfsstate *stp; 5470 struct nfslockfile *lfp; 5471 struct nfsclient *clp; 5472 struct nfsvattr nva; 5473 fhandle_t nfh; 5474 int error = 0; 5475 nfsattrbit_t cbbits; 5476 u_quad_t delegfilerev; 5477 5478 NFSCBGETATTR_ATTRBIT(attrbitp, &cbbits); 5479 if (!NFSNONZERO_ATTRBIT(&cbbits)) 5480 goto out; 5481 if (nfsrv_writedelegcnt == 0) 5482 goto out; 5483 5484 /* 5485 * Get the lock file structure. 5486 * (A return of -1 means no associated state, so return ok.) 
5487 */ 5488 error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p); 5489 NFSLOCKSTATE(); 5490 if (!error) 5491 error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh, 0); 5492 if (error) { 5493 NFSUNLOCKSTATE(); 5494 if (error == -1) 5495 error = 0; 5496 goto out; 5497 } 5498 5499 /* 5500 * Now, look for a write delegation. 5501 */ 5502 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { 5503 if (stp->ls_flags & NFSLCK_DELEGWRITE) 5504 break; 5505 } 5506 if (stp == LIST_END(&lfp->lf_deleg)) { 5507 NFSUNLOCKSTATE(); 5508 goto out; 5509 } 5510 clp = stp->ls_clp; 5511 delegfilerev = stp->ls_filerev; 5512 5513 /* 5514 * If the Write delegation was issued as a part of this Compound RPC 5515 * or if we have an Implied Clientid (used in a previous Op in this 5516 * compound) and it is the client the delegation was issued to, 5517 * just return ok. 5518 * I also assume that it is from the same client iff the network 5519 * host IP address is the same as the callback address. (Not 5520 * exactly correct by the RFC, but avoids a lot of Getattr 5521 * callbacks.) 5522 */ 5523 if (nd->nd_compref == stp->ls_compref || 5524 ((nd->nd_flag & ND_IMPLIEDCLID) && 5525 clp->lc_clientid.qval == nd->nd_clientid.qval) || 5526 nfsaddr2_match(clp->lc_req.nr_nam, nd->nd_nam)) { 5527 NFSUNLOCKSTATE(); 5528 goto out; 5529 } 5530 5531 /* 5532 * We are now done with the delegation state structure, 5533 * so the statelock can be released and we can now tsleep(). 5534 */ 5535 5536 /* 5537 * Now, we must do the CB Getattr callback, to see if Change or Size 5538 * has changed. 5539 */ 5540 if (clp->lc_expiry >= NFSD_MONOSEC) { 5541 NFSUNLOCKSTATE(); 5542 NFSVNO_ATTRINIT(&nva); 5543 nva.na_filerev = NFS64BITSSET; 5544 error = nfsrv_docallback(clp, NFSV4OP_CBGETATTR, NULL, 5545 0, &nfh, &nva, &cbbits, 0, p); 5546 if (!error) { 5547 if ((nva.na_filerev != NFS64BITSSET && 5548 nva.na_filerev > delegfilerev) || 5549 (NFSVNO_ISSETSIZE(&nva) && 5550 nva.na_size != nvap->na_size)) { 5551 error = nfsvno_updfilerev(vp, nvap, nd, p); 5552 if (NFSVNO_ISSETSIZE(&nva)) 5553 nvap->na_size = nva.na_size; 5554 } 5555 } else 5556 error = 0; /* Ignore callback errors for now. */ 5557 } else { 5558 NFSUNLOCKSTATE(); 5559 } 5560 5561 out: 5562 NFSEXITCODE2(error, nd); 5563 return (error); 5564 } 5565 5566 /* 5567 * This function looks for openowners that haven't had any opens for 5568 * a while and throws them away. Called by an nfsd when NFSNSF_NOOPENS 5569 * is set. 5570 */ 5571 APPLESTATIC void 5572 nfsrv_throwawayopens(NFSPROC_T *p) 5573 { 5574 struct nfsclient *clp, *nclp; 5575 struct nfsstate *stp, *nstp; 5576 int i; 5577 5578 NFSLOCKSTATE(); 5579 nfsrv_stablefirst.nsf_flags &= ~NFSNSF_NOOPENS; 5580 /* 5581 * For each client... 5582 */ 5583 for (i = 0; i < nfsrv_clienthashsize; i++) { 5584 LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, nclp) { 5585 LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp) { 5586 if (LIST_EMPTY(&stp->ls_open) && 5587 (stp->ls_noopens > NFSNOOPEN || 5588 (nfsrv_openpluslock * 2) > 5589 nfsrv_v4statelimit)) 5590 nfsrv_freeopenowner(stp, 0, p); 5591 } 5592 } 5593 } 5594 NFSUNLOCKSTATE(); 5595 } 5596 5597 /* 5598 * This function checks to see if the credentials are the same. 5599 * Returns 1 for not same, 0 otherwise. 
5600 */ 5601 static int 5602 nfsrv_notsamecredname(struct nfsrv_descript *nd, struct nfsclient *clp) 5603 { 5604 5605 if (nd->nd_flag & ND_GSS) { 5606 if (!(clp->lc_flags & LCL_GSS)) 5607 return (1); 5608 if (clp->lc_flags & LCL_NAME) { 5609 if (nd->nd_princlen != clp->lc_namelen || 5610 NFSBCMP(nd->nd_principal, clp->lc_name, 5611 clp->lc_namelen)) 5612 return (1); 5613 else 5614 return (0); 5615 } 5616 if (nd->nd_cred->cr_uid == clp->lc_uid) 5617 return (0); 5618 else 5619 return (1); 5620 } else if (clp->lc_flags & LCL_GSS) 5621 return (1); 5622 /* 5623 * For AUTH_SYS, allow the same uid or root. (This is underspecified 5624 * in RFC3530, which talks about principals, but doesn't say anything 5625 * about uids for AUTH_SYS.) 5626 */ 5627 if (nd->nd_cred->cr_uid == clp->lc_uid || nd->nd_cred->cr_uid == 0) 5628 return (0); 5629 else 5630 return (1); 5631 } 5632 5633 /* 5634 * Calculate the lease expiry time. 5635 */ 5636 static time_t 5637 nfsrv_leaseexpiry(void) 5638 { 5639 5640 if (nfsrv_stablefirst.nsf_eograce > NFSD_MONOSEC) 5641 return (NFSD_MONOSEC + 2 * (nfsrv_lease + NFSRV_LEASEDELTA)); 5642 return (NFSD_MONOSEC + nfsrv_lease + NFSRV_LEASEDELTA); 5643 } 5644 5645 /* 5646 * Delay the delegation timeout as far as ls_delegtimelimit, as required. 5647 */ 5648 static void 5649 nfsrv_delaydelegtimeout(struct nfsstate *stp) 5650 { 5651 5652 if ((stp->ls_flags & NFSLCK_DELEGRECALL) == 0) 5653 return; 5654 5655 if ((stp->ls_delegtime + 15) > NFSD_MONOSEC && 5656 stp->ls_delegtime < stp->ls_delegtimelimit) { 5657 stp->ls_delegtime += nfsrv_lease; 5658 if (stp->ls_delegtime > stp->ls_delegtimelimit) 5659 stp->ls_delegtime = stp->ls_delegtimelimit; 5660 } 5661 } 5662 5663 /* 5664 * This function checks to see if there is any other state associated 5665 * with the openowner for this Open. 5666 * It returns 1 if there is no other state, 0 otherwise. 5667 */ 5668 static int 5669 nfsrv_nootherstate(struct nfsstate *stp) 5670 { 5671 struct nfsstate *tstp; 5672 5673 LIST_FOREACH(tstp, &stp->ls_openowner->ls_open, ls_list) { 5674 if (tstp != stp || !LIST_EMPTY(&tstp->ls_lock)) 5675 return (0); 5676 } 5677 return (1); 5678 } 5679 5680 /* 5681 * Create a list of lock deltas (changes to local byte range locking 5682 * that can be rolled back using the list) and apply the changes via 5683 * nfsvno_advlock(). Optionally, lock the list. It is expected that either 5684 * the rollback or update function will be called after this. 5685 * It returns an error (and rolls back, as required), if any nfsvno_advlock() 5686 * call fails. If it returns an error, it will unlock the list. 5687 */ 5688 static int 5689 nfsrv_locallock(vnode_t vp, struct nfslockfile *lfp, int flags, 5690 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p) 5691 { 5692 struct nfslock *lop, *nlop; 5693 int error = 0; 5694 5695 /* Loop through the list of locks. 
*/ 5696 lop = LIST_FIRST(&lfp->lf_locallock); 5697 while (first < end && lop != NULL) { 5698 nlop = LIST_NEXT(lop, lo_lckowner); 5699 if (first >= lop->lo_end) { 5700 /* not there yet */ 5701 lop = nlop; 5702 } else if (first < lop->lo_first) { 5703 /* new one starts before entry in list */ 5704 if (end <= lop->lo_first) { 5705 /* no overlap between old and new */ 5706 error = nfsrv_dolocal(vp, lfp, flags, 5707 NFSLCK_UNLOCK, first, end, cfp, p); 5708 if (error != 0) 5709 break; 5710 first = end; 5711 } else { 5712 /* handle fragment overlapped with new one */ 5713 error = nfsrv_dolocal(vp, lfp, flags, 5714 NFSLCK_UNLOCK, first, lop->lo_first, cfp, 5715 p); 5716 if (error != 0) 5717 break; 5718 first = lop->lo_first; 5719 } 5720 } else { 5721 /* new one overlaps this entry in list */ 5722 if (end <= lop->lo_end) { 5723 /* overlaps all of new one */ 5724 error = nfsrv_dolocal(vp, lfp, flags, 5725 lop->lo_flags, first, end, cfp, p); 5726 if (error != 0) 5727 break; 5728 first = end; 5729 } else { 5730 /* handle fragment overlapped with new one */ 5731 error = nfsrv_dolocal(vp, lfp, flags, 5732 lop->lo_flags, first, lop->lo_end, cfp, p); 5733 if (error != 0) 5734 break; 5735 first = lop->lo_end; 5736 lop = nlop; 5737 } 5738 } 5739 } 5740 if (first < end && error == 0) 5741 /* handle fragment past end of list */ 5742 error = nfsrv_dolocal(vp, lfp, flags, NFSLCK_UNLOCK, first, 5743 end, cfp, p); 5744 5745 NFSEXITCODE(error); 5746 return (error); 5747 } 5748 5749 /* 5750 * Local lock unlock. Unlock all byte ranges that are no longer locked 5751 * by NFSv4. To do this, unlock any subranges of first-->end that 5752 * do not overlap with the byte ranges of any lock in the lfp->lf_lock 5753 * list. This list has all locks for the file held by other 5754 * <clientid, lockowner> tuples. The list is ordered by increasing 5755 * lo_first value, but may have entries that overlap each other, for 5756 * the case of read locks. 5757 */ 5758 static void 5759 nfsrv_localunlock(vnode_t vp, struct nfslockfile *lfp, uint64_t init_first, 5760 uint64_t init_end, NFSPROC_T *p) 5761 { 5762 struct nfslock *lop; 5763 uint64_t first, end, prevfirst; 5764 5765 first = init_first; 5766 end = init_end; 5767 while (first < init_end) { 5768 /* Loop through all nfs locks, adjusting first and end */ 5769 prevfirst = 0; 5770 LIST_FOREACH(lop, &lfp->lf_lock, lo_lckfile) { 5771 KASSERT(prevfirst <= lop->lo_first, 5772 ("nfsv4 locks out of order")); 5773 KASSERT(lop->lo_first < lop->lo_end, 5774 ("nfsv4 bogus lock")); 5775 prevfirst = lop->lo_first; 5776 if (first >= lop->lo_first && 5777 first < lop->lo_end) 5778 /* 5779 * Overlaps with initial part, so trim 5780 * off that initial part by moving first past 5781 * it. 5782 */ 5783 first = lop->lo_end; 5784 else if (end > lop->lo_first && 5785 lop->lo_first > first) { 5786 /* 5787 * This lock defines the end of the 5788 * segment to unlock, so set end to the 5789 * start of it and break out of the loop. 5790 */ 5791 end = lop->lo_first; 5792 break; 5793 } 5794 if (first >= end) 5795 /* 5796 * There is no segment left to do, so 5797 * break out of this loop and then exit 5798 * the outer while() since first will be set 5799 * to end, which must equal init_end here. 
5800 */ 5801 break; 5802 } 5803 if (first < end) { 5804 /* Unlock this segment */ 5805 (void) nfsrv_dolocal(vp, lfp, NFSLCK_UNLOCK, 5806 NFSLCK_READ, first, end, NULL, p); 5807 nfsrv_locallock_commit(lfp, NFSLCK_UNLOCK, 5808 first, end); 5809 } 5810 /* 5811 * Now move past this segment and look for any further 5812 * segment in the range, if there is one. 5813 */ 5814 first = end; 5815 end = init_end; 5816 } 5817 } 5818 5819 /* 5820 * Do the local lock operation and update the rollback list, as required. 5821 * Perform the rollback and return the error if nfsvno_advlock() fails. 5822 */ 5823 static int 5824 nfsrv_dolocal(vnode_t vp, struct nfslockfile *lfp, int flags, int oldflags, 5825 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p) 5826 { 5827 struct nfsrollback *rlp; 5828 int error = 0, ltype, oldltype; 5829 5830 if (flags & NFSLCK_WRITE) 5831 ltype = F_WRLCK; 5832 else if (flags & NFSLCK_READ) 5833 ltype = F_RDLCK; 5834 else 5835 ltype = F_UNLCK; 5836 if (oldflags & NFSLCK_WRITE) 5837 oldltype = F_WRLCK; 5838 else if (oldflags & NFSLCK_READ) 5839 oldltype = F_RDLCK; 5840 else 5841 oldltype = F_UNLCK; 5842 if (ltype == oldltype || (oldltype == F_WRLCK && ltype == F_RDLCK)) 5843 /* nothing to do */ 5844 goto out; 5845 error = nfsvno_advlock(vp, ltype, first, end, p); 5846 if (error != 0) { 5847 if (cfp != NULL) { 5848 cfp->cl_clientid.lval[0] = 0; 5849 cfp->cl_clientid.lval[1] = 0; 5850 cfp->cl_first = 0; 5851 cfp->cl_end = NFS64BITSSET; 5852 cfp->cl_flags = NFSLCK_WRITE; 5853 cfp->cl_ownerlen = 5; 5854 NFSBCOPY("LOCAL", cfp->cl_owner, 5); 5855 } 5856 nfsrv_locallock_rollback(vp, lfp, p); 5857 } else if (ltype != F_UNLCK) { 5858 rlp = malloc(sizeof (struct nfsrollback), M_NFSDROLLBACK, 5859 M_WAITOK); 5860 rlp->rlck_first = first; 5861 rlp->rlck_end = end; 5862 rlp->rlck_type = oldltype; 5863 LIST_INSERT_HEAD(&lfp->lf_rollback, rlp, rlck_list); 5864 } 5865 5866 out: 5867 NFSEXITCODE(error); 5868 return (error); 5869 } 5870 5871 /* 5872 * Roll back local lock changes and free up the rollback list. 5873 */ 5874 static void 5875 nfsrv_locallock_rollback(vnode_t vp, struct nfslockfile *lfp, NFSPROC_T *p) 5876 { 5877 struct nfsrollback *rlp, *nrlp; 5878 5879 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list, nrlp) { 5880 (void) nfsvno_advlock(vp, rlp->rlck_type, rlp->rlck_first, 5881 rlp->rlck_end, p); 5882 free(rlp, M_NFSDROLLBACK); 5883 } 5884 LIST_INIT(&lfp->lf_rollback); 5885 } 5886 5887 /* 5888 * Update local lock list and delete rollback list (ie now committed to the 5889 * local locks). Most of the work is done by the internal function. 
5890 */ 5891 static void 5892 nfsrv_locallock_commit(struct nfslockfile *lfp, int flags, uint64_t first, 5893 uint64_t end) 5894 { 5895 struct nfsrollback *rlp, *nrlp; 5896 struct nfslock *new_lop, *other_lop; 5897 5898 new_lop = malloc(sizeof (struct nfslock), M_NFSDLOCK, M_WAITOK); 5899 if (flags & (NFSLCK_READ | NFSLCK_WRITE)) 5900 other_lop = malloc(sizeof (struct nfslock), M_NFSDLOCK, 5901 M_WAITOK); 5902 else 5903 other_lop = NULL; 5904 new_lop->lo_flags = flags; 5905 new_lop->lo_first = first; 5906 new_lop->lo_end = end; 5907 nfsrv_updatelock(NULL, &new_lop, &other_lop, lfp); 5908 if (new_lop != NULL) 5909 free(new_lop, M_NFSDLOCK); 5910 if (other_lop != NULL) 5911 free(other_lop, M_NFSDLOCK); 5912 5913 /* and get rid of the rollback list */ 5914 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list, nrlp) 5915 free(rlp, M_NFSDROLLBACK); 5916 LIST_INIT(&lfp->lf_rollback); 5917 } 5918 5919 /* 5920 * Lock the struct nfslockfile for local lock updating. 5921 */ 5922 static void 5923 nfsrv_locklf(struct nfslockfile *lfp) 5924 { 5925 int gotlock; 5926 5927 /* lf_usecount ensures *lfp won't be free'd */ 5928 lfp->lf_usecount++; 5929 do { 5930 gotlock = nfsv4_lock(&lfp->lf_locallock_lck, 1, NULL, 5931 NFSSTATEMUTEXPTR, NULL); 5932 } while (gotlock == 0); 5933 lfp->lf_usecount--; 5934 } 5935 5936 /* 5937 * Unlock the struct nfslockfile after local lock updating. 5938 */ 5939 static void 5940 nfsrv_unlocklf(struct nfslockfile *lfp) 5941 { 5942 5943 nfsv4_unlock(&lfp->lf_locallock_lck, 0); 5944 } 5945 5946 /* 5947 * Clear out all state for the NFSv4 server. 5948 * Must be called by a thread that can sleep when no nfsds are running. 5949 */ 5950 void 5951 nfsrv_throwawayallstate(NFSPROC_T *p) 5952 { 5953 struct nfsclient *clp, *nclp; 5954 struct nfslockfile *lfp, *nlfp; 5955 int i; 5956 5957 /* 5958 * For each client, clean out the state and then free the structure. 5959 */ 5960 for (i = 0; i < nfsrv_clienthashsize; i++) { 5961 LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, nclp) { 5962 nfsrv_cleanclient(clp, p); 5963 nfsrv_freedeleglist(&clp->lc_deleg); 5964 nfsrv_freedeleglist(&clp->lc_olddeleg); 5965 free(clp->lc_stateid, M_NFSDCLIENT); 5966 free(clp, M_NFSDCLIENT); 5967 } 5968 } 5969 5970 /* 5971 * Also, free up any remaining lock file structures. 5972 */ 5973 for (i = 0; i < nfsrv_lockhashsize; i++) { 5974 LIST_FOREACH_SAFE(lfp, &nfslockhash[i], lf_hash, nlfp) { 5975 printf("nfsd unload: fnd a lock file struct\n"); 5976 nfsrv_freenfslockfile(lfp); 5977 } 5978 } 5979 5980 /* And get rid of the deviceid structures and layouts. */ 5981 nfsrv_freealllayoutsanddevids(); 5982 } 5983 5984 /* 5985 * Check the sequence# for the session and slot provided as an argument. 5986 * Also, renew the lease if the session will return NFS_OK. 
5987 */ 5988 int 5989 nfsrv_checksequence(struct nfsrv_descript *nd, uint32_t sequenceid, 5990 uint32_t *highest_slotidp, uint32_t *target_highest_slotidp, int cache_this, 5991 uint32_t *sflagsp, NFSPROC_T *p) 5992 { 5993 struct nfsdsession *sep; 5994 struct nfssessionhash *shp; 5995 int error; 5996 SVCXPRT *savxprt; 5997 5998 shp = NFSSESSIONHASH(nd->nd_sessionid); 5999 NFSLOCKSESSION(shp); 6000 sep = nfsrv_findsession(nd->nd_sessionid); 6001 if (sep == NULL) { 6002 NFSUNLOCKSESSION(shp); 6003 return (NFSERR_BADSESSION); 6004 } 6005 error = nfsv4_seqsession(sequenceid, nd->nd_slotid, *highest_slotidp, 6006 sep->sess_slots, NULL, NFSV4_SLOTS - 1); 6007 if (error != 0) { 6008 NFSUNLOCKSESSION(shp); 6009 return (error); 6010 } 6011 if (cache_this != 0) 6012 nd->nd_flag |= ND_SAVEREPLY; 6013 /* Renew the lease. */ 6014 sep->sess_clp->lc_expiry = nfsrv_leaseexpiry(); 6015 nd->nd_clientid.qval = sep->sess_clp->lc_clientid.qval; 6016 nd->nd_flag |= ND_IMPLIEDCLID; 6017 6018 /* 6019 * If this session handles the backchannel, save the nd_xprt for this 6020 * RPC, since this is the one being used. 6021 * RFC-5661 specifies that the fore channel will be implicitly 6022 * bound by a Sequence operation. However, since some NFSv4.1 clients 6023 * erroneously assumed that the back channel would be implicitly 6024 * bound as well, do the implicit binding unless a 6025 * BindConnectiontoSession has already been done on the session. 6026 */ 6027 if (sep->sess_clp->lc_req.nr_client != NULL && 6028 sep->sess_cbsess.nfsess_xprt != nd->nd_xprt && 6029 (sep->sess_crflags & NFSV4CRSESS_CONNBACKCHAN) != 0 && 6030 (sep->sess_clp->lc_flags & LCL_DONEBINDCONN) == 0) { 6031 NFSD_DEBUG(2, 6032 "nfsrv_checksequence: implicit back channel bind\n"); 6033 savxprt = sep->sess_cbsess.nfsess_xprt; 6034 SVC_ACQUIRE(nd->nd_xprt); 6035 nd->nd_xprt->xp_p2 = 6036 sep->sess_clp->lc_req.nr_client->cl_private; 6037 nd->nd_xprt->xp_idletimeout = 0; /* Disable timeout. */ 6038 sep->sess_cbsess.nfsess_xprt = nd->nd_xprt; 6039 if (savxprt != NULL) 6040 SVC_RELEASE(savxprt); 6041 } 6042 6043 *sflagsp = 0; 6044 if (sep->sess_clp->lc_req.nr_client == NULL) 6045 *sflagsp |= NFSV4SEQ_CBPATHDOWN; 6046 NFSUNLOCKSESSION(shp); 6047 if (error == NFSERR_EXPIRED) { 6048 *sflagsp |= NFSV4SEQ_EXPIREDALLSTATEREVOKED; 6049 error = 0; 6050 } else if (error == NFSERR_ADMINREVOKED) { 6051 *sflagsp |= NFSV4SEQ_ADMINSTATEREVOKED; 6052 error = 0; 6053 } 6054 *highest_slotidp = *target_highest_slotidp = NFSV4_SLOTS - 1; 6055 return (0); 6056 } 6057 6058 /* 6059 * Check/set reclaim complete for this session/clientid. 6060 */ 6061 int 6062 nfsrv_checkreclaimcomplete(struct nfsrv_descript *nd) 6063 { 6064 struct nfsdsession *sep; 6065 struct nfssessionhash *shp; 6066 int error = 0; 6067 6068 shp = NFSSESSIONHASH(nd->nd_sessionid); 6069 NFSLOCKSTATE(); 6070 NFSLOCKSESSION(shp); 6071 sep = nfsrv_findsession(nd->nd_sessionid); 6072 if (sep == NULL) { 6073 NFSUNLOCKSESSION(shp); 6074 NFSUNLOCKSTATE(); 6075 return (NFSERR_BADSESSION); 6076 } 6077 6078 /* Check to see if reclaim complete has already happened. */ 6079 if ((sep->sess_clp->lc_flags & LCL_RECLAIMCOMPLETE) != 0) 6080 error = NFSERR_COMPLETEALREADY; 6081 else { 6082 sep->sess_clp->lc_flags |= LCL_RECLAIMCOMPLETE; 6083 nfsrv_markreclaim(sep->sess_clp); 6084 } 6085 NFSUNLOCKSESSION(shp); 6086 NFSUNLOCKSTATE(); 6087 return (error); 6088 } 6089 6090 /* 6091 * Cache the reply in a session slot. 
6092 */ 6093 void 6094 nfsrv_cache_session(uint8_t *sessionid, uint32_t slotid, int repstat, 6095 struct mbuf **m) 6096 { 6097 struct nfsdsession *sep; 6098 struct nfssessionhash *shp; 6099 6100 shp = NFSSESSIONHASH(sessionid); 6101 NFSLOCKSESSION(shp); 6102 sep = nfsrv_findsession(sessionid); 6103 if (sep == NULL) { 6104 NFSUNLOCKSESSION(shp); 6105 printf("nfsrv_cache_session: no session\n"); 6106 m_freem(*m); 6107 return; 6108 } 6109 nfsv4_seqsess_cacherep(slotid, sep->sess_slots, repstat, m); 6110 NFSUNLOCKSESSION(shp); 6111 } 6112 6113 /* 6114 * Search for a session that matches the sessionid. 6115 */ 6116 static struct nfsdsession * 6117 nfsrv_findsession(uint8_t *sessionid) 6118 { 6119 struct nfsdsession *sep; 6120 struct nfssessionhash *shp; 6121 6122 shp = NFSSESSIONHASH(sessionid); 6123 LIST_FOREACH(sep, &shp->list, sess_hash) { 6124 if (!NFSBCMP(sessionid, sep->sess_sessionid, NFSX_V4SESSIONID)) 6125 break; 6126 } 6127 return (sep); 6128 } 6129 6130 /* 6131 * Destroy a session. 6132 */ 6133 int 6134 nfsrv_destroysession(struct nfsrv_descript *nd, uint8_t *sessionid) 6135 { 6136 int error, igotlock, samesess; 6137 6138 samesess = 0; 6139 if (!NFSBCMP(sessionid, nd->nd_sessionid, NFSX_V4SESSIONID) && 6140 (nd->nd_flag & ND_HASSEQUENCE) != 0) { 6141 samesess = 1; 6142 if ((nd->nd_flag & ND_LASTOP) == 0) 6143 return (NFSERR_BADSESSION); 6144 } 6145 6146 /* Lock out other nfsd threads */ 6147 NFSLOCKV4ROOTMUTEX(); 6148 nfsv4_relref(&nfsv4rootfs_lock); 6149 do { 6150 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, 6151 NFSV4ROOTLOCKMUTEXPTR, NULL); 6152 } while (igotlock == 0); 6153 NFSUNLOCKV4ROOTMUTEX(); 6154 6155 error = nfsrv_freesession(NULL, sessionid); 6156 if (error == 0 && samesess != 0) 6157 nd->nd_flag &= ~ND_HASSEQUENCE; 6158 6159 NFSLOCKV4ROOTMUTEX(); 6160 nfsv4_unlock(&nfsv4rootfs_lock, 1); 6161 NFSUNLOCKV4ROOTMUTEX(); 6162 return (error); 6163 } 6164 6165 /* 6166 * Bind a connection to a session. 6167 * For now, only certain variants are supported, since the current session 6168 * structure can only handle a single backchannel entry, which will be 6169 * applied to all connections if it is set. 6170 */ 6171 int 6172 nfsrv_bindconnsess(struct nfsrv_descript *nd, uint8_t *sessionid, int *foreaftp) 6173 { 6174 struct nfssessionhash *shp; 6175 struct nfsdsession *sep; 6176 struct nfsclient *clp; 6177 SVCXPRT *savxprt; 6178 int error; 6179 6180 error = 0; 6181 shp = NFSSESSIONHASH(sessionid); 6182 NFSLOCKSTATE(); 6183 NFSLOCKSESSION(shp); 6184 sep = nfsrv_findsession(sessionid); 6185 if (sep != NULL) { 6186 clp = sep->sess_clp; 6187 if (*foreaftp == NFSCDFC4_BACK || 6188 *foreaftp == NFSCDFC4_BACK_OR_BOTH || 6189 *foreaftp == NFSCDFC4_FORE_OR_BOTH) { 6190 /* Try to set up a backchannel. */ 6191 if (clp->lc_req.nr_client == NULL) { 6192 NFSD_DEBUG(2, "nfsrv_bindconnsess: acquire " 6193 "backchannel\n"); 6194 clp->lc_req.nr_client = (struct __rpc_client *) 6195 clnt_bck_create(nd->nd_xprt->xp_socket, 6196 sep->sess_cbprogram, NFSV4_CBVERS); 6197 } 6198 if (clp->lc_req.nr_client != NULL) { 6199 NFSD_DEBUG(2, "nfsrv_bindconnsess: set up " 6200 "backchannel\n"); 6201 savxprt = sep->sess_cbsess.nfsess_xprt; 6202 SVC_ACQUIRE(nd->nd_xprt); 6203 nd->nd_xprt->xp_p2 = 6204 clp->lc_req.nr_client->cl_private; 6205 /* Disable idle timeout. 
*/ 6206 nd->nd_xprt->xp_idletimeout = 0; 6207 sep->sess_cbsess.nfsess_xprt = nd->nd_xprt; 6208 if (savxprt != NULL) 6209 SVC_RELEASE(savxprt); 6210 sep->sess_crflags |= NFSV4CRSESS_CONNBACKCHAN; 6211 clp->lc_flags |= LCL_DONEBINDCONN; 6212 if (*foreaftp == NFSCDFS4_BACK) 6213 *foreaftp = NFSCDFS4_BACK; 6214 else 6215 *foreaftp = NFSCDFS4_BOTH; 6216 } else if (*foreaftp != NFSCDFC4_BACK) { 6217 NFSD_DEBUG(2, "nfsrv_bindconnsess: can't set " 6218 "up backchannel\n"); 6219 sep->sess_crflags &= ~NFSV4CRSESS_CONNBACKCHAN; 6220 clp->lc_flags |= LCL_DONEBINDCONN; 6221 *foreaftp = NFSCDFS4_FORE; 6222 } else { 6223 error = NFSERR_NOTSUPP; 6224 printf("nfsrv_bindconnsess: Can't add " 6225 "backchannel\n"); 6226 } 6227 } else { 6228 NFSD_DEBUG(2, "nfsrv_bindconnsess: Set forechannel\n"); 6229 clp->lc_flags |= LCL_DONEBINDCONN; 6230 *foreaftp = NFSCDFS4_FORE; 6231 } 6232 } else 6233 error = NFSERR_BADSESSION; 6234 NFSUNLOCKSESSION(shp); 6235 NFSUNLOCKSTATE(); 6236 return (error); 6237 } 6238 6239 /* 6240 * Free up a session structure. 6241 */ 6242 static int 6243 nfsrv_freesession(struct nfsdsession *sep, uint8_t *sessionid) 6244 { 6245 struct nfssessionhash *shp; 6246 int i; 6247 6248 NFSLOCKSTATE(); 6249 if (sep == NULL) { 6250 shp = NFSSESSIONHASH(sessionid); 6251 NFSLOCKSESSION(shp); 6252 sep = nfsrv_findsession(sessionid); 6253 } else { 6254 shp = NFSSESSIONHASH(sep->sess_sessionid); 6255 NFSLOCKSESSION(shp); 6256 } 6257 if (sep != NULL) { 6258 sep->sess_refcnt--; 6259 if (sep->sess_refcnt > 0) { 6260 NFSUNLOCKSESSION(shp); 6261 NFSUNLOCKSTATE(); 6262 return (NFSERR_BACKCHANBUSY); 6263 } 6264 LIST_REMOVE(sep, sess_hash); 6265 LIST_REMOVE(sep, sess_list); 6266 } 6267 NFSUNLOCKSESSION(shp); 6268 NFSUNLOCKSTATE(); 6269 if (sep == NULL) 6270 return (NFSERR_BADSESSION); 6271 for (i = 0; i < NFSV4_SLOTS; i++) 6272 if (sep->sess_slots[i].nfssl_reply != NULL) 6273 m_freem(sep->sess_slots[i].nfssl_reply); 6274 if (sep->sess_cbsess.nfsess_xprt != NULL) 6275 SVC_RELEASE(sep->sess_cbsess.nfsess_xprt); 6276 free(sep, M_NFSDSESSION); 6277 return (0); 6278 } 6279 6280 /* 6281 * Free a stateid. 6282 * RFC5661 says that it should fail when there are associated opens, locks 6283 * or delegations. Since stateids represent opens, I don't see how you can 6284 * free an open stateid (it will be free'd when closed), so this function 6285 * only works for lock stateids (freeing the lock_owner) or delegations. 6286 */ 6287 int 6288 nfsrv_freestateid(struct nfsrv_descript *nd, nfsv4stateid_t *stateidp, 6289 NFSPROC_T *p) 6290 { 6291 struct nfsclient *clp; 6292 struct nfsstate *stp; 6293 int error; 6294 6295 NFSLOCKSTATE(); 6296 /* 6297 * Look up the stateid 6298 */ 6299 error = nfsrv_getclient((nfsquad_t)((u_quad_t)0), CLOPS_RENEW, &clp, 6300 NULL, (nfsquad_t)((u_quad_t)0), 0, nd, p); 6301 if (error == 0) { 6302 /* First, check for a delegation. */ 6303 LIST_FOREACH(stp, &clp->lc_deleg, ls_list) { 6304 if (!NFSBCMP(stp->ls_stateid.other, stateidp->other, 6305 NFSX_STATEIDOTHER)) 6306 break; 6307 } 6308 if (stp != NULL) { 6309 nfsrv_freedeleg(stp); 6310 NFSUNLOCKSTATE(); 6311 return (error); 6312 } 6313 } 6314 /* Not a delegation, try for a lock_owner. */ 6315 if (error == 0) 6316 error = nfsrv_getstate(clp, stateidp, 0, &stp); 6317 if (error == 0 && ((stp->ls_flags & (NFSLCK_OPEN | NFSLCK_DELEGREAD | 6318 NFSLCK_DELEGWRITE)) != 0 || (stp->ls_flags & NFSLCK_LOCK) == 0)) 6319 /* Not a lock_owner stateid. 
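* An open stateid cannot be freed here, so anything that is not a
* lock_owner stateid gets NFSERR_LOCKSHELD.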
*/ 6320 error = NFSERR_LOCKSHELD; 6321 if (error == 0 && !LIST_EMPTY(&stp->ls_lock)) 6322 error = NFSERR_LOCKSHELD; 6323 if (error == 0) 6324 nfsrv_freelockowner(stp, NULL, 0, p); 6325 NFSUNLOCKSTATE(); 6326 return (error); 6327 } 6328 6329 /* 6330 * Test a stateid. 6331 */ 6332 int 6333 nfsrv_teststateid(struct nfsrv_descript *nd, nfsv4stateid_t *stateidp, 6334 NFSPROC_T *p) 6335 { 6336 struct nfsclient *clp; 6337 struct nfsstate *stp; 6338 int error; 6339 6340 NFSLOCKSTATE(); 6341 /* 6342 * Look up the stateid 6343 */ 6344 error = nfsrv_getclient((nfsquad_t)((u_quad_t)0), CLOPS_RENEW, &clp, 6345 NULL, (nfsquad_t)((u_quad_t)0), 0, nd, p); 6346 if (error == 0) 6347 error = nfsrv_getstate(clp, stateidp, 0, &stp); 6348 if (error == 0 && stateidp->seqid != 0 && 6349 SEQ_LT(stateidp->seqid, stp->ls_stateid.seqid)) 6350 error = NFSERR_OLDSTATEID; 6351 NFSUNLOCKSTATE(); 6352 return (error); 6353 } 6354 6355 /* 6356 * Generate the xdr for an NFSv4.1 CBSequence Operation. 6357 */ 6358 static int 6359 nfsv4_setcbsequence(struct nfsrv_descript *nd, struct nfsclient *clp, 6360 int dont_replycache, struct nfsdsession **sepp) 6361 { 6362 struct nfsdsession *sep; 6363 uint32_t *tl, slotseq = 0; 6364 int maxslot, slotpos; 6365 uint8_t sessionid[NFSX_V4SESSIONID]; 6366 int error; 6367 6368 error = nfsv4_getcbsession(clp, sepp); 6369 if (error != 0) 6370 return (error); 6371 sep = *sepp; 6372 (void)nfsv4_sequencelookup(NULL, &sep->sess_cbsess, &slotpos, &maxslot, 6373 &slotseq, sessionid); 6374 KASSERT(maxslot >= 0, ("nfsv4_setcbsequence neg maxslot")); 6375 6376 /* Build the Sequence arguments. */ 6377 NFSM_BUILD(tl, uint32_t *, NFSX_V4SESSIONID + 5 * NFSX_UNSIGNED); 6378 bcopy(sessionid, tl, NFSX_V4SESSIONID); 6379 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 6380 nd->nd_slotseq = tl; 6381 *tl++ = txdr_unsigned(slotseq); 6382 *tl++ = txdr_unsigned(slotpos); 6383 *tl++ = txdr_unsigned(maxslot); 6384 if (dont_replycache == 0) 6385 *tl++ = newnfs_true; 6386 else 6387 *tl++ = newnfs_false; 6388 *tl = 0; /* No referring call list, for now. */ 6389 nd->nd_flag |= ND_HASSEQUENCE; 6390 return (0); 6391 } 6392 6393 /* 6394 * Get a session for the callback. 6395 */ 6396 static int 6397 nfsv4_getcbsession(struct nfsclient *clp, struct nfsdsession **sepp) 6398 { 6399 struct nfsdsession *sep; 6400 6401 NFSLOCKSTATE(); 6402 LIST_FOREACH(sep, &clp->lc_session, sess_list) { 6403 if ((sep->sess_crflags & NFSV4CRSESS_CONNBACKCHAN) != 0) 6404 break; 6405 } 6406 if (sep == NULL) { 6407 NFSUNLOCKSTATE(); 6408 return (NFSERR_BADSESSION); 6409 } 6410 sep->sess_refcnt++; 6411 *sepp = sep; 6412 NFSUNLOCKSTATE(); 6413 return (0); 6414 } 6415 6416 /* 6417 * Free up all backchannel xprts. This needs to be done when the nfsd threads 6418 * exit, since those transports will all be going away. 6419 * This is only called after all the nfsd threads are done performing RPCs, 6420 * so locking shouldn't be an issue. 6421 */ 6422 APPLESTATIC void 6423 nfsrv_freeallbackchannel_xprts(void) 6424 { 6425 struct nfsdsession *sep; 6426 struct nfsclient *clp; 6427 SVCXPRT *xprt; 6428 int i; 6429 6430 for (i = 0; i < nfsrv_clienthashsize; i++) { 6431 LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) { 6432 LIST_FOREACH(sep, &clp->lc_session, sess_list) { 6433 xprt = sep->sess_cbsess.nfsess_xprt; 6434 sep->sess_cbsess.nfsess_xprt = NULL; 6435 if (xprt != NULL) 6436 SVC_RELEASE(xprt); 6437 } 6438 } 6439 } 6440 } 6441 6442 /* 6443 * Do a layout commit. Actually just call nfsrv_updatemdsattr(). 
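* That call should refresh the file's attributes from the DS file(s),
* so the new size can be returned via *newsizep.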
6444 * I have no idea if the rest of these arguments will ever be useful? 6445 */ 6446 int 6447 nfsrv_layoutcommit(struct nfsrv_descript *nd, vnode_t vp, int layouttype, 6448 int hasnewoff, uint64_t newoff, uint64_t offset, uint64_t len, 6449 int hasnewmtime, struct timespec *newmtimep, int reclaim, 6450 nfsv4stateid_t *stateidp, int maxcnt, char *layp, int *hasnewsizep, 6451 uint64_t *newsizep, struct ucred *cred, NFSPROC_T *p) 6452 { 6453 struct nfsvattr na; 6454 int error; 6455 6456 error = nfsrv_updatemdsattr(vp, &na, p); 6457 if (error == 0) { 6458 *hasnewsizep = 1; 6459 *newsizep = na.na_size; 6460 } 6461 return (error); 6462 } 6463 6464 /* 6465 * Try and get a layout. 6466 */ 6467 int 6468 nfsrv_layoutget(struct nfsrv_descript *nd, vnode_t vp, struct nfsexstuff *exp, 6469 int layouttype, int *iomode, uint64_t *offset, uint64_t *len, 6470 uint64_t minlen, nfsv4stateid_t *stateidp, int maxcnt, int *retonclose, 6471 int *layoutlenp, char *layp, struct ucred *cred, NFSPROC_T *p) 6472 { 6473 struct nfslayouthash *lhyp; 6474 struct nfslayout *lyp; 6475 char *devid; 6476 fhandle_t fh, *dsfhp; 6477 int error, mirrorcnt; 6478 6479 if (nfsrv_devidcnt == 0) 6480 return (NFSERR_UNKNLAYOUTTYPE); 6481 6482 if (*offset != 0) 6483 printf("nfsrv_layoutget: off=%ju len=%ju\n", (uintmax_t)*offset, 6484 (uintmax_t)*len); 6485 error = nfsvno_getfh(vp, &fh, p); 6486 NFSD_DEBUG(4, "layoutget getfh=%d\n", error); 6487 if (error != 0) 6488 return (error); 6489 6490 /* 6491 * For now, all layouts are for entire files. 6492 * Only issue Read/Write layouts if requested for a non-readonly fs. 6493 */ 6494 if (NFSVNO_EXRDONLY(exp)) { 6495 if (*iomode == NFSLAYOUTIOMODE_RW) 6496 return (NFSERR_LAYOUTTRYLATER); 6497 *iomode = NFSLAYOUTIOMODE_READ; 6498 } 6499 if (*iomode != NFSLAYOUTIOMODE_RW) 6500 *iomode = NFSLAYOUTIOMODE_READ; 6501 6502 /* 6503 * Check to see if a write layout can be issued for this file. 6504 * This is used during mirror recovery to avoid RW layouts being 6505 * issued for a file while it is being copied to the recovered 6506 * mirror. 6507 */ 6508 if (*iomode == NFSLAYOUTIOMODE_RW && nfsrv_dontlayout(&fh) != 0) 6509 return (NFSERR_LAYOUTTRYLATER); 6510 6511 *retonclose = 0; 6512 *offset = 0; 6513 *len = UINT64_MAX; 6514 6515 /* First, see if a layout already exists and return if found. */ 6516 lhyp = NFSLAYOUTHASH(&fh); 6517 NFSLOCKLAYOUT(lhyp); 6518 error = nfsrv_findlayout(&nd->nd_clientid, &fh, layouttype, p, &lyp); 6519 NFSD_DEBUG(4, "layoutget findlay=%d\n", error); 6520 /* 6521 * Not sure if the seqid must be the same, so I won't check it. 6522 */ 6523 if (error == 0 && (stateidp->other[0] != lyp->lay_stateid.other[0] || 6524 stateidp->other[1] != lyp->lay_stateid.other[1] || 6525 stateidp->other[2] != lyp->lay_stateid.other[2])) { 6526 if ((lyp->lay_flags & NFSLAY_CALLB) == 0) { 6527 NFSUNLOCKLAYOUT(lhyp); 6528 NFSD_DEBUG(1, "ret bad stateid\n"); 6529 return (NFSERR_BADSTATEID); 6530 } 6531 /* 6532 * I believe we get here because there is a race between 6533 * the client processing the CBLAYOUTRECALL and the layout 6534 * being deleted here on the server. 6535 * The client has now done a LayoutGet with a non-layout 6536 * stateid, as it would when there is no layout. 6537 * As such, free this layout and set error == NFSERR_BADSTATEID 6538 * so the code below will create a new layout structure as 6539 * would happen if no layout was found. 6540 * "lyp" will be set before being used below, but set it NULL 6541 * as a safety belt. 
6542 */ 6543 nfsrv_freelayout(&lhyp->list, lyp); 6544 lyp = NULL; 6545 error = NFSERR_BADSTATEID; 6546 } 6547 if (error == 0) { 6548 if (lyp->lay_layoutlen > maxcnt) { 6549 NFSUNLOCKLAYOUT(lhyp); 6550 NFSD_DEBUG(1, "ret layout too small\n"); 6551 return (NFSERR_TOOSMALL); 6552 } 6553 if (*iomode == NFSLAYOUTIOMODE_RW) 6554 lyp->lay_flags |= NFSLAY_RW; 6555 else 6556 lyp->lay_flags |= NFSLAY_READ; 6557 NFSBCOPY(lyp->lay_xdr, layp, lyp->lay_layoutlen); 6558 *layoutlenp = lyp->lay_layoutlen; 6559 if (++lyp->lay_stateid.seqid == 0) 6560 lyp->lay_stateid.seqid = 1; 6561 stateidp->seqid = lyp->lay_stateid.seqid; 6562 NFSUNLOCKLAYOUT(lhyp); 6563 NFSD_DEBUG(4, "ret fnd layout\n"); 6564 return (0); 6565 } 6566 NFSUNLOCKLAYOUT(lhyp); 6567 6568 /* Find the device id and file handle. */ 6569 dsfhp = malloc(sizeof(fhandle_t) * NFSDEV_MAXMIRRORS, M_TEMP, M_WAITOK); 6570 devid = malloc(NFSX_V4DEVICEID * NFSDEV_MAXMIRRORS, M_TEMP, M_WAITOK); 6571 error = nfsrv_dsgetdevandfh(vp, p, &mirrorcnt, dsfhp, devid); 6572 NFSD_DEBUG(4, "layoutget devandfh=%d\n", error); 6573 if (error == 0) { 6574 if (layouttype == NFSLAYOUT_NFSV4_1_FILES) { 6575 if (NFSX_V4FILELAYOUT > maxcnt) 6576 error = NFSERR_TOOSMALL; 6577 else 6578 lyp = nfsrv_filelayout(nd, *iomode, &fh, dsfhp, 6579 devid, vp->v_mount->mnt_stat.f_fsid); 6580 } else { 6581 if (NFSX_V4FLEXLAYOUT(mirrorcnt) > maxcnt) 6582 error = NFSERR_TOOSMALL; 6583 else 6584 lyp = nfsrv_flexlayout(nd, *iomode, mirrorcnt, 6585 &fh, dsfhp, devid, 6586 vp->v_mount->mnt_stat.f_fsid); 6587 } 6588 } 6589 free(dsfhp, M_TEMP); 6590 free(devid, M_TEMP); 6591 if (error != 0) 6592 return (error); 6593 6594 /* 6595 * Now, add this layout to the list. 6596 */ 6597 error = nfsrv_addlayout(nd, &lyp, stateidp, layp, layoutlenp, p); 6598 NFSD_DEBUG(4, "layoutget addl=%d\n", error); 6599 /* 6600 * The lyp will be set to NULL by nfsrv_addlayout() if it 6601 * linked the new structure into the lists. 6602 */ 6603 free(lyp, M_NFSDSTATE); 6604 return (error); 6605 } 6606 6607 /* 6608 * Generate a File Layout. 6609 */ 6610 static struct nfslayout * 6611 nfsrv_filelayout(struct nfsrv_descript *nd, int iomode, fhandle_t *fhp, 6612 fhandle_t *dsfhp, char *devid, fsid_t fs) 6613 { 6614 uint32_t *tl; 6615 struct nfslayout *lyp; 6616 uint64_t pattern_offset; 6617 6618 lyp = malloc(sizeof(struct nfslayout) + NFSX_V4FILELAYOUT, M_NFSDSTATE, 6619 M_WAITOK | M_ZERO); 6620 lyp->lay_type = NFSLAYOUT_NFSV4_1_FILES; 6621 if (iomode == NFSLAYOUTIOMODE_RW) 6622 lyp->lay_flags = NFSLAY_RW; 6623 else 6624 lyp->lay_flags = NFSLAY_READ; 6625 NFSBCOPY(fhp, &lyp->lay_fh, sizeof(*fhp)); 6626 lyp->lay_clientid.qval = nd->nd_clientid.qval; 6627 lyp->lay_fsid = fs; 6628 6629 /* Fill in the xdr for the files layout. */ 6630 tl = (uint32_t *)lyp->lay_xdr; 6631 NFSBCOPY(devid, tl, NFSX_V4DEVICEID); /* Device ID. */ 6632 tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED); 6633 6634 /* 6635 * Make the stripe size as many 64K blocks as will fit in the stripe 6636 * mask. Since there is only one stripe, the stripe size doesn't really 6637 * matter, except that the Linux client will only handle an exact 6638 * multiple of their PAGE_SIZE (usually 4K). I chose 64K as a value 6639 * that should cover most/all arches w.r.t. PAGE_SIZE. 6640 */ 6641 *tl++ = txdr_unsigned(NFSFLAYUTIL_STRIPE_MASK & ~0xffff); 6642 *tl++ = 0; /* 1st stripe index. */ 6643 pattern_offset = 0; 6644 txdr_hyper(pattern_offset, tl); tl += 2; /* Pattern offset. */ 6645 *tl++ = txdr_unsigned(1); /* 1 file handle. 
*/ 6646 *tl++ = txdr_unsigned(NFSX_V4PNFSFH); 6647 NFSBCOPY(dsfhp, tl, sizeof(*dsfhp)); 6648 lyp->lay_layoutlen = NFSX_V4FILELAYOUT; 6649 return (lyp); 6650 } 6651 6652 #define FLEX_OWNERID "999" 6653 #define FLEX_UID0 "0" 6654 /* 6655 * Generate a Flex File Layout. 6656 * The FLEX_OWNERID can be any string of 3 decimal digits. Although this 6657 * string goes on the wire, it isn't supposed to be used by the client, 6658 * since this server uses tight coupling. 6659 * Although not recommended by the spec., if vfs.nfsd.flexlinuxhack=1 use 6660 * a string of "0". This works around the Linux Flex File Layout driver bug 6661 * which uses the synthetic uid/gid strings for the "tightly coupled" case. 6662 */ 6663 static struct nfslayout * 6664 nfsrv_flexlayout(struct nfsrv_descript *nd, int iomode, int mirrorcnt, 6665 fhandle_t *fhp, fhandle_t *dsfhp, char *devid, fsid_t fs) 6666 { 6667 uint32_t *tl; 6668 struct nfslayout *lyp; 6669 uint64_t lenval; 6670 int i; 6671 6672 lyp = malloc(sizeof(struct nfslayout) + NFSX_V4FLEXLAYOUT(mirrorcnt), 6673 M_NFSDSTATE, M_WAITOK | M_ZERO); 6674 lyp->lay_type = NFSLAYOUT_FLEXFILE; 6675 if (iomode == NFSLAYOUTIOMODE_RW) 6676 lyp->lay_flags = NFSLAY_RW; 6677 else 6678 lyp->lay_flags = NFSLAY_READ; 6679 NFSBCOPY(fhp, &lyp->lay_fh, sizeof(*fhp)); 6680 lyp->lay_clientid.qval = nd->nd_clientid.qval; 6681 lyp->lay_fsid = fs; 6682 lyp->lay_mirrorcnt = mirrorcnt; 6683 6684 /* Fill in the xdr for the files layout. */ 6685 tl = (uint32_t *)lyp->lay_xdr; 6686 lenval = 0; 6687 txdr_hyper(lenval, tl); tl += 2; /* Stripe unit. */ 6688 *tl++ = txdr_unsigned(mirrorcnt); /* # of mirrors. */ 6689 for (i = 0; i < mirrorcnt; i++) { 6690 *tl++ = txdr_unsigned(1); /* One stripe. */ 6691 NFSBCOPY(devid, tl, NFSX_V4DEVICEID); /* Device ID. */ 6692 tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED); 6693 devid += NFSX_V4DEVICEID; 6694 *tl++ = txdr_unsigned(1); /* Efficiency. */ 6695 *tl++ = 0; /* Proxy Stateid. */ 6696 *tl++ = 0x55555555; 6697 *tl++ = 0x55555555; 6698 *tl++ = 0x55555555; 6699 *tl++ = txdr_unsigned(1); /* 1 file handle. */ 6700 *tl++ = txdr_unsigned(NFSX_V4PNFSFH); 6701 NFSBCOPY(dsfhp, tl, sizeof(*dsfhp)); 6702 tl += (NFSM_RNDUP(NFSX_V4PNFSFH) / NFSX_UNSIGNED); 6703 dsfhp++; 6704 if (nfsrv_flexlinuxhack != 0) { 6705 *tl++ = txdr_unsigned(strlen(FLEX_UID0)); 6706 *tl = 0; /* 0 pad string. */ 6707 NFSBCOPY(FLEX_UID0, tl++, strlen(FLEX_UID0)); 6708 *tl++ = txdr_unsigned(strlen(FLEX_UID0)); 6709 *tl = 0; /* 0 pad string. */ 6710 NFSBCOPY(FLEX_UID0, tl++, strlen(FLEX_UID0)); 6711 } else { 6712 *tl++ = txdr_unsigned(strlen(FLEX_OWNERID)); 6713 NFSBCOPY(FLEX_OWNERID, tl++, NFSX_UNSIGNED); 6714 *tl++ = txdr_unsigned(strlen(FLEX_OWNERID)); 6715 NFSBCOPY(FLEX_OWNERID, tl++, NFSX_UNSIGNED); 6716 } 6717 } 6718 *tl++ = txdr_unsigned(0); /* ff_flags. */ 6719 *tl = txdr_unsigned(60); /* Status interval hint. */ 6720 lyp->lay_layoutlen = NFSX_V4FLEXLAYOUT(mirrorcnt); 6721 return (lyp); 6722 } 6723 6724 /* 6725 * Parse and process Flex File errors returned via LayoutReturn. 6726 */ 6727 static void 6728 nfsrv_flexlayouterr(struct nfsrv_descript *nd, uint32_t *layp, int maxcnt, 6729 NFSPROC_T *p) 6730 { 6731 uint32_t *tl; 6732 int cnt, errcnt, i, j, opnum, stat; 6733 char devid[NFSX_V4DEVICEID]; 6734 6735 tl = layp; 6736 cnt = fxdr_unsigned(int, *tl++); 6737 NFSD_DEBUG(4, "flexlayouterr cnt=%d\n", cnt); 6738 for (i = 0; i < cnt; i++) { 6739 /* Skip offset, length and stateid for now. 
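* (That is 2 XDR words each for the 64bit offset and length, plus the
* words of the stateid.)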
*/ 6740 tl += (4 + NFSX_STATEID / NFSX_UNSIGNED); 6741 errcnt = fxdr_unsigned(int, *tl++); 6742 NFSD_DEBUG(4, "flexlayouterr errcnt=%d\n", errcnt); 6743 for (j = 0; j < errcnt; j++) { 6744 NFSBCOPY(tl, devid, NFSX_V4DEVICEID); 6745 tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED); 6746 stat = fxdr_unsigned(int, *tl++); 6747 opnum = fxdr_unsigned(int, *tl++); 6748 NFSD_DEBUG(4, "flexlayouterr op=%d stat=%d\n", opnum, 6749 stat); 6750 /* 6751 * Except for NFSERR_ACCES errors for Reading, 6752 * shut the mirror down. 6753 */ 6754 if (opnum != NFSV4OP_READ || stat != NFSERR_ACCES) 6755 nfsrv_delds(devid, p); 6756 } 6757 } 6758 } 6759 6760 /* 6761 * This function removes all flex file layouts which has a mirror with 6762 * a device id that matches the argument. 6763 * Called when the DS represented by the device id has failed. 6764 */ 6765 void 6766 nfsrv_flexmirrordel(char *devid, NFSPROC_T *p) 6767 { 6768 uint32_t *tl; 6769 struct nfslayout *lyp, *nlyp; 6770 struct nfslayouthash *lhyp; 6771 struct nfslayouthead loclyp; 6772 int i, j; 6773 6774 NFSD_DEBUG(4, "flexmirrordel\n"); 6775 /* Move all layouts found onto a local list. */ 6776 TAILQ_INIT(&loclyp); 6777 for (i = 0; i < nfsrv_layouthashsize; i++) { 6778 lhyp = &nfslayouthash[i]; 6779 NFSLOCKLAYOUT(lhyp); 6780 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) { 6781 if (lyp->lay_type == NFSLAYOUT_FLEXFILE && 6782 lyp->lay_mirrorcnt > 1) { 6783 NFSD_DEBUG(4, "possible match\n"); 6784 tl = lyp->lay_xdr; 6785 tl += 3; 6786 for (j = 0; j < lyp->lay_mirrorcnt; j++) { 6787 tl++; 6788 if (NFSBCMP(devid, tl, NFSX_V4DEVICEID) 6789 == 0) { 6790 /* Found one. */ 6791 NFSD_DEBUG(4, "fnd one\n"); 6792 TAILQ_REMOVE(&lhyp->list, lyp, 6793 lay_list); 6794 TAILQ_INSERT_HEAD(&loclyp, lyp, 6795 lay_list); 6796 break; 6797 } 6798 tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED + 6799 NFSM_RNDUP(NFSX_V4PNFSFH) / 6800 NFSX_UNSIGNED + 11 * NFSX_UNSIGNED); 6801 } 6802 } 6803 } 6804 NFSUNLOCKLAYOUT(lhyp); 6805 } 6806 6807 /* Now, try to do a Layout recall for each one found. */ 6808 TAILQ_FOREACH_SAFE(lyp, &loclyp, lay_list, nlyp) { 6809 NFSD_DEBUG(4, "do layout recall\n"); 6810 /* 6811 * The layout stateid.seqid needs to be incremented 6812 * before doing a LAYOUT_RECALL callback. 6813 * Set lay_trycnt to UINT16_MAX so it won't set up a retry. 6814 */ 6815 if (++lyp->lay_stateid.seqid == 0) 6816 lyp->lay_stateid.seqid = 1; 6817 lyp->lay_trycnt = UINT16_MAX; 6818 nfsrv_recalllayout(lyp->lay_clientid, &lyp->lay_stateid, 6819 &lyp->lay_fh, lyp, &loclyp, lyp->lay_type, p); 6820 nfsrv_freelayout(&loclyp, lyp); 6821 } 6822 } 6823 6824 /* 6825 * Do a recall callback to the client for this layout. 6826 */ 6827 static int 6828 nfsrv_recalllayout(nfsquad_t clid, nfsv4stateid_t *stateidp, fhandle_t *fhp, 6829 struct nfslayout *lyp, struct nfslayouthead *lyheadp, int laytype, 6830 NFSPROC_T *p) 6831 { 6832 struct nfsclient *clp; 6833 int error; 6834 6835 NFSD_DEBUG(4, "nfsrv_recalllayout\n"); 6836 error = nfsrv_getclient(clid, 0, &clp, NULL, (nfsquad_t)((u_quad_t)0), 6837 0, NULL, p); 6838 NFSD_DEBUG(4, "aft nfsrv_getclient=%d\n", error); 6839 if (error != 0) 6840 return (error); 6841 if ((clp->lc_flags & LCL_NFSV41) != 0) { 6842 error = nfsrv_docallback(clp, NFSV4OP_CBLAYOUTRECALL, 6843 stateidp, 0, fhp, NULL, NULL, laytype, p); 6844 /* If lyp != NULL, handle an error return here. */ 6845 if (error != 0 && lyp != NULL) { 6846 NFSDRECALLLOCK(); 6847 if (error == NFSERR_NOMATCHLAYOUT) { 6848 /* 6849 * Mark it returned, since there is no layout. 
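* The client replied NFSERR_NOMATCHLAYOUT, so it no longer holds the
* layout. Treat that as a completed return and wake up any thread
* waiting for the recall to finish.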
6850 */ 6851 if ((lyp->lay_flags & NFSLAY_RECALL) != 0) { 6852 lyp->lay_flags |= NFSLAY_RETURNED; 6853 wakeup(lyp); 6854 } 6855 NFSDRECALLUNLOCK(); 6856 } else if ((lyp->lay_flags & NFSLAY_RETURNED) == 0 && 6857 lyp->lay_trycnt < 10) { 6858 /* 6859 * Clear recall, so it can be tried again 6860 * and put it at the end of the list to 6861 * delay the retry a little longer. 6862 */ 6863 lyp->lay_flags &= ~NFSLAY_RECALL; 6864 lyp->lay_trycnt++; 6865 TAILQ_REMOVE(lyheadp, lyp, lay_list); 6866 TAILQ_INSERT_TAIL(lyheadp, lyp, lay_list); 6867 NFSDRECALLUNLOCK(); 6868 nfs_catnap(PVFS, 0, "nfsrclay"); 6869 } else 6870 NFSDRECALLUNLOCK(); 6871 } 6872 } else 6873 printf("nfsrv_recalllayout: clp not NFSv4.1\n"); 6874 return (error); 6875 } 6876 6877 /* 6878 * Find a layout to recall when we exceed our high water mark. 6879 */ 6880 void 6881 nfsrv_recalloldlayout(NFSPROC_T *p) 6882 { 6883 struct nfslayouthash *lhyp; 6884 struct nfslayout *lyp; 6885 nfsquad_t clientid; 6886 nfsv4stateid_t stateid; 6887 fhandle_t fh; 6888 int error, laytype, ret; 6889 6890 lhyp = &nfslayouthash[arc4random() % nfsrv_layouthashsize]; 6891 NFSLOCKLAYOUT(lhyp); 6892 TAILQ_FOREACH_REVERSE(lyp, &lhyp->list, nfslayouthead, lay_list) { 6893 if ((lyp->lay_flags & NFSLAY_CALLB) == 0) { 6894 lyp->lay_flags |= NFSLAY_CALLB; 6895 /* 6896 * The layout stateid.seqid needs to be incremented 6897 * before doing a LAYOUT_RECALL callback. 6898 */ 6899 if (++lyp->lay_stateid.seqid == 0) 6900 lyp->lay_stateid.seqid = 1; 6901 clientid = lyp->lay_clientid; 6902 stateid = lyp->lay_stateid; 6903 fh = lyp->lay_fh; 6904 laytype = lyp->lay_type; 6905 break; 6906 } 6907 } 6908 NFSUNLOCKLAYOUT(lhyp); 6909 if (lyp != NULL) { 6910 error = nfsrv_recalllayout(clientid, &stateid, &fh, NULL, NULL, 6911 laytype, p); 6912 if (error != 0 && error != NFSERR_NOMATCHLAYOUT) 6913 printf("recallold=%d\n", error); 6914 if (error != 0) { 6915 NFSLOCKLAYOUT(lhyp); 6916 /* 6917 * Since the hash list was unlocked, we need to 6918 * find it again. 6919 */ 6920 ret = nfsrv_findlayout(&clientid, &fh, laytype, p, 6921 &lyp); 6922 if (ret == 0 && 6923 (lyp->lay_flags & NFSLAY_CALLB) != 0 && 6924 lyp->lay_stateid.other[0] == stateid.other[0] && 6925 lyp->lay_stateid.other[1] == stateid.other[1] && 6926 lyp->lay_stateid.other[2] == stateid.other[2]) { 6927 /* 6928 * The client no longer knows this layout, so 6929 * it can be free'd now. 6930 */ 6931 if (error == NFSERR_NOMATCHLAYOUT) 6932 nfsrv_freelayout(&lhyp->list, lyp); 6933 else { 6934 /* 6935 * Leave it to be tried later by 6936 * clearing NFSLAY_CALLB and moving 6937 * it to the head of the list, so it 6938 * won't be tried again for a while. 6939 */ 6940 lyp->lay_flags &= ~NFSLAY_CALLB; 6941 TAILQ_REMOVE(&lhyp->list, lyp, 6942 lay_list); 6943 TAILQ_INSERT_HEAD(&lhyp->list, lyp, 6944 lay_list); 6945 } 6946 } 6947 NFSUNLOCKLAYOUT(lhyp); 6948 } 6949 } 6950 } 6951 6952 /* 6953 * Try and return layout(s). 
6954 */ 6955 int 6956 nfsrv_layoutreturn(struct nfsrv_descript *nd, vnode_t vp, 6957 int layouttype, int iomode, uint64_t offset, uint64_t len, int reclaim, 6958 int kind, nfsv4stateid_t *stateidp, int maxcnt, uint32_t *layp, int *fndp, 6959 struct ucred *cred, NFSPROC_T *p) 6960 { 6961 struct nfsvattr na; 6962 struct nfslayouthash *lhyp; 6963 struct nfslayout *lyp; 6964 fhandle_t fh; 6965 int error = 0; 6966 6967 *fndp = 0; 6968 if (kind == NFSV4LAYOUTRET_FILE) { 6969 error = nfsvno_getfh(vp, &fh, p); 6970 if (error == 0) { 6971 error = nfsrv_updatemdsattr(vp, &na, p); 6972 if (error != 0) 6973 printf("nfsrv_layoutreturn: updatemdsattr" 6974 " failed=%d\n", error); 6975 } 6976 if (error == 0) { 6977 if (reclaim == newnfs_true) { 6978 error = nfsrv_checkgrace(NULL, NULL, 6979 NFSLCK_RECLAIM); 6980 if (error != NFSERR_NOGRACE) 6981 error = 0; 6982 return (error); 6983 } 6984 lhyp = NFSLAYOUTHASH(&fh); 6985 NFSDRECALLLOCK(); 6986 NFSLOCKLAYOUT(lhyp); 6987 error = nfsrv_findlayout(&nd->nd_clientid, &fh, 6988 layouttype, p, &lyp); 6989 NFSD_DEBUG(4, "layoutret findlay=%d\n", error); 6990 if (error == 0 && 6991 stateidp->other[0] == lyp->lay_stateid.other[0] && 6992 stateidp->other[1] == lyp->lay_stateid.other[1] && 6993 stateidp->other[2] == lyp->lay_stateid.other[2]) { 6994 NFSD_DEBUG(4, "nfsrv_layoutreturn: stateid %d" 6995 " %x %x %x laystateid %d %x %x %x" 6996 " off=%ju len=%ju flgs=0x%x\n", 6997 stateidp->seqid, stateidp->other[0], 6998 stateidp->other[1], stateidp->other[2], 6999 lyp->lay_stateid.seqid, 7000 lyp->lay_stateid.other[0], 7001 lyp->lay_stateid.other[1], 7002 lyp->lay_stateid.other[2], 7003 (uintmax_t)offset, (uintmax_t)len, 7004 lyp->lay_flags); 7005 if (++lyp->lay_stateid.seqid == 0) 7006 lyp->lay_stateid.seqid = 1; 7007 stateidp->seqid = lyp->lay_stateid.seqid; 7008 if (offset == 0 && len == UINT64_MAX) { 7009 if ((iomode & NFSLAYOUTIOMODE_READ) != 7010 0) 7011 lyp->lay_flags &= ~NFSLAY_READ; 7012 if ((iomode & NFSLAYOUTIOMODE_RW) != 0) 7013 lyp->lay_flags &= ~NFSLAY_RW; 7014 if ((lyp->lay_flags & (NFSLAY_READ | 7015 NFSLAY_RW)) == 0) 7016 nfsrv_freelayout(&lhyp->list, 7017 lyp); 7018 else 7019 *fndp = 1; 7020 } else 7021 *fndp = 1; 7022 } 7023 NFSUNLOCKLAYOUT(lhyp); 7024 /* Search the nfsrv_recalllist for a match. */ 7025 TAILQ_FOREACH(lyp, &nfsrv_recalllisthead, lay_list) { 7026 if (NFSBCMP(&lyp->lay_fh, &fh, 7027 sizeof(fh)) == 0 && 7028 lyp->lay_clientid.qval == 7029 nd->nd_clientid.qval && 7030 stateidp->other[0] == 7031 lyp->lay_stateid.other[0] && 7032 stateidp->other[1] == 7033 lyp->lay_stateid.other[1] && 7034 stateidp->other[2] == 7035 lyp->lay_stateid.other[2]) { 7036 lyp->lay_flags |= NFSLAY_RETURNED; 7037 wakeup(lyp); 7038 error = 0; 7039 } 7040 } 7041 NFSDRECALLUNLOCK(); 7042 } 7043 if (layouttype == NFSLAYOUT_FLEXFILE) 7044 nfsrv_flexlayouterr(nd, layp, maxcnt, p); 7045 } else if (kind == NFSV4LAYOUTRET_FSID) 7046 nfsrv_freelayouts(&nd->nd_clientid, 7047 &vp->v_mount->mnt_stat.f_fsid, layouttype, iomode); 7048 else if (kind == NFSV4LAYOUTRET_ALL) 7049 nfsrv_freelayouts(&nd->nd_clientid, NULL, layouttype, iomode); 7050 else 7051 error = NFSERR_INVAL; 7052 if (error == -1) 7053 error = 0; 7054 return (error); 7055 } 7056 7057 /* 7058 * Look for an existing layout. 
7059 */ 7060 static int 7061 nfsrv_findlayout(nfsquad_t *clientidp, fhandle_t *fhp, int laytype, 7062 NFSPROC_T *p, struct nfslayout **lypp) 7063 { 7064 struct nfslayouthash *lhyp; 7065 struct nfslayout *lyp; 7066 int ret; 7067 7068 *lypp = NULL; 7069 ret = 0; 7070 lhyp = NFSLAYOUTHASH(fhp); 7071 TAILQ_FOREACH(lyp, &lhyp->list, lay_list) { 7072 if (NFSBCMP(&lyp->lay_fh, fhp, sizeof(*fhp)) == 0 && 7073 lyp->lay_clientid.qval == clientidp->qval && 7074 lyp->lay_type == laytype) 7075 break; 7076 } 7077 if (lyp != NULL) 7078 *lypp = lyp; 7079 else 7080 ret = -1; 7081 return (ret); 7082 } 7083 7084 /* 7085 * Add the new layout, as required. 7086 */ 7087 static int 7088 nfsrv_addlayout(struct nfsrv_descript *nd, struct nfslayout **lypp, 7089 nfsv4stateid_t *stateidp, char *layp, int *layoutlenp, NFSPROC_T *p) 7090 { 7091 struct nfsclient *clp; 7092 struct nfslayouthash *lhyp; 7093 struct nfslayout *lyp, *nlyp; 7094 fhandle_t *fhp; 7095 int error; 7096 7097 KASSERT((nd->nd_flag & ND_IMPLIEDCLID) != 0, 7098 ("nfsrv_layoutget: no nd_clientid\n")); 7099 lyp = *lypp; 7100 fhp = &lyp->lay_fh; 7101 NFSLOCKSTATE(); 7102 error = nfsrv_getclient((nfsquad_t)((u_quad_t)0), CLOPS_RENEW, &clp, 7103 NULL, (nfsquad_t)((u_quad_t)0), 0, nd, p); 7104 if (error != 0) { 7105 NFSUNLOCKSTATE(); 7106 return (error); 7107 } 7108 lyp->lay_stateid.seqid = stateidp->seqid = 1; 7109 lyp->lay_stateid.other[0] = stateidp->other[0] = 7110 clp->lc_clientid.lval[0]; 7111 lyp->lay_stateid.other[1] = stateidp->other[1] = 7112 clp->lc_clientid.lval[1]; 7113 lyp->lay_stateid.other[2] = stateidp->other[2] = 7114 nfsrv_nextstateindex(clp); 7115 NFSUNLOCKSTATE(); 7116 7117 lhyp = NFSLAYOUTHASH(fhp); 7118 NFSLOCKLAYOUT(lhyp); 7119 TAILQ_FOREACH(nlyp, &lhyp->list, lay_list) { 7120 if (NFSBCMP(&nlyp->lay_fh, fhp, sizeof(*fhp)) == 0 && 7121 nlyp->lay_clientid.qval == nd->nd_clientid.qval) 7122 break; 7123 } 7124 if (nlyp != NULL) { 7125 /* A layout already exists, so use it. */ 7126 nlyp->lay_flags |= (lyp->lay_flags & (NFSLAY_READ | NFSLAY_RW)); 7127 NFSBCOPY(nlyp->lay_xdr, layp, nlyp->lay_layoutlen); 7128 *layoutlenp = nlyp->lay_layoutlen; 7129 if (++nlyp->lay_stateid.seqid == 0) 7130 nlyp->lay_stateid.seqid = 1; 7131 stateidp->seqid = nlyp->lay_stateid.seqid; 7132 stateidp->other[0] = nlyp->lay_stateid.other[0]; 7133 stateidp->other[1] = nlyp->lay_stateid.other[1]; 7134 stateidp->other[2] = nlyp->lay_stateid.other[2]; 7135 NFSUNLOCKLAYOUT(lhyp); 7136 return (0); 7137 } 7138 7139 /* Insert the new layout in the lists. */ 7140 *lypp = NULL; 7141 atomic_add_int(&nfsrv_layoutcnt, 1); 7142 NFSBCOPY(lyp->lay_xdr, layp, lyp->lay_layoutlen); 7143 *layoutlenp = lyp->lay_layoutlen; 7144 TAILQ_INSERT_HEAD(&lhyp->list, lyp, lay_list); 7145 NFSUNLOCKLAYOUT(lhyp); 7146 return (0); 7147 } 7148 7149 /* 7150 * Get the devinfo for a deviceid. 7151 */ 7152 int 7153 nfsrv_getdevinfo(char *devid, int layouttype, uint32_t *maxcnt, 7154 uint32_t *notify, int *devaddrlen, char **devaddr) 7155 { 7156 struct nfsdevice *ds; 7157 7158 if ((layouttype != NFSLAYOUT_NFSV4_1_FILES && layouttype != 7159 NFSLAYOUT_FLEXFILE) || 7160 (nfsrv_maxpnfsmirror > 1 && layouttype == NFSLAYOUT_NFSV4_1_FILES)) 7161 return (NFSERR_UNKNLAYOUTTYPE); 7162 7163 /* 7164 * Now, search for the device id. Note that the structures won't go 7165 * away, but the order changes in the list. As such, the lock only 7166 * needs to be held during the search through the list. 
7167 */ 7168 NFSDDSLOCK(); 7169 TAILQ_FOREACH(ds, &nfsrv_devidhead, nfsdev_list) { 7170 if (NFSBCMP(devid, ds->nfsdev_deviceid, NFSX_V4DEVICEID) == 0 && 7171 ds->nfsdev_nmp != NULL) 7172 break; 7173 } 7174 NFSDDSUNLOCK(); 7175 if (ds == NULL) 7176 return (NFSERR_NOENT); 7177 7178 /* If the correct nfsdev_XXXXaddrlen is > 0, we have the device info. */ 7179 *devaddrlen = 0; 7180 if (layouttype == NFSLAYOUT_NFSV4_1_FILES) { 7181 *devaddrlen = ds->nfsdev_fileaddrlen; 7182 *devaddr = ds->nfsdev_fileaddr; 7183 } else if (layouttype == NFSLAYOUT_FLEXFILE) { 7184 *devaddrlen = ds->nfsdev_flexaddrlen; 7185 *devaddr = ds->nfsdev_flexaddr; 7186 } 7187 if (*devaddrlen == 0) 7188 return (NFSERR_UNKNLAYOUTTYPE); 7189 7190 /* 7191 * The XDR overhead is 3 unsigned values: layout_type, 7192 * length_of_address and notify bitmap. 7193 * If the notify array is changed to not all zeros, the 7194 * count of unsigned values must be increased. 7195 */ 7196 if (*maxcnt > 0 && *maxcnt < NFSM_RNDUP(*devaddrlen) + 7197 3 * NFSX_UNSIGNED) { 7198 *maxcnt = NFSM_RNDUP(*devaddrlen) + 3 * NFSX_UNSIGNED; 7199 return (NFSERR_TOOSMALL); 7200 } 7201 return (0); 7202 } 7203 7204 /* 7205 * Free a list of layout state structures. 7206 */ 7207 static void 7208 nfsrv_freelayoutlist(nfsquad_t clientid) 7209 { 7210 struct nfslayouthash *lhyp; 7211 struct nfslayout *lyp, *nlyp; 7212 int i; 7213 7214 for (i = 0; i < nfsrv_layouthashsize; i++) { 7215 lhyp = &nfslayouthash[i]; 7216 NFSLOCKLAYOUT(lhyp); 7217 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) { 7218 if (lyp->lay_clientid.qval == clientid.qval) 7219 nfsrv_freelayout(&lhyp->list, lyp); 7220 } 7221 NFSUNLOCKLAYOUT(lhyp); 7222 } 7223 } 7224 7225 /* 7226 * Free up a layout. 7227 */ 7228 static void 7229 nfsrv_freelayout(struct nfslayouthead *lhp, struct nfslayout *lyp) 7230 { 7231 7232 NFSD_DEBUG(4, "Freelayout=%p\n", lyp); 7233 atomic_add_int(&nfsrv_layoutcnt, -1); 7234 TAILQ_REMOVE(lhp, lyp, lay_list); 7235 free(lyp, M_NFSDSTATE); 7236 } 7237 7238 /* 7239 * Free up a device id. 7240 */ 7241 void 7242 nfsrv_freeonedevid(struct nfsdevice *ds) 7243 { 7244 int i; 7245 7246 atomic_add_int(&nfsrv_devidcnt, -1); 7247 vrele(ds->nfsdev_dvp); 7248 for (i = 0; i < nfsrv_dsdirsize; i++) 7249 if (ds->nfsdev_dsdir[i] != NULL) 7250 vrele(ds->nfsdev_dsdir[i]); 7251 free(ds->nfsdev_fileaddr, M_NFSDSTATE); 7252 free(ds->nfsdev_flexaddr, M_NFSDSTATE); 7253 free(ds->nfsdev_host, M_NFSDSTATE); 7254 free(ds, M_NFSDSTATE); 7255 } 7256 7257 /* 7258 * Free up a device id and its mirrors. 7259 */ 7260 static void 7261 nfsrv_freedevid(struct nfsdevice *ds) 7262 { 7263 7264 TAILQ_REMOVE(&nfsrv_devidhead, ds, nfsdev_list); 7265 nfsrv_freeonedevid(ds); 7266 } 7267 7268 /* 7269 * Free all layouts and device ids. 7270 * Done when the nfsd threads are shut down since there may be a new 7271 * modified device id list created when the nfsd is restarted. 7272 */ 7273 void 7274 nfsrv_freealllayoutsanddevids(void) 7275 { 7276 struct nfsdontlist *mrp, *nmrp; 7277 struct nfslayout *lyp, *nlyp; 7278 7279 /* Get rid of the deviceid structures. */ 7280 nfsrv_freealldevids(); 7281 TAILQ_INIT(&nfsrv_devidhead); 7282 nfsrv_devidcnt = 0; 7283 7284 /* Get rid of all layouts. */ 7285 nfsrv_freealllayouts(); 7286 7287 /* Get rid of any nfsdontlist entries. */ 7288 LIST_FOREACH_SAFE(mrp, &nfsrv_dontlisthead, nfsmr_list, nmrp) 7289 free(mrp, M_NFSDSTATE); 7290 LIST_INIT(&nfsrv_dontlisthead); 7291 nfsrv_dontlistlen = 0; 7292 7293 /* Free layouts in the recall list. 
*/ 7294 TAILQ_FOREACH_SAFE(lyp, &nfsrv_recalllisthead, lay_list, nlyp) 7295 nfsrv_freelayout(&nfsrv_recalllisthead, lyp); 7296 TAILQ_INIT(&nfsrv_recalllisthead); 7297 } 7298 7299 /* 7300 * Free layouts that match the arguments. 7301 */ 7302 static void 7303 nfsrv_freelayouts(nfsquad_t *clid, fsid_t *fs, int laytype, int iomode) 7304 { 7305 struct nfslayouthash *lhyp; 7306 struct nfslayout *lyp, *nlyp; 7307 int i; 7308 7309 for (i = 0; i < nfsrv_layouthashsize; i++) { 7310 lhyp = &nfslayouthash[i]; 7311 NFSLOCKLAYOUT(lhyp); 7312 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) { 7313 if (clid->qval != lyp->lay_clientid.qval) 7314 continue; 7315 if (fs != NULL && (fs->val[0] != lyp->lay_fsid.val[0] || 7316 fs->val[1] != lyp->lay_fsid.val[1])) 7317 continue; 7318 if (laytype != lyp->lay_type) 7319 continue; 7320 if ((iomode & NFSLAYOUTIOMODE_READ) != 0) 7321 lyp->lay_flags &= ~NFSLAY_READ; 7322 if ((iomode & NFSLAYOUTIOMODE_RW) != 0) 7323 lyp->lay_flags &= ~NFSLAY_RW; 7324 if ((lyp->lay_flags & (NFSLAY_READ | NFSLAY_RW)) == 0) 7325 nfsrv_freelayout(&lhyp->list, lyp); 7326 } 7327 NFSUNLOCKLAYOUT(lhyp); 7328 } 7329 } 7330 7331 /* 7332 * Free all layouts for the argument file. 7333 */ 7334 void 7335 nfsrv_freefilelayouts(fhandle_t *fhp) 7336 { 7337 struct nfslayouthash *lhyp; 7338 struct nfslayout *lyp, *nlyp; 7339 7340 lhyp = NFSLAYOUTHASH(fhp); 7341 NFSLOCKLAYOUT(lhyp); 7342 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) { 7343 if (NFSBCMP(&lyp->lay_fh, fhp, sizeof(*fhp)) == 0) 7344 nfsrv_freelayout(&lhyp->list, lyp); 7345 } 7346 NFSUNLOCKLAYOUT(lhyp); 7347 } 7348 7349 /* 7350 * Free all layouts. 7351 */ 7352 static void 7353 nfsrv_freealllayouts(void) 7354 { 7355 struct nfslayouthash *lhyp; 7356 struct nfslayout *lyp, *nlyp; 7357 int i; 7358 7359 for (i = 0; i < nfsrv_layouthashsize; i++) { 7360 lhyp = &nfslayouthash[i]; 7361 NFSLOCKLAYOUT(lhyp); 7362 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) 7363 nfsrv_freelayout(&lhyp->list, lyp); 7364 NFSUNLOCKLAYOUT(lhyp); 7365 } 7366 } 7367 7368 /* 7369 * Look up the mount path for the DS server. 7370 */ 7371 static int 7372 nfsrv_setdsserver(char *dspathp, NFSPROC_T *p, struct nfsdevice **dsp) 7373 { 7374 struct nameidata nd; 7375 struct nfsdevice *ds; 7376 int error, i; 7377 char *dsdirpath; 7378 size_t dsdirsize; 7379 7380 NFSD_DEBUG(4, "setdssrv path=%s\n", dspathp); 7381 *dsp = NULL; 7382 NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, 7383 dspathp, p); 7384 error = namei(&nd); 7385 NFSD_DEBUG(4, "lookup=%d\n", error); 7386 if (error != 0) 7387 return (error); 7388 if (nd.ni_vp->v_type != VDIR) { 7389 vput(nd.ni_vp); 7390 NFSD_DEBUG(4, "dspath not dir\n"); 7391 return (ENOTDIR); 7392 } 7393 if (strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name, "nfs") != 0) { 7394 vput(nd.ni_vp); 7395 NFSD_DEBUG(4, "dspath not an NFS mount\n"); 7396 return (ENXIO); 7397 } 7398 7399 /* 7400 * Allocate a DS server structure with the NFS mounted directory 7401 * vnode reference counted, so that a non-forced dismount will 7402 * fail with EBUSY. 7403 */ 7404 *dsp = ds = malloc(sizeof(*ds) + nfsrv_dsdirsize * sizeof(vnode_t), 7405 M_NFSDSTATE, M_WAITOK | M_ZERO); 7406 ds->nfsdev_dvp = nd.ni_vp; 7407 ds->nfsdev_nmp = VFSTONFS(nd.ni_vp->v_mount); 7408 NFSVOPUNLOCK(nd.ni_vp, 0); 7409 7410 dsdirsize = strlen(dspathp) + 16; 7411 dsdirpath = malloc(dsdirsize, M_TEMP, M_WAITOK); 7412 /* Now, create the DS directory structures. 
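* There are nfsrv_dsdirsize subdirectories named ds0, ds1, ... under
* the DS mount point where the data files are stored. Look each one up
* and hold a reference on its vnode.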
*/ 7413 for (i = 0; i < nfsrv_dsdirsize; i++) { 7414 snprintf(dsdirpath, dsdirsize, "%s/ds%d", dspathp, i); 7415 NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 7416 UIO_SYSSPACE, dsdirpath, p); 7417 error = namei(&nd); 7418 NFSD_DEBUG(4, "dsdirpath=%s lookup=%d\n", dsdirpath, error); 7419 if (error != 0) 7420 break; 7421 if (nd.ni_vp->v_type != VDIR) { 7422 vput(nd.ni_vp); 7423 error = ENOTDIR; 7424 NFSD_DEBUG(4, "dsdirpath not a VDIR\n"); 7425 break; 7426 } 7427 if (strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name, "nfs") != 0) { 7428 vput(nd.ni_vp); 7429 error = ENXIO; 7430 NFSD_DEBUG(4, "dsdirpath not an NFS mount\n"); 7431 break; 7432 } 7433 ds->nfsdev_dsdir[i] = nd.ni_vp; 7434 NFSVOPUNLOCK(nd.ni_vp, 0); 7435 } 7436 free(dsdirpath, M_TEMP); 7437 7438 TAILQ_INSERT_TAIL(&nfsrv_devidhead, ds, nfsdev_list); 7439 atomic_add_int(&nfsrv_devidcnt, 1); 7440 return (error); 7441 } 7442 7443 /* 7444 * Look up the mount path for the DS server and delete it. 7445 */ 7446 int 7447 nfsrv_deldsserver(char *dspathp, NFSPROC_T *p) 7448 { 7449 struct mount *mp; 7450 struct nfsmount *nmp; 7451 struct nfsdevice *ds; 7452 int error; 7453 7454 NFSD_DEBUG(4, "deldssrv path=%s\n", dspathp); 7455 /* 7456 * Search for the path in the mount list. Avoid looking the path 7457 * up, since this mount point may be hung, with associated locked 7458 * vnodes, etc. 7459 * Set NFSMNTP_CANCELRPCS so that any forced dismount will be blocked 7460 * until this completes. 7461 * As noted in the man page, this should be done before any forced 7462 * dismount on the mount point, but at least the handshake on 7463 * NFSMNTP_CANCELRPCS should make it safe. 7464 */ 7465 error = 0; 7466 ds = NULL; 7467 nmp = NULL; 7468 mtx_lock(&mountlist_mtx); 7469 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 7470 if (strcmp(mp->mnt_stat.f_mntonname, dspathp) == 0 && 7471 strcmp(mp->mnt_stat.f_fstypename, "nfs") == 0 && 7472 mp->mnt_data != NULL) { 7473 nmp = VFSTONFS(mp); 7474 NFSLOCKMNT(nmp); 7475 if ((nmp->nm_privflag & (NFSMNTP_FORCEDISM | 7476 NFSMNTP_CANCELRPCS)) == 0) { 7477 nmp->nm_privflag |= NFSMNTP_CANCELRPCS; 7478 NFSUNLOCKMNT(nmp); 7479 } else { 7480 NFSUNLOCKMNT(nmp); 7481 nmp = NULL; 7482 } 7483 break; 7484 } 7485 } 7486 mtx_unlock(&mountlist_mtx); 7487 7488 if (nmp != NULL) { 7489 ds = nfsrv_deldsnmp(nmp, p); 7490 NFSD_DEBUG(4, "deldsnmp=%p\n", ds); 7491 if (ds != NULL) { 7492 nfsrv_killrpcs(nmp); 7493 NFSD_DEBUG(4, "aft killrpcs\n"); 7494 } else 7495 error = ENXIO; 7496 NFSLOCKMNT(nmp); 7497 nmp->nm_privflag &= ~NFSMNTP_CANCELRPCS; 7498 wakeup(nmp); 7499 NFSUNLOCKMNT(nmp); 7500 } else 7501 error = EINVAL; 7502 return (error); 7503 } 7504 7505 /* 7506 * Search for and remove a DS entry which matches the "nmp" argument. 7507 * The nfsdevice structure pointer is returned so that the caller can 7508 * free it via nfsrv_freeonedevid(). 7509 */ 7510 struct nfsdevice * 7511 nfsrv_deldsnmp(struct nfsmount *nmp, NFSPROC_T *p) 7512 { 7513 struct nfsdevice *fndds; 7514 7515 NFSD_DEBUG(4, "deldsdvp\n"); 7516 NFSDDSLOCK(); 7517 fndds = nfsv4_findmirror(nmp); 7518 if (fndds != NULL) 7519 nfsrv_deleteds(fndds); 7520 NFSDDSUNLOCK(); 7521 if (fndds != NULL) { 7522 nfsrv_flexmirrordel(fndds->nfsdev_deviceid, p); 7523 printf("pNFS server: mirror %s failed\n", fndds->nfsdev_host); 7524 } 7525 return (fndds); 7526 } 7527 7528 /* 7529 * Similar to nfsrv_deldsnmp(), except that the DS is indicated by deviceid. 7530 * This function also calls nfsrv_killrpcs() to unblock RPCs on the mount 7531 * point. 
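 * The NFSMNTP_CANCELRPCS handshake is used here as well, so that a
 * concurrent forced dismount of the mount point is held off until
 * nfsrv_killrpcs() has completed.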
7532 * Also, returns an error instead of the nfsdevice found. 7533 */ 7534 static int 7535 nfsrv_delds(char *devid, NFSPROC_T *p) 7536 { 7537 struct nfsdevice *ds, *fndds; 7538 struct nfsmount *nmp; 7539 int fndmirror; 7540 7541 NFSD_DEBUG(4, "delds\n"); 7542 /* 7543 * Search the DS server list for a match with devid. 7544 * Remove the DS entry if found and there is a mirror. 7545 */ 7546 fndds = NULL; 7547 nmp = NULL; 7548 fndmirror = 0; 7549 NFSDDSLOCK(); 7550 TAILQ_FOREACH(ds, &nfsrv_devidhead, nfsdev_list) { 7551 if (NFSBCMP(ds->nfsdev_deviceid, devid, NFSX_V4DEVICEID) == 0 && 7552 ds->nfsdev_nmp != NULL) { 7553 NFSD_DEBUG(4, "fnd main ds\n"); 7554 fndds = ds; 7555 } else if (ds->nfsdev_nmp != NULL) 7556 fndmirror = 1; 7557 if (fndds != NULL && fndmirror != 0) 7558 break; 7559 } 7560 if (fndds != NULL && fndmirror != 0) { 7561 nmp = fndds->nfsdev_nmp; 7562 NFSLOCKMNT(nmp); 7563 if ((nmp->nm_privflag & (NFSMNTP_FORCEDISM | 7564 NFSMNTP_CANCELRPCS)) == 0) { 7565 nmp->nm_privflag |= NFSMNTP_CANCELRPCS; 7566 NFSUNLOCKMNT(nmp); 7567 nfsrv_deleteds(fndds); 7568 } else { 7569 NFSUNLOCKMNT(nmp); 7570 nmp = NULL; 7571 } 7572 } 7573 NFSDDSUNLOCK(); 7574 if (fndds != NULL && nmp != NULL) { 7575 nfsrv_flexmirrordel(fndds->nfsdev_deviceid, p); 7576 printf("pNFS server: mirror %s failed\n", fndds->nfsdev_host); 7577 nfsrv_killrpcs(nmp); 7578 NFSLOCKMNT(nmp); 7579 nmp->nm_privflag &= ~NFSMNTP_CANCELRPCS; 7580 wakeup(nmp); 7581 NFSUNLOCKMNT(nmp); 7582 return (0); 7583 } 7584 return (ENXIO); 7585 } 7586 7587 /* 7588 * Mark a DS as disabled by setting nfsdev_nmp = NULL. 7589 */ 7590 static void 7591 nfsrv_deleteds(struct nfsdevice *fndds) 7592 { 7593 7594 NFSD_DEBUG(4, "deleteds: deleting a mirror\n"); 7595 fndds->nfsdev_nmp = NULL; 7596 } 7597 7598 /* 7599 * Fill in the addr structures for the File and Flex File layouts. 7600 */ 7601 static void 7602 nfsrv_allocdevid(struct nfsdevice *ds, char *addr, char *dnshost) 7603 { 7604 uint32_t *tl; 7605 char *netprot; 7606 int addrlen; 7607 static uint64_t new_devid = 0; 7608 7609 if (strchr(addr, ':') != NULL) 7610 netprot = "tcp6"; 7611 else 7612 netprot = "tcp"; 7613 7614 /* Fill in the device id. */ 7615 NFSBCOPY(&nfsdev_time, ds->nfsdev_deviceid, sizeof(nfsdev_time)); 7616 new_devid++; 7617 NFSBCOPY(&new_devid, &ds->nfsdev_deviceid[sizeof(nfsdev_time)], 7618 sizeof(new_devid)); 7619 7620 /* 7621 * Fill in the file addr (actually the nfsv4_file_layout_ds_addr4 7622 * as defined in RFC5661) in XDR. 7623 */ 7624 addrlen = NFSM_RNDUP(strlen(addr)) + NFSM_RNDUP(strlen(netprot)) + 7625 6 * NFSX_UNSIGNED; 7626 NFSD_DEBUG(4, "hn=%s addr=%s netprot=%s\n", dnshost, addr, netprot); 7627 ds->nfsdev_fileaddrlen = addrlen; 7628 tl = malloc(addrlen, M_NFSDSTATE, M_WAITOK | M_ZERO); 7629 ds->nfsdev_fileaddr = (char *)tl; 7630 *tl++ = txdr_unsigned(1); /* One stripe with index 0. */ 7631 *tl++ = 0; 7632 *tl++ = txdr_unsigned(1); /* One multipath list */ 7633 *tl++ = txdr_unsigned(1); /* with one entry in it. */ 7634 /* The netaddr for this one entry. */ 7635 *tl++ = txdr_unsigned(strlen(netprot)); 7636 NFSBCOPY(netprot, tl, strlen(netprot)); 7637 tl += (NFSM_RNDUP(strlen(netprot)) / NFSX_UNSIGNED); 7638 *tl++ = txdr_unsigned(strlen(addr)); 7639 NFSBCOPY(addr, tl, strlen(addr)); 7640 7641 /* 7642 * Fill in the flex file addr (actually the ff_device_addr4 7643 * as defined for Flexible File Layout) in XDR. 
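 * Roughly, the XDR generated below is:
 *   1                          - one multipath entry
 *   netid ("tcp" or "tcp6") and the address string, each as a counted
 *     opaque padded to a 4 byte boundary
 *   1, 4, 1                    - one versions entry: NFSv4, minor version 1
 *   NFS_SRVMAXIO, NFS_SRVMAXIO - DS max rsize and wsize
 *   TRUE                       - tightly coupled
 * (ff_device_addr4 is specified in RFC 8435.)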
7644 */ 7645 addrlen = NFSM_RNDUP(strlen(addr)) + NFSM_RNDUP(strlen(netprot)) + 7646 9 * NFSX_UNSIGNED; 7647 ds->nfsdev_flexaddrlen = addrlen; 7648 tl = malloc(addrlen, M_NFSDSTATE, M_WAITOK | M_ZERO); 7649 ds->nfsdev_flexaddr = (char *)tl; 7650 *tl++ = txdr_unsigned(1); /* One multipath entry. */ 7651 /* The netaddr for this one entry. */ 7652 *tl++ = txdr_unsigned(strlen(netprot)); 7653 NFSBCOPY(netprot, tl, strlen(netprot)); 7654 tl += (NFSM_RNDUP(strlen(netprot)) / NFSX_UNSIGNED); 7655 *tl++ = txdr_unsigned(strlen(addr)); 7656 NFSBCOPY(addr, tl, strlen(addr)); 7657 tl += (NFSM_RNDUP(strlen(addr)) / NFSX_UNSIGNED); 7658 *tl++ = txdr_unsigned(1); /* One NFS Version. */ 7659 *tl++ = txdr_unsigned(NFS_VER4); /* NFSv4. */ 7660 *tl++ = txdr_unsigned(NFSV41_MINORVERSION); /* Minor version 1. */ 7661 *tl++ = txdr_unsigned(NFS_SRVMAXIO); /* DS max rsize. */ 7662 *tl++ = txdr_unsigned(NFS_SRVMAXIO); /* DS max wsize. */ 7663 *tl = newnfs_true; /* Tightly coupled. */ 7664 7665 ds->nfsdev_hostnamelen = strlen(dnshost); 7666 ds->nfsdev_host = malloc(ds->nfsdev_hostnamelen + 1, M_NFSDSTATE, 7667 M_WAITOK); 7668 NFSBCOPY(dnshost, ds->nfsdev_host, ds->nfsdev_hostnamelen + 1); 7669 } 7670 7671 7672 /* 7673 * Create the device id list. 7674 * Return 0 if the nfsd threads are to run and ENXIO if the "-p" argument 7675 * is misconfigured. 7676 */ 7677 int 7678 nfsrv_createdevids(struct nfsd_nfsd_args *args, NFSPROC_T *p) 7679 { 7680 struct nfsdevice *ds; 7681 char *addrp, *dnshostp, *dspathp; 7682 int error, i; 7683 7684 addrp = args->addr; 7685 dnshostp = args->dnshost; 7686 dspathp = args->dspath; 7687 nfsrv_maxpnfsmirror = args->mirrorcnt; 7688 if (addrp == NULL || dnshostp == NULL || dspathp == NULL) 7689 return (0); 7690 7691 /* 7692 * Loop around for each nul-terminated string in args->addr, 7693 * args->dnshost and args->dnspath. 7694 */ 7695 while (addrp < (args->addr + args->addrlen) && 7696 dnshostp < (args->dnshost + args->dnshostlen) && 7697 dspathp < (args->dspath + args->dspathlen)) { 7698 error = nfsrv_setdsserver(dspathp, p, &ds); 7699 if (error != 0) { 7700 /* Free all DS servers. */ 7701 nfsrv_freealldevids(); 7702 nfsrv_devidcnt = 0; 7703 return (ENXIO); 7704 } 7705 nfsrv_allocdevid(ds, addrp, dnshostp); 7706 addrp += (strlen(addrp) + 1); 7707 dnshostp += (strlen(dnshostp) + 1); 7708 dspathp += (strlen(dspathp) + 1); 7709 } 7710 if (nfsrv_devidcnt < nfsrv_maxpnfsmirror) { 7711 /* Free all DS servers. */ 7712 nfsrv_freealldevids(); 7713 nfsrv_devidcnt = 0; 7714 nfsrv_maxpnfsmirror = 1; 7715 return (ENXIO); 7716 } 7717 7718 /* 7719 * Allocate the nfslayout hash table now, since this is a pNFS server. 7720 * Make it 1% of the high water mark and at least 100. 7721 */ 7722 if (nfslayouthash == NULL) { 7723 nfsrv_layouthashsize = nfsrv_layouthighwater / 100; 7724 if (nfsrv_layouthashsize < 100) 7725 nfsrv_layouthashsize = 100; 7726 nfslayouthash = mallocarray(nfsrv_layouthashsize, 7727 sizeof(struct nfslayouthash), M_NFSDSESSION, M_WAITOK | 7728 M_ZERO); 7729 for (i = 0; i < nfsrv_layouthashsize; i++) { 7730 mtx_init(&nfslayouthash[i].mtx, "nfslm", NULL, MTX_DEF); 7731 TAILQ_INIT(&nfslayouthash[i].list); 7732 } 7733 } 7734 return (0); 7735 } 7736 7737 /* 7738 * Free all device ids. 
7739 */ 7740 static void 7741 nfsrv_freealldevids(void) 7742 { 7743 struct nfsdevice *ds, *nds; 7744 7745 TAILQ_FOREACH_SAFE(ds, &nfsrv_devidhead, nfsdev_list, nds) 7746 nfsrv_freedevid(ds); 7747 } 7748 7749 /* 7750 * Check to see if there is a Read/Write Layout plus either: 7751 * - A Write Delegation 7752 * or 7753 * - An Open with Write_access. 7754 * Return 1 if this is the case and 0 otherwise. 7755 * This function is used by nfsrv_proxyds() to decide if doing a Proxy 7756 * Getattr RPC to the Data Server (DS) is necessary. 7757 */ 7758 #define NFSCLIDVECSIZE 6 7759 APPLESTATIC int 7760 nfsrv_checkdsattr(struct nfsrv_descript *nd, vnode_t vp, NFSPROC_T *p) 7761 { 7762 fhandle_t fh, *tfhp; 7763 struct nfsstate *stp; 7764 struct nfslayout *lyp; 7765 struct nfslayouthash *lhyp; 7766 struct nfslockhashhead *hp; 7767 struct nfslockfile *lfp; 7768 nfsquad_t clid[NFSCLIDVECSIZE]; 7769 int clidcnt, ret; 7770 7771 ret = nfsvno_getfh(vp, &fh, p); 7772 if (ret != 0) 7773 return (0); 7774 7775 /* First check for a Read/Write Layout. */ 7776 clidcnt = 0; 7777 lhyp = NFSLAYOUTHASH(&fh); 7778 NFSLOCKLAYOUT(lhyp); 7779 TAILQ_FOREACH(lyp, &lhyp->list, lay_list) { 7780 if (NFSBCMP(&lyp->lay_fh, &fh, sizeof(fh)) == 0 && 7781 ((lyp->lay_flags & NFSLAY_RW) != 0 || 7782 ((lyp->lay_flags & NFSLAY_READ) != 0 && 7783 nfsrv_pnfsatime != 0))) { 7784 if (clidcnt < NFSCLIDVECSIZE) 7785 clid[clidcnt].qval = lyp->lay_clientid.qval; 7786 clidcnt++; 7787 } 7788 } 7789 NFSUNLOCKLAYOUT(lhyp); 7790 if (clidcnt == 0) { 7791 /* None found, so return 0. */ 7792 return (0); 7793 } 7794 7795 /* Get the nfslockfile for this fh. */ 7796 NFSLOCKSTATE(); 7797 hp = NFSLOCKHASH(&fh); 7798 LIST_FOREACH(lfp, hp, lf_hash) { 7799 tfhp = &lfp->lf_fh; 7800 if (NFSVNO_CMPFH(&fh, tfhp)) 7801 break; 7802 } 7803 if (lfp == NULL) { 7804 /* None found, so return 0. */ 7805 NFSUNLOCKSTATE(); 7806 return (0); 7807 } 7808 7809 /* Now, look for a Write delegation for this clientid. */ 7810 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { 7811 if ((stp->ls_flags & NFSLCK_DELEGWRITE) != 0 && 7812 nfsrv_fndclid(clid, stp->ls_clp->lc_clientid, clidcnt) != 0) 7813 break; 7814 } 7815 if (stp != NULL) { 7816 /* Found one, so return 1. */ 7817 NFSUNLOCKSTATE(); 7818 return (1); 7819 } 7820 7821 /* No Write delegation, so look for an Open with Write_access. */ 7822 LIST_FOREACH(stp, &lfp->lf_open, ls_file) { 7823 KASSERT((stp->ls_flags & NFSLCK_OPEN) != 0, 7824 ("nfsrv_checkdsattr: Non-open in Open list\n")); 7825 if ((stp->ls_flags & NFSLCK_WRITEACCESS) != 0 && 7826 nfsrv_fndclid(clid, stp->ls_clp->lc_clientid, clidcnt) != 0) 7827 break; 7828 } 7829 NFSUNLOCKSTATE(); 7830 if (stp != NULL) 7831 return (1); 7832 return (0); 7833 } 7834 7835 /* 7836 * Look for a matching clientid in the vector. Return 1 if one might match. 7837 */ 7838 static int 7839 nfsrv_fndclid(nfsquad_t *clidvec, nfsquad_t clid, int clidcnt) 7840 { 7841 int i; 7842 7843 /* If too many for the vector, return 1 since there might be a match. */ 7844 if (clidcnt > NFSCLIDVECSIZE) 7845 return (1); 7846 7847 for (i = 0; i < clidcnt; i++) 7848 if (clidvec[i].qval == clid.qval) 7849 return (1); 7850 return (0); 7851 } 7852 7853 /* 7854 * Check the don't list for "vp" and see if issuing an rw layout is allowed. 7855 * Return 1 if issuing an rw layout isn't allowed, 0 otherwise. 
7856 */ 7857 static int 7858 nfsrv_dontlayout(fhandle_t *fhp) 7859 { 7860 struct nfsdontlist *mrp; 7861 int ret; 7862 7863 if (nfsrv_dontlistlen == 0) 7864 return (0); 7865 ret = 0; 7866 NFSDDONTLISTLOCK(); 7867 LIST_FOREACH(mrp, &nfsrv_dontlisthead, nfsmr_list) { 7868 if (NFSBCMP(fhp, &mrp->nfsmr_fh, sizeof(*fhp)) == 0 && 7869 (mrp->nfsmr_flags & NFSMR_DONTLAYOUT) != 0) { 7870 ret = 1; 7871 break; 7872 } 7873 } 7874 NFSDDONTLISTUNLOCK(); 7875 return (ret); 7876 } 7877 7878 #define PNFSDS_COPYSIZ 65536 7879 /* 7880 * Create a new file on a DS and copy the contents of an extant DS file to it. 7881 * This can be used for recovery of a DS file onto a recovered DS. 7882 * The steps are: 7883 * - When called, the MDS file's vnode is locked, blocking LayoutGet operations. 7884 * - Disable issuing of read/write layouts for the file via the nfsdontlist, 7885 * so that they will be disabled after the MDS file's vnode is unlocked. 7886 * - Set up the nfsrv_recalllist so that recall of read/write layouts can 7887 * be done. 7888 * - Unlock the MDS file's vnode, so that the client(s) can perform proxied 7889 * writes, LayoutCommits and LayoutReturns for the file when completing the 7890 * LayoutReturn requested by the LayoutRecall callback. 7891 * - Issue a LayoutRecall callback for all read/write layouts and wait for 7892 * them to be returned. (If the LayoutRecall callback replies 7893 * NFSERR_NOMATCHLAYOUT, they are gone and no LayoutReturn is needed.) 7894 * - Exclusively lock the MDS file's vnode. This ensures that no proxied 7895 * writes are in progress or can occur during the DS file copy. 7896 * It also blocks Setattr operations. 7897 * - Create the file on the recovered mirror. 7898 * - Copy the file from the operational DS. 7899 * - Copy any ACL from the MDS file to the new DS file. 7900 * - Set the modify time of the new DS file to that of the MDS file. 7901 * - Update the extended attribute for the MDS file. 7902 * - Enable issuing of rw layouts by deleting the nfsdontlist entry. 7903 * - The caller will unlock the MDS file's vnode allowing operations 7904 * to continue normally, since it is now on the mirror again. 7905 */ 7906 int 7907 nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds, 7908 struct pnfsdsfile *pf, struct pnfsdsfile *wpf, int mirrorcnt, 7909 struct ucred *cred, NFSPROC_T *p) 7910 { 7911 struct nfsdontlist *mrp, *nmrp; 7912 struct nfslayouthash *lhyp; 7913 struct nfslayout *lyp, *nlyp; 7914 struct nfslayouthead thl; 7915 struct mount *mp; 7916 struct acl *aclp; 7917 struct vattr va; 7918 struct timespec mtime; 7919 fhandle_t fh; 7920 vnode_t tvp; 7921 off_t rdpos, wrpos; 7922 ssize_t aresid; 7923 char *dat; 7924 int didprintf, ret, retacl, xfer; 7925 7926 ASSERT_VOP_LOCKED(fvp, "nfsrv_copymr fvp"); 7927 ASSERT_VOP_LOCKED(vp, "nfsrv_copymr vp"); 7928 /* 7929 * Allocate a nfsdontlist entry and set the NFSMR_DONTLAYOUT flag 7930 * so that no more RW layouts will get issued. 
7931 */ 7932 ret = nfsvno_getfh(vp, &fh, p); 7933 if (ret != 0) { 7934 NFSD_DEBUG(4, "nfsrv_copymr: getfh=%d\n", ret); 7935 return (ret); 7936 } 7937 nmrp = malloc(sizeof(*nmrp), M_NFSDSTATE, M_WAITOK); 7938 nmrp->nfsmr_flags = NFSMR_DONTLAYOUT; 7939 NFSBCOPY(&fh, &nmrp->nfsmr_fh, sizeof(fh)); 7940 NFSDDONTLISTLOCK(); 7941 LIST_FOREACH(mrp, &nfsrv_dontlisthead, nfsmr_list) { 7942 if (NFSBCMP(&fh, &mrp->nfsmr_fh, sizeof(fh)) == 0) 7943 break; 7944 } 7945 if (mrp == NULL) { 7946 LIST_INSERT_HEAD(&nfsrv_dontlisthead, nmrp, nfsmr_list); 7947 mrp = nmrp; 7948 nmrp = NULL; 7949 nfsrv_dontlistlen++; 7950 NFSD_DEBUG(4, "nfsrv_copymr: in dontlist\n"); 7951 } else { 7952 NFSDDONTLISTUNLOCK(); 7953 free(nmrp, M_NFSDSTATE); 7954 NFSD_DEBUG(4, "nfsrv_copymr: dup dontlist\n"); 7955 return (ENXIO); 7956 } 7957 NFSDDONTLISTUNLOCK(); 7958 7959 /* 7960 * Search for all RW layouts for this file. Move them to the 7961 * recall list, so they can be recalled and their return noted. 7962 */ 7963 lhyp = NFSLAYOUTHASH(&fh); 7964 NFSDRECALLLOCK(); 7965 NFSLOCKLAYOUT(lhyp); 7966 TAILQ_FOREACH_SAFE(lyp, &lhyp->list, lay_list, nlyp) { 7967 if (NFSBCMP(&lyp->lay_fh, &fh, sizeof(fh)) == 0 && 7968 (lyp->lay_flags & NFSLAY_RW) != 0) { 7969 TAILQ_REMOVE(&lhyp->list, lyp, lay_list); 7970 TAILQ_INSERT_HEAD(&nfsrv_recalllisthead, lyp, lay_list); 7971 lyp->lay_trycnt = 0; 7972 } 7973 } 7974 NFSUNLOCKLAYOUT(lhyp); 7975 NFSDRECALLUNLOCK(); 7976 7977 ret = 0; 7978 didprintf = 0; 7979 TAILQ_INIT(&thl); 7980 /* Unlock the MDS vp, so that a LayoutReturn can be done on it. */ 7981 NFSVOPUNLOCK(vp, 0); 7982 /* Now, do a recall for all layouts not yet recalled. */ 7983 tryagain: 7984 NFSDRECALLLOCK(); 7985 TAILQ_FOREACH(lyp, &nfsrv_recalllisthead, lay_list) { 7986 if (NFSBCMP(&lyp->lay_fh, &fh, sizeof(fh)) == 0 && 7987 (lyp->lay_flags & NFSLAY_RECALL) == 0) { 7988 lyp->lay_flags |= NFSLAY_RECALL; 7989 /* 7990 * The layout stateid.seqid needs to be incremented 7991 * before doing a LAYOUT_RECALL callback. 7992 */ 7993 if (++lyp->lay_stateid.seqid == 0) 7994 lyp->lay_stateid.seqid = 1; 7995 NFSDRECALLUNLOCK(); 7996 nfsrv_recalllayout(lyp->lay_clientid, &lyp->lay_stateid, 7997 &lyp->lay_fh, lyp, &nfsrv_recalllisthead, 7998 lyp->lay_type, p); 7999 NFSD_DEBUG(4, "nfsrv_copymr: recalled layout\n"); 8000 goto tryagain; 8001 } 8002 } 8003 8004 /* Now wait for them to be returned. */ 8005 tryagain2: 8006 TAILQ_FOREACH(lyp, &nfsrv_recalllisthead, lay_list) { 8007 if (NFSBCMP(&lyp->lay_fh, &fh, sizeof(fh)) == 0) { 8008 if ((lyp->lay_flags & NFSLAY_RETURNED) != 0) { 8009 TAILQ_REMOVE(&nfsrv_recalllisthead, lyp, 8010 lay_list); 8011 TAILQ_INSERT_HEAD(&thl, lyp, lay_list); 8012 NFSD_DEBUG(4, 8013 "nfsrv_copymr: layout returned\n"); 8014 } else { 8015 ret = mtx_sleep(lyp, NFSDRECALLMUTEXPTR, 8016 PVFS | PCATCH, "nfsmrl", hz); 8017 NFSD_DEBUG(4, "nfsrv_copymr: aft sleep=%d\n", 8018 ret); 8019 if (ret == EINTR || ret == ERESTART) 8020 break; 8021 if ((lyp->lay_flags & NFSLAY_RETURNED) == 0 && 8022 didprintf == 0) { 8023 printf("nfsrv_copymr: layout not " 8024 "returned\n"); 8025 didprintf = 1; 8026 } 8027 } 8028 goto tryagain2; 8029 } 8030 } 8031 NFSDRECALLUNLOCK(); 8032 /* We can now get rid of the layouts that have been returned. */ 8033 TAILQ_FOREACH_SAFE(lyp, &thl, lay_list, nlyp) 8034 nfsrv_freelayout(&thl, lyp); 8035 8036 /* 8037 * LK_EXCLUSIVE lock the MDS vnode, so that any 8038 * proxied writes through the MDS will be blocked until we have 8039 * completed the copy and update of the extended attributes. 
8040 * This will also ensure that any attributes and ACL will not be 8041 * changed until the copy is complete. 8042 */ 8043 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 8044 if ((vp->v_iflag & VI_DOOMED) != 0) { 8045 NFSD_DEBUG(4, "nfsrv_copymr: lk_exclusive doomed\n"); 8046 ret = ESTALE; 8047 } 8048 8049 /* Create the data file on the recovered DS. */ 8050 if (ret == 0) 8051 ret = nfsrv_createdsfile(vp, &fh, pf, dvp, ds, cred, p, &tvp); 8052 8053 /* Copy the DS file, if created successfully. */ 8054 if (ret == 0) { 8055 /* 8056 * Get any NFSv4 ACL on the MDS file, so that it can be set 8057 * on the new DS file. 8058 */ 8059 aclp = acl_alloc(M_WAITOK | M_ZERO); 8060 retacl = VOP_GETACL(vp, ACL_TYPE_NFS4, aclp, cred, p); 8061 if (retacl != 0 && retacl != ENOATTR) 8062 NFSD_DEBUG(1, "nfsrv_copymr: vop_getacl=%d\n", retacl); 8063 dat = malloc(PNFSDS_COPYSIZ, M_TEMP, M_WAITOK); 8064 rdpos = wrpos = 0; 8065 mp = NULL; 8066 ret = vn_start_write(tvp, &mp, V_WAIT | PCATCH); 8067 aresid = 0; 8068 while (ret == 0 && aresid == 0) { 8069 ret = vn_rdwr(UIO_READ, fvp, dat, PNFSDS_COPYSIZ, 8070 rdpos, UIO_SYSSPACE, IO_NODELOCKED, cred, NULL, 8071 &aresid, p); 8072 xfer = PNFSDS_COPYSIZ - aresid; 8073 if (ret == 0 && xfer > 0) { 8074 rdpos += xfer; 8075 ret = vn_rdwr(UIO_WRITE, tvp, dat, xfer, 8076 wrpos, UIO_SYSSPACE, IO_NODELOCKED, 8077 cred, NULL, NULL, p); 8078 if (ret == 0) 8079 wrpos += xfer; 8080 } 8081 } 8082 8083 /* If there is an ACL and the copy succeeded, set the ACL. */ 8084 if (ret == 0 && retacl == 0) { 8085 ret = VOP_SETACL(tvp, ACL_TYPE_NFS4, aclp, cred, p); 8086 /* 8087 * Don't consider these as errors, since VOP_GETACL() 8088 * can return an ACL when they are not actually 8089 * supported. For example, for UFS, VOP_GETACL() 8090 * will return a trivial ACL based on the uid/gid/mode 8091 * when there is no ACL on the file. 8092 * This case should be recognized as a trivial ACL 8093 * by UFS's VOP_SETACL() and succeed, but... 8094 */ 8095 if (ret == ENOATTR || ret == EOPNOTSUPP || ret == EPERM) 8096 ret = 0; 8097 } 8098 8099 if (mp != NULL) 8100 vn_finished_write(mp); 8101 if (ret == 0) 8102 ret = VOP_FSYNC(tvp, MNT_WAIT, p); 8103 8104 /* Set the DS data file's modify time to that of the MDS file. */ 8105 if (ret == 0) 8106 ret = VOP_GETATTR(vp, &va, cred); 8107 if (ret == 0) { 8108 mtime = va.va_mtime; 8109 VATTR_NULL(&va); 8110 va.va_mtime = mtime; 8111 ret = VOP_SETATTR(tvp, &va, cred); 8112 } 8113 8114 vput(tvp); 8115 acl_free(aclp); 8116 free(dat, M_TEMP); 8117 } 8118 8119 /* Update the extended attributes for the newly created DS file. */ 8120 if (ret == 0) { 8121 mp = NULL; 8122 ret = vn_start_write(vp, &mp, V_WAIT | PCATCH); 8123 if (ret == 0) 8124 ret = vn_extattr_set(vp, IO_NODELOCKED, 8125 EXTATTR_NAMESPACE_SYSTEM, "pnfsd.dsfile", 8126 sizeof(*wpf) * mirrorcnt, (char *)wpf, p); 8127 if (mp != NULL) 8128 vn_finished_write(mp); 8129 } 8130 8131 /* Get rid of the dontlist entry, so that Layouts can be issued. */ 8132 NFSDDONTLISTLOCK(); 8133 LIST_REMOVE(mrp, nfsmr_list); 8134 NFSDDONTLISTUNLOCK(); 8135 free(mrp, M_NFSDSTATE); 8136 return (ret); 8137 } 8138 8139 /* 8140 * Create a data storage file on the recovered DS. 8141 */ 8142 static int 8143 nfsrv_createdsfile(vnode_t vp, fhandle_t *fhp, struct pnfsdsfile *pf, 8144 vnode_t dvp, struct nfsdevice *ds, struct ucred *cred, NFSPROC_T *p, 8145 vnode_t *tvpp) 8146 { 8147 struct vattr va, nva; 8148 int error; 8149 8150 /* Make data file name based on FH.
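The new DS file is created zero length with the same uid, gid and mode as the MDS file, using the name already recorded in pf->dsf_filename and the directory vnode (dvp) supplied by the caller.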
*/ 8151 error = VOP_GETATTR(vp, &va, cred); 8152 if (error == 0) { 8153 /* Set the attributes for "vp" to Setattr the DS vp. */ 8154 VATTR_NULL(&nva); 8155 nva.va_uid = va.va_uid; 8156 nva.va_gid = va.va_gid; 8157 nva.va_mode = va.va_mode; 8158 nva.va_size = 0; 8159 VATTR_NULL(&va); 8160 va.va_type = VREG; 8161 va.va_mode = nva.va_mode; 8162 NFSD_DEBUG(4, "nfsrv_dscreatefile: dvp=%p pf=%p\n", dvp, pf); 8163 error = nfsrv_dscreate(dvp, &va, &nva, fhp, pf, NULL, 8164 pf->dsf_filename, cred, p, tvpp); 8165 } 8166 return (error); 8167 } 8168 8169 /* 8170 * Look up the MDS file shared locked, and then get the extended attribute 8171 * to find the extant DS file to be copied to the new mirror. 8172 * If successful, *vpp is set to the MDS file's vp and *nvpp is 8173 * set to a DS data file for the MDS file, both exclusively locked. 8174 * The "buf" argument has the pnfsdsfile structure from the MDS file 8175 * in it and buflen is set to its length. 8176 */ 8177 int 8178 nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf, 8179 int *buflenp, char *fname, NFSPROC_T *p, struct vnode **vpp, 8180 struct vnode **nvpp, struct pnfsdsfile **pfp, struct nfsdevice **dsp, 8181 struct nfsdevice **fdsp) 8182 { 8183 struct nameidata nd; 8184 struct vnode *vp, *curvp; 8185 struct pnfsdsfile *pf; 8186 struct nfsmount *nmp, *curnmp; 8187 int dsdir, error, mirrorcnt, ippos; 8188 8189 vp = NULL; 8190 curvp = NULL; 8191 curnmp = NULL; 8192 *dsp = NULL; 8193 *fdsp = NULL; 8194 if (dspathp == NULL && curdspathp != NULL) 8195 return (EPERM); 8196 8197 /* 8198 * Look up the MDS file shared locked. The lock will be upgraded 8199 * to an exclusive lock after any rw layouts have been returned. 8200 */ 8201 NFSD_DEBUG(4, "mdsopen path=%s\n", mdspathp); 8202 NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, 8203 mdspathp, p); 8204 error = namei(&nd); 8205 NFSD_DEBUG(4, "lookup=%d\n", error); 8206 if (error != 0) 8207 return (error); 8208 if (nd.ni_vp->v_type != VREG) { 8209 vput(nd.ni_vp); 8210 NFSD_DEBUG(4, "mdspath not reg\n"); 8211 return (EISDIR); 8212 } 8213 vp = nd.ni_vp; 8214 8215 if (curdspathp != NULL) { 8216 /* 8217 * Look up the current DS path and find the nfsdev structure for 8218 * it. 8219 */ 8220 NFSD_DEBUG(4, "curmdsdev path=%s\n", curdspathp); 8221 NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 8222 UIO_SYSSPACE, curdspathp, p); 8223 error = namei(&nd); 8224 NFSD_DEBUG(4, "ds lookup=%d\n", error); 8225 if (error != 0) { 8226 vput(vp); 8227 return (error); 8228 } 8229 if (nd.ni_vp->v_type != VDIR) { 8230 vput(nd.ni_vp); 8231 vput(vp); 8232 NFSD_DEBUG(4, "curdspath not dir\n"); 8233 return (ENOTDIR); 8234 } 8235 if (strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name, "nfs") != 0) { 8236 vput(nd.ni_vp); 8237 vput(vp); 8238 NFSD_DEBUG(4, "curdspath not an NFS mount\n"); 8239 return (ENXIO); 8240 } 8241 curnmp = VFSTONFS(nd.ni_vp->v_mount); 8242 8243 /* Search the nfsdev list for a match. */ 8244 NFSDDSLOCK(); 8245 *fdsp = nfsv4_findmirror(curnmp); 8246 NFSDDSUNLOCK(); 8247 if (*fdsp == NULL) 8248 curnmp = NULL; 8249 if (curnmp == NULL) { 8250 vput(nd.ni_vp); 8251 vput(vp); 8252 NFSD_DEBUG(4, "mdscopymr: no current ds\n"); 8253 return (ENXIO); 8254 } 8255 curvp = nd.ni_vp; 8256 } 8257 8258 if (dspathp != NULL) { 8259 /* Look up the nfsdev path and find the nfsdev structure. 
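This is the mount path for the new DS that the file will be copied onto; it must be a directory on an NFS mount and must not be the same directory as the current DS path (curdspathp).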
*/ 8260 NFSD_DEBUG(4, "mdsdev path=%s\n", dspathp); 8261 NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 8262 UIO_SYSSPACE, dspathp, p); 8263 error = namei(&nd); 8264 NFSD_DEBUG(4, "ds lookup=%d\n", error); 8265 if (error != 0) { 8266 vput(vp); 8267 if (curvp != NULL) 8268 vput(curvp); 8269 return (error); 8270 } 8271 if (nd.ni_vp->v_type != VDIR || nd.ni_vp == curvp) { 8272 vput(nd.ni_vp); 8273 vput(vp); 8274 if (curvp != NULL) 8275 vput(curvp); 8276 NFSD_DEBUG(4, "dspath not dir\n"); 8277 if (nd.ni_vp == curvp) 8278 return (EPERM); 8279 return (ENOTDIR); 8280 } 8281 if (strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name, "nfs") != 0) { 8282 vput(nd.ni_vp); 8283 vput(vp); 8284 if (curvp != NULL) 8285 vput(curvp); 8286 NFSD_DEBUG(4, "dspath not an NFS mount\n"); 8287 return (ENXIO); 8288 } 8289 nmp = VFSTONFS(nd.ni_vp->v_mount); 8290 8291 /* Search the nfsdev list for a match. */ 8292 NFSDDSLOCK(); 8293 *dsp = nfsv4_findmirror(nmp); 8294 NFSDDSUNLOCK(); 8295 if (*dsp == NULL) { 8296 vput(nd.ni_vp); 8297 vput(vp); 8298 if (curvp != NULL) 8299 vput(curvp); 8300 NFSD_DEBUG(4, "mdscopymr: no ds\n"); 8301 return (ENXIO); 8302 } 8303 } else { 8304 nd.ni_vp = NULL; 8305 nmp = NULL; 8306 } 8307 8308 /* 8309 * Get a vp for an available DS data file using the extended 8310 * attribute on the MDS file. 8311 * If there is a valid entry for the new DS in the extended attribute 8312 * on the MDS file (as checked via the nmp argument), 8313 * nfsrv_dsgetsockmnt() returns EEXIST, so no copying will occur. 8314 */ 8315 error = nfsrv_dsgetsockmnt(vp, 0, buf, buflenp, &mirrorcnt, p, 8316 NULL, NULL, NULL, fname, nvpp, &nmp, curnmp, &ippos, &dsdir); 8317 if (curvp != NULL) 8318 vput(curvp); 8319 if (nd.ni_vp == NULL) { 8320 if (error == 0 && nmp != NULL) { 8321 /* Search the nfsdev list for a match. */ 8322 NFSDDSLOCK(); 8323 *dsp = nfsv4_findmirror(nmp); 8324 NFSDDSUNLOCK(); 8325 } 8326 if (error == 0 && (nmp == NULL || *dsp == NULL)) { 8327 if (nvpp != NULL && *nvpp != NULL) { 8328 vput(*nvpp); 8329 *nvpp = NULL; 8330 } 8331 error = ENXIO; 8332 } 8333 } else 8334 vput(nd.ni_vp); 8335 8336 /* 8337 * When dspathp != NULL and curdspathp == NULL, this is a recovery 8338 * and is only allowed if there is a 0.0.0.0 IP address entry. 8339 * When curdspathp != NULL, the ippos will be set to that entry. 8340 */ 8341 if (error == 0 && dspathp != NULL && ippos == -1) { 8342 if (nvpp != NULL && *nvpp != NULL) { 8343 vput(*nvpp); 8344 *nvpp = NULL; 8345 } 8346 error = ENXIO; 8347 } 8348 if (error == 0) { 8349 *vpp = vp; 8350 8351 pf = (struct pnfsdsfile *)buf; 8352 if (ippos == -1) { 8353 /* If no zeroip pnfsdsfile, add one. */ 8354 ippos = *buflenp / sizeof(*pf); 8355 *buflenp += sizeof(*pf); 8356 pf += ippos; 8357 pf->dsf_dir = dsdir; 8358 strlcpy(pf->dsf_filename, fname, 8359 sizeof(pf->dsf_filename)); 8360 } else 8361 pf += ippos; 8362 *pfp = pf; 8363 } else 8364 vput(vp); 8365 return (error); 8366 } 8367 8368