1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/fs/nfs/delegation.c
4 *
5 * Copyright (C) 2004 Trond Myklebust
6 *
7 * NFS file delegation management
8 *
9 */
10 #include <linux/completion.h>
11 #include <linux/kthread.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/iversion.h>
17
18 #include <linux/nfs4.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_xdr.h>
21
22 #include "nfs4_fs.h"
23 #include "nfs4session.h"
24 #include "delegation.h"
25 #include "internal.h"
26 #include "nfs4trace.h"
27
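/*
 * Default cap on the number of cached delegations. Once the count of
 * active delegations reaches this watermark, delegations are returned
 * when the file is closed (see nfs4_inode_return_delegation_on_close()).
 */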
28 #define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
29
30 static atomic_long_t nfs_active_delegations;
31 static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
32
static void __nfs_free_delegation(struct nfs_delegation *delegation)
34 {
35 put_cred(delegation->cred);
36 delegation->cred = NULL;
37 kfree_rcu(delegation, rcu);
38 }
39
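/*
 * Mark the delegation stateid as revoked and remove it from the count of
 * active delegations. Unless a return is already in progress, also clear
 * the delegated change attribute verifier on the inode.
 */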
static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
41 {
42 if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
43 delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
44 atomic_long_dec(&nfs_active_delegations);
45 if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
46 nfs_clear_verifier_delegated(delegation->inode);
47 }
48 }
49
static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
51 {
52 refcount_inc(&delegation->refcount);
53 return delegation;
54 }
55
static void nfs_put_delegation(struct nfs_delegation *delegation)
57 {
58 if (refcount_dec_and_test(&delegation->refcount))
59 __nfs_free_delegation(delegation);
60 }
61
static void nfs_free_delegation(struct nfs_delegation *delegation)
63 {
64 nfs_mark_delegation_revoked(delegation);
65 nfs_put_delegation(delegation);
66 }
67
68 /**
69 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
70 * @delegation: delegation to process
71 *
72 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
74 {
75 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
76 }
77
static void nfs_mark_return_delegation(struct nfs_server *server,
79 struct nfs_delegation *delegation)
80 {
81 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
82 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
83 }
84
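/*
 * A delegation is only usable if it covers the requested open mode and
 * is neither revoked nor currently being returned.
 */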
static bool nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
86 fmode_t type)
87 {
88 if (delegation != NULL && (delegation->type & type) == type &&
89 !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
90 !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
91 return true;
92 return false;
93 }
94
struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
96 {
97 struct nfs_delegation *delegation;
98
99 delegation = rcu_dereference(NFS_I(inode)->delegation);
100 if (nfs4_is_valid_delegation(delegation, 0))
101 return delegation;
102 return NULL;
103 }
104
static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
106 int flags, bool mark)
107 {
108 struct nfs_delegation *delegation;
109 int ret = 0;
110
111 type &= FMODE_READ|FMODE_WRITE;
112 rcu_read_lock();
113 delegation = rcu_dereference(NFS_I(inode)->delegation);
114 if (nfs4_is_valid_delegation(delegation, type)) {
115 if (mark)
116 nfs_mark_delegation_referenced(delegation);
117 ret = 1;
118 if ((flags & NFS_DELEGATION_FLAG_TIME) &&
119 !test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
120 ret = 0;
121 }
122 rcu_read_unlock();
123 return ret;
124 }
125 /**
126 * nfs4_have_delegation - check if inode has a delegation, mark it
127 * NFS_DELEGATION_REFERENCED if there is one.
128 * @inode: inode to check
129 * @type: delegation types to check for
130 * @flags: various modifiers
131 *
132 * Returns one if inode has the indicated delegation, otherwise zero.
133 */
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
135 {
136 return nfs4_do_check_delegation(inode, type, flags, true);
137 }
138
139 /*
140 * nfs4_check_delegation - check if inode has a delegation, do not mark
141 * NFS_DELEGATION_REFERENCED if it has one.
142 */
int nfs4_check_delegation(struct inode *inode, fmode_t type)
144 {
145 return nfs4_do_check_delegation(inode, type, 0, false);
146 }
147
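/*
 * Reclaim any POSIX and flock locks held under @state by re-establishing
 * them on the server using the delegation stateid.
 */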
static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
149 {
150 struct inode *inode = state->inode;
151 struct file_lock *fl;
152 struct file_lock_context *flctx = locks_inode_context(inode);
153 struct list_head *list;
154 int status = 0;
155
156 if (flctx == NULL)
157 goto out;
158
159 list = &flctx->flc_posix;
160 spin_lock(&flctx->flc_lock);
161 restart:
162 for_each_file_lock(fl, list) {
163 if (nfs_file_open_context(fl->c.flc_file)->state != state)
164 continue;
165 spin_unlock(&flctx->flc_lock);
166 status = nfs4_lock_delegation_recall(fl, state, stateid);
167 if (status < 0)
168 goto out;
169 spin_lock(&flctx->flc_lock);
170 }
171 if (list == &flctx->flc_posix) {
172 list = &flctx->flc_flock;
173 goto restart;
174 }
175 spin_unlock(&flctx->flc_lock);
176 out:
177 return status;
178 }
179
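/*
 * Walk the inode's open contexts and recover any open and lock state that
 * is still marked as delegated before the delegation is given up.
 */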
static int nfs_delegation_claim_opens(struct inode *inode,
181 const nfs4_stateid *stateid, fmode_t type)
182 {
183 struct nfs_inode *nfsi = NFS_I(inode);
184 struct nfs_open_context *ctx;
185 struct nfs4_state_owner *sp;
186 struct nfs4_state *state;
187 int err;
188
189 again:
190 rcu_read_lock();
191 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
192 state = ctx->state;
193 if (state == NULL)
194 continue;
195 if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
196 continue;
197 if (!nfs4_valid_open_stateid(state))
198 continue;
199 if (!nfs4_stateid_match(&state->stateid, stateid))
200 continue;
201 if (!get_nfs_open_context(ctx))
202 continue;
203 rcu_read_unlock();
204 sp = state->owner;
205 /* Block nfs4_proc_unlck */
206 mutex_lock(&sp->so_delegreturn_mutex);
207 err = nfs4_open_delegation_recall(ctx, state, stateid);
208 if (!err)
209 err = nfs_delegation_claim_locks(state, stateid);
210 mutex_unlock(&sp->so_delegreturn_mutex);
211 put_nfs_open_context(ctx);
212 if (err != 0)
213 return err;
214 goto again;
215 }
216 rcu_read_unlock();
217 return 0;
218 }
219
220 /**
221 * nfs_inode_reclaim_delegation - process a delegation reclaim request
222 * @inode: inode to process
223 * @cred: credential to use for request
224 * @type: delegation type
225 * @stateid: delegation stateid
226 * @pagemod_limit: write delegation "space_limit"
227 * @deleg_type: raw delegation type
228 *
229 */
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
231 fmode_t type, const nfs4_stateid *stateid,
232 unsigned long pagemod_limit, u32 deleg_type)
233 {
234 struct nfs_delegation *delegation;
235 const struct cred *oldcred = NULL;
236
237 rcu_read_lock();
238 delegation = rcu_dereference(NFS_I(inode)->delegation);
239 if (delegation != NULL) {
240 spin_lock(&delegation->lock);
241 nfs4_stateid_copy(&delegation->stateid, stateid);
242 delegation->type = type;
243 delegation->pagemod_limit = pagemod_limit;
244 oldcred = delegation->cred;
245 delegation->cred = get_cred(cred);
246 switch (deleg_type) {
247 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
248 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
249 set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
250 break;
251 default:
252 clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
253 }
254 clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
255 if (test_and_clear_bit(NFS_DELEGATION_REVOKED,
256 &delegation->flags))
257 atomic_long_inc(&nfs_active_delegations);
258 spin_unlock(&delegation->lock);
259 rcu_read_unlock();
260 put_cred(oldcred);
261 trace_nfs4_reclaim_delegation(inode, type);
262 } else {
263 rcu_read_unlock();
264 nfs_inode_set_delegation(inode, cred, type, stateid,
265 pagemod_limit, deleg_type);
266 }
267 }
268
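/* Send DELEGRETURN to the server unless the delegation has been revoked. */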
static int nfs_do_return_delegation(struct inode *inode,
270 struct nfs_delegation *delegation,
271 int issync)
272 {
273 const struct cred *cred;
274 int res = 0;
275
276 if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
277 spin_lock(&delegation->lock);
278 cred = get_cred(delegation->cred);
279 spin_unlock(&delegation->lock);
280 res = nfs4_proc_delegreturn(inode, cred, &delegation->stateid,
281 delegation, issync);
282 put_cred(cred);
283 }
284 return res;
285 }
286
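/*
 * Take a reference to the delegation's inode. If the inode is already being
 * freed, flag the delegation so that the delegation scans skip it.
 */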
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
288 {
289 struct inode *inode = NULL;
290
291 spin_lock(&delegation->lock);
292 if (delegation->inode != NULL)
293 inode = igrab(delegation->inode);
294 if (!inode)
295 set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
296 spin_unlock(&delegation->lock);
297 return inode;
298 }
299
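/*
 * Flag the delegation as being returned and take a reference to it.
 * The reference is dropped in nfs_end_delegation_return().
 */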
300 static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
302 {
303 struct nfs_delegation *ret = NULL;
304 struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
305
306 if (delegation == NULL)
307 goto out;
308 spin_lock(&delegation->lock);
309 if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
310 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
311 /* Refcount matched in nfs_end_delegation_return() */
312 ret = nfs_get_delegation(delegation);
313 }
314 spin_unlock(&delegation->lock);
315 if (ret)
316 nfs_clear_verifier_delegated(&nfsi->vfs_inode);
317 out:
318 return ret;
319 }
320
321 static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
323 {
324 struct nfs_delegation *delegation;
325
326 rcu_read_lock();
327 delegation = nfs_start_delegation_return_locked(nfsi);
328 rcu_read_unlock();
329 return delegation;
330 }
331
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
333 struct nfs_client *clp, int err)
334 {
335
336 spin_lock(&delegation->lock);
337 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
338 if (err == -EAGAIN) {
339 set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
340 set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
341 }
342 spin_unlock(&delegation->lock);
343 }
344
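/*
 * Unlink the delegation from the per-server list and from the inode.
 * The caller must hold clp->cl_lock.
 */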
345 static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
347 struct nfs_delegation *delegation,
348 struct nfs_client *clp)
349 {
350 struct nfs_delegation *deleg_cur =
351 rcu_dereference_protected(nfsi->delegation,
352 lockdep_is_held(&clp->cl_lock));
353
354 if (deleg_cur == NULL || delegation != deleg_cur)
355 return NULL;
356
357 spin_lock(&delegation->lock);
358 if (!delegation->inode) {
359 spin_unlock(&delegation->lock);
360 return NULL;
361 }
362 list_del_rcu(&delegation->super_list);
363 delegation->inode = NULL;
364 rcu_assign_pointer(nfsi->delegation, NULL);
365 spin_unlock(&delegation->lock);
366 return delegation;
367 }
368
static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
370 struct nfs_delegation *delegation,
371 struct nfs_server *server)
372 {
373 struct nfs_client *clp = server->nfs_client;
374
375 spin_lock(&clp->cl_lock);
376 delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
377 spin_unlock(&clp->cl_lock);
378 return delegation;
379 }
380
381 static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
383 {
384 struct nfs_inode *nfsi = NFS_I(inode);
385 struct nfs_server *server = NFS_SERVER(inode);
386 struct nfs_delegation *delegation;
387
388 rcu_read_lock();
389 delegation = rcu_dereference(nfsi->delegation);
390 if (delegation != NULL)
391 delegation = nfs_detach_delegation(nfsi, delegation, server);
392 rcu_read_unlock();
393 return delegation;
394 }
395
396 static void
nfs_update_delegation_cred(struct nfs_delegation *delegation,
398 const struct cred *cred)
399 {
400 const struct cred *old;
401
402 if (cred_fscmp(delegation->cred, cred) != 0) {
403 old = xchg(&delegation->cred, get_cred(cred));
404 put_cred(old);
405 }
406 }
407
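/*
 * Update an existing delegation in place when the server hands out a more
 * recent stateid for the same file, reviving it if it had been revoked.
 */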
408 static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
410 const struct nfs_delegation *update)
411 {
412 if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
413 delegation->stateid.seqid = update->stateid.seqid;
414 smp_wmb();
415 delegation->type = update->type;
416 delegation->pagemod_limit = update->pagemod_limit;
417 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
418 delegation->change_attr = update->change_attr;
419 nfs_update_delegation_cred(delegation, update->cred);
420 /* smp_mb__before_atomic() is implicit due to xchg() */
421 clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
422 atomic_long_inc(&nfs_active_delegations);
423 }
424 }
425 }
426
427 /**
428 * nfs_inode_set_delegation - set up a delegation on an inode
429 * @inode: inode to which delegation applies
430 * @cred: cred to use for subsequent delegation processing
431 * @type: delegation type
432 * @stateid: delegation stateid
433 * @pagemod_limit: write delegation "space_limit"
434 * @deleg_type: raw delegation type
435 *
436 * Returns zero on success, or a negative errno value.
437 */
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
439 fmode_t type, const nfs4_stateid *stateid,
440 unsigned long pagemod_limit, u32 deleg_type)
441 {
442 struct nfs_server *server = NFS_SERVER(inode);
443 struct nfs_client *clp = server->nfs_client;
444 struct nfs_inode *nfsi = NFS_I(inode);
445 struct nfs_delegation *delegation, *old_delegation;
446 struct nfs_delegation *freeme = NULL;
447 int status = 0;
448
449 delegation = kmalloc(sizeof(*delegation), GFP_KERNEL_ACCOUNT);
450 if (delegation == NULL)
451 return -ENOMEM;
452 nfs4_stateid_copy(&delegation->stateid, stateid);
453 refcount_set(&delegation->refcount, 1);
454 delegation->type = type;
455 delegation->pagemod_limit = pagemod_limit;
456 delegation->change_attr = inode_peek_iversion_raw(inode);
457 delegation->cred = get_cred(cred);
458 delegation->inode = inode;
459 delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
460 switch (deleg_type) {
461 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
462 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
463 delegation->flags |= BIT(NFS_DELEGATION_DELEGTIME);
464 }
465 delegation->test_gen = 0;
466 spin_lock_init(&delegation->lock);
467
468 spin_lock(&clp->cl_lock);
469 old_delegation = rcu_dereference_protected(nfsi->delegation,
470 lockdep_is_held(&clp->cl_lock));
471 if (old_delegation == NULL)
472 goto add_new;
473 /* Is this an update of the existing delegation? */
474 if (nfs4_stateid_match_other(&old_delegation->stateid,
475 &delegation->stateid)) {
476 spin_lock(&old_delegation->lock);
477 nfs_update_inplace_delegation(old_delegation,
478 delegation);
479 spin_unlock(&old_delegation->lock);
480 goto out;
481 }
482 if (!test_bit(NFS_DELEGATION_REVOKED, &old_delegation->flags)) {
483 /*
484 * Deal with broken servers that hand out two
485 * delegations for the same file.
486 * Allow for upgrades to a WRITE delegation, but
487 * nothing else.
488 */
489 dfprintk(FILE, "%s: server %s handed out "
490 "a duplicate delegation!\n",
491 __func__, clp->cl_hostname);
492 if (delegation->type == old_delegation->type ||
493 !(delegation->type & FMODE_WRITE)) {
494 freeme = delegation;
495 delegation = NULL;
496 goto out;
497 }
498 if (test_and_set_bit(NFS_DELEGATION_RETURNING,
499 &old_delegation->flags))
500 goto out;
501 }
502 freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp);
503 if (freeme == NULL)
504 goto out;
505 add_new:
506 /*
507 * If we didn't revalidate the change attribute before setting
508 * the delegation, then pre-emptively ask for a full attribute
509 * cache revalidation.
510 */
511 spin_lock(&inode->i_lock);
512 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_CHANGE)
513 nfs_set_cache_invalid(inode,
514 NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
515 NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
516 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
517 NFS_INO_INVALID_OTHER | NFS_INO_INVALID_DATA |
518 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
519 NFS_INO_INVALID_XATTR);
520 spin_unlock(&inode->i_lock);
521
522 list_add_tail_rcu(&delegation->super_list, &server->delegations);
523 rcu_assign_pointer(nfsi->delegation, delegation);
524 delegation = NULL;
525
526 atomic_long_inc(&nfs_active_delegations);
527
528 trace_nfs4_set_delegation(inode, type);
529
530 /* If we hold writebacks and have delegated mtime then update */
531 if (deleg_type == NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG &&
532 nfs_have_writebacks(inode))
533 nfs_update_delegated_mtime(inode);
534 out:
535 spin_unlock(&clp->cl_lock);
536 if (delegation != NULL)
537 __nfs_free_delegation(delegation);
538 if (freeme != NULL) {
539 nfs_do_return_delegation(inode, freeme, 0);
540 nfs_free_delegation(freeme);
541 }
542 return status;
543 }
544
545 /*
546 * Basic procedure for returning a delegation to the server
547 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
549 {
550 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
551 unsigned int mode = O_WRONLY | O_RDWR;
552 int err = 0;
553
554 if (delegation == NULL)
555 return 0;
556
557 if (!issync)
558 mode |= O_NONBLOCK;
559 /* Recall of any remaining application leases */
560 err = break_lease(inode, mode);
561
562 while (err == 0) {
563 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
564 break;
565 err = nfs_delegation_claim_opens(inode, &delegation->stateid,
566 delegation->type);
567 if (!issync || err != -EAGAIN)
568 break;
569 /*
570 * Guard against state recovery
571 */
572 err = nfs4_wait_clnt_recover(clp);
573 }
574
575 if (err) {
576 nfs_abort_delegation_return(delegation, clp, err);
577 goto out;
578 }
579
580 err = nfs_do_return_delegation(inode, delegation, issync);
581 out:
582 /* Refcount matched in nfs_start_delegation_return_locked() */
583 nfs_put_delegation(delegation);
584 return err;
585 }
586
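/*
 * A delegation needs to be returned if it was explicitly marked for return,
 * or if it is marked "return if closed" and the file has no remaining opens.
 * Delegations that are already being returned, delayed or revoked are skipped.
 */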
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
588 {
589 bool ret = false;
590
591 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
592 ret = true;
593 else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
594 struct inode *inode;
595
596 spin_lock(&delegation->lock);
597 inode = delegation->inode;
598 if (inode && list_empty(&NFS_I(inode)->open_files))
599 ret = true;
600 spin_unlock(&delegation->lock);
601 }
602 if (ret)
603 clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
604 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
605 test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
606 test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
607 ret = false;
608
609 return ret;
610 }
611
static int nfs_server_return_marked_delegations(struct nfs_server *server,
613 void __always_unused *data)
614 {
615 struct nfs_delegation *delegation;
616 struct nfs_delegation *prev;
617 struct inode *inode;
618 struct inode *place_holder = NULL;
619 struct nfs_delegation *place_holder_deleg = NULL;
620 int err = 0;
621
622 restart:
623 /*
624 * To avoid quadratic looping we hold a reference
 * to an inode place_holder. Each time we restart, we
 * resume walking the server's delegation list from the
 * delegation attached to that inode.
628 * prev is an RCU-protected pointer to a delegation which
629 * wasn't marked for return and might be a good choice for
630 * the next place_holder.
631 */
632 prev = NULL;
633 delegation = NULL;
634 rcu_read_lock();
635 if (place_holder)
636 delegation = rcu_dereference(NFS_I(place_holder)->delegation);
637 if (!delegation || delegation != place_holder_deleg)
638 delegation = list_entry_rcu(server->delegations.next,
639 struct nfs_delegation, super_list);
640 list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
641 struct inode *to_put = NULL;
642
643 if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags))
644 continue;
645 if (!nfs_delegation_need_return(delegation)) {
646 if (nfs4_is_valid_delegation(delegation, 0))
647 prev = delegation;
648 continue;
649 }
650 inode = nfs_delegation_grab_inode(delegation);
651 if (inode == NULL)
652 continue;
653
654 if (prev) {
655 struct inode *tmp = nfs_delegation_grab_inode(prev);
656 if (tmp) {
657 to_put = place_holder;
658 place_holder = tmp;
659 place_holder_deleg = prev;
660 }
661 }
662
663 delegation = nfs_start_delegation_return_locked(NFS_I(inode));
664 rcu_read_unlock();
665
666 iput(to_put);
667
668 err = nfs_end_delegation_return(inode, delegation, 0);
669 iput(inode);
670 cond_resched();
671 if (!err)
672 goto restart;
673 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
674 goto out;
675 }
676 rcu_read_unlock();
677 out:
678 iput(place_holder);
679 return err;
680 }
681
static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
683 {
684 struct nfs_delegation *d;
685 bool ret = false;
686
687 list_for_each_entry_rcu (d, &server->delegations, super_list) {
688 if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
689 continue;
690 nfs_mark_return_delegation(server, d);
691 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
692 ret = true;
693 }
694 return ret;
695 }
696
static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
698 {
699 struct nfs_server *server;
700 bool ret = false;
701
702 if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
703 goto out;
704 rcu_read_lock();
705 list_for_each_entry_rcu (server, &clp->cl_superblocks, client_link) {
706 if (nfs_server_clear_delayed_delegations(server))
707 ret = true;
708 }
709 rcu_read_unlock();
710 out:
711 return ret;
712 }
713
714 /**
715 * nfs_client_return_marked_delegations - return previously marked delegations
716 * @clp: nfs_client to process
717 *
718 * Note that this function is designed to be called by the state
719 * manager thread. For this reason, it cannot flush the dirty data,
720 * since that could deadlock in case of a state recovery error.
721 *
722 * Returns zero on success, or a negative errno value.
723 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
725 {
726 int err = nfs_client_for_each_server(
727 clp, nfs_server_return_marked_delegations, NULL);
728 if (err)
729 return err;
730 /* If a return was delayed, sleep to prevent hard looping */
731 if (nfs_client_clear_delayed_delegations(clp))
732 ssleep(1);
733 return 0;
734 }
735
736 /**
737 * nfs_inode_evict_delegation - return delegation, don't reclaim opens
738 * @inode: inode to process
739 *
740 * Does not protect against delegation reclaims, therefore really only safe
741 * to be called from nfs4_clear_inode(). Guaranteed to always free
742 * the delegation structure.
743 */
void nfs_inode_evict_delegation(struct inode *inode)
745 {
746 struct nfs_delegation *delegation;
747
748 delegation = nfs_inode_detach_delegation(inode);
749 if (delegation != NULL) {
750 set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
751 set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
752 nfs_do_return_delegation(inode, delegation, 1);
753 nfs_free_delegation(delegation);
754 }
755 }
756
757 /**
758 * nfs4_inode_return_delegation - synchronously return a delegation
759 * @inode: inode to process
760 *
761 * This routine will always flush any dirty data to disk on the
762 * assumption that if we need to return the delegation, then
763 * we should stop caching.
764 *
765 * Returns zero on success, or a negative errno value.
766 */
int nfs4_inode_return_delegation(struct inode *inode)
768 {
769 struct nfs_inode *nfsi = NFS_I(inode);
770 struct nfs_delegation *delegation;
771
772 delegation = nfs_start_delegation_return(nfsi);
773 if (delegation != NULL) {
774 /* Synchronous recall of any application leases */
775 break_lease(inode, O_WRONLY | O_RDWR);
776 if (S_ISREG(inode->i_mode))
777 nfs_wb_all(inode);
778 return nfs_end_delegation_return(inode, delegation, 1);
779 }
780 return 0;
781 }
782
783 /**
784 * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
785 * @inode: inode to process
786 *
787 * This routine is called on file close in order to determine if the
788 * inode delegation needs to be returned immediately.
789 */
void nfs4_inode_return_delegation_on_close(struct inode *inode)
791 {
792 struct nfs_delegation *delegation;
793 struct nfs_delegation *ret = NULL;
794
795 if (!inode)
796 return;
797 rcu_read_lock();
798 delegation = nfs4_get_valid_delegation(inode);
799 if (!delegation)
800 goto out;
801 if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
802 atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
803 spin_lock(&delegation->lock);
804 if (delegation->inode &&
805 list_empty(&NFS_I(inode)->open_files) &&
806 !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
807 clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
808 /* Refcount matched in nfs_end_delegation_return() */
809 ret = nfs_get_delegation(delegation);
810 }
811 spin_unlock(&delegation->lock);
812 if (ret)
813 nfs_clear_verifier_delegated(inode);
814 }
815 out:
816 rcu_read_unlock();
817 nfs_end_delegation_return(inode, ret, 0);
818 }
819
820 /**
 * nfs4_inode_make_writeable - possibly return a delegation before writing
822 * @inode: pointer to inode
823 *
824 * Make the inode writeable by returning the delegation if necessary
825 *
826 * Returns zero on success, or a negative errno value.
827 */
int nfs4_inode_make_writeable(struct inode *inode)
829 {
830 struct nfs_delegation *delegation;
831
832 rcu_read_lock();
833 delegation = nfs4_get_valid_delegation(inode);
834 if (delegation == NULL ||
835 (nfs4_has_session(NFS_SERVER(inode)->nfs_client) &&
836 (delegation->type & FMODE_WRITE))) {
837 rcu_read_unlock();
838 return 0;
839 }
840 rcu_read_unlock();
841 return nfs4_inode_return_delegation(inode);
842 }
843
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
845 struct nfs_delegation *delegation)
846 {
847 set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
848 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
849 }
850
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
852 {
853 struct nfs_delegation *delegation;
854 bool ret = false;
855
856 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
857 nfs_mark_return_delegation(server, delegation);
858 ret = true;
859 }
860 return ret;
861 }
862
static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
864 {
865 struct nfs_server *server;
866
867 rcu_read_lock();
868 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
869 nfs_server_mark_return_all_delegations(server);
870 rcu_read_unlock();
871 }
872
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
874 {
875 if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
876 nfs4_schedule_state_manager(clp);
877 }
878
879 /**
 * nfs_expire_all_delegations - mark all of a client's delegations for return
881 * @clp: client to process
882 *
883 */
void nfs_expire_all_delegations(struct nfs_client *clp)
885 {
886 nfs_client_mark_return_all_delegations(clp);
887 nfs_delegation_run_state_manager(clp);
888 }
889
890 /**
891 * nfs_server_return_all_delegations - return delegations for one superblock
892 * @server: pointer to nfs_server to process
893 *
894 */
void nfs_server_return_all_delegations(struct nfs_server *server)
896 {
897 struct nfs_client *clp = server->nfs_client;
898 bool need_wait;
899
900 if (clp == NULL)
901 return;
902
903 rcu_read_lock();
904 need_wait = nfs_server_mark_return_all_delegations(server);
905 rcu_read_unlock();
906
907 if (need_wait) {
908 nfs4_schedule_state_manager(clp);
909 nfs4_wait_clnt_recover(clp);
910 }
911 }
912
static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
914 fmode_t flags)
915 {
916 struct nfs_delegation *delegation;
917
918 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
919 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
920 continue;
921 if (delegation->type & flags)
922 nfs_mark_return_if_closed_delegation(server, delegation);
923 }
924 }
925
static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
927 fmode_t flags)
928 {
929 struct nfs_server *server;
930
931 rcu_read_lock();
932 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
933 nfs_mark_return_unused_delegation_types(server, flags);
934 rcu_read_unlock();
935 }
936
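/*
 * If @stateid is NULL or matches the cached delegation, mark the delegation
 * as revoked and trigger recovery of any state that depended on it.
 */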
static void nfs_revoke_delegation(struct inode *inode,
938 const nfs4_stateid *stateid)
939 {
940 struct nfs_delegation *delegation;
941 nfs4_stateid tmp;
942 bool ret = false;
943
944 rcu_read_lock();
945 delegation = rcu_dereference(NFS_I(inode)->delegation);
946 if (delegation == NULL)
947 goto out;
948 if (stateid == NULL) {
949 nfs4_stateid_copy(&tmp, &delegation->stateid);
950 stateid = &tmp;
951 } else {
952 if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
953 goto out;
954 spin_lock(&delegation->lock);
955 if (stateid->seqid) {
956 if (nfs4_stateid_is_newer(&delegation->stateid, stateid)) {
957 spin_unlock(&delegation->lock);
958 goto out;
959 }
960 delegation->stateid.seqid = stateid->seqid;
961 }
962 spin_unlock(&delegation->lock);
963 }
964 nfs_mark_delegation_revoked(delegation);
965 ret = true;
966 out:
967 rcu_read_unlock();
968 if (ret)
969 nfs_inode_find_state_and_recover(inode, stateid);
970 }
971
void nfs_remove_bad_delegation(struct inode *inode,
973 const nfs4_stateid *stateid)
974 {
975 nfs_revoke_delegation(inode, stateid);
976 }
977 EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
978
void nfs_delegation_mark_returned(struct inode *inode,
980 const nfs4_stateid *stateid)
981 {
982 struct nfs_delegation *delegation;
983
984 if (!inode)
985 return;
986
987 rcu_read_lock();
988 delegation = rcu_dereference(NFS_I(inode)->delegation);
989 if (!delegation)
990 goto out_rcu_unlock;
991
992 spin_lock(&delegation->lock);
993 if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
994 goto out_spin_unlock;
995 if (stateid->seqid) {
/* If delegation->stateid is newer, don't mark it as returned */
997 if (nfs4_stateid_is_newer(&delegation->stateid, stateid))
998 goto out_clear_returning;
999 if (delegation->stateid.seqid != stateid->seqid)
1000 delegation->stateid.seqid = stateid->seqid;
1001 }
1002
1003 nfs_mark_delegation_revoked(delegation);
1004 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
1005 spin_unlock(&delegation->lock);
1006 if (nfs_detach_delegation(NFS_I(inode), delegation, NFS_SERVER(inode)))
1007 nfs_put_delegation(delegation);
1008 goto out_rcu_unlock;
1009
1010 out_clear_returning:
1011 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
1012 out_spin_unlock:
1013 spin_unlock(&delegation->lock);
1014 out_rcu_unlock:
1015 rcu_read_unlock();
1016
1017 nfs_inode_find_state_and_recover(inode, stateid);
1018 }
1019
1020 /**
 * nfs_expire_unused_delegation_types - mark unused delegations of the given types for return
1022 * @clp: client to process
1023 * @flags: delegation types to expire
1024 *
1025 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
1027 {
1028 nfs_client_mark_return_unused_delegation_types(clp, flags);
1029 nfs_delegation_run_state_manager(clp);
1030 }
1031
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
1033 {
1034 struct nfs_delegation *delegation;
1035
1036 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
1037 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
1038 continue;
1039 nfs_mark_return_if_closed_delegation(server, delegation);
1040 }
1041 }
1042
1043 /**
1044 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
1045 * @clp: nfs_client to process
1046 *
1047 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
1049 {
1050 struct nfs_server *server;
1051
1052 rcu_read_lock();
1053 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1054 nfs_mark_return_unreferenced_delegations(server);
1055 rcu_read_unlock();
1056
1057 nfs_delegation_run_state_manager(clp);
1058 }
1059
1060 /**
1061 * nfs_async_inode_return_delegation - asynchronously return a delegation
1062 * @inode: inode to process
1063 * @stateid: state ID information
1064 *
1065 * Returns zero on success, or a negative errno value.
1066 */
int nfs_async_inode_return_delegation(struct inode *inode,
1068 const nfs4_stateid *stateid)
1069 {
1070 struct nfs_server *server = NFS_SERVER(inode);
1071 struct nfs_client *clp = server->nfs_client;
1072 struct nfs_delegation *delegation;
1073
1074 rcu_read_lock();
1075 delegation = nfs4_get_valid_delegation(inode);
1076 if (delegation == NULL)
1077 goto out_enoent;
1078 if (stateid != NULL &&
1079 !clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
1080 goto out_enoent;
1081 nfs_mark_return_delegation(server, delegation);
1082 rcu_read_unlock();
1083
1084 /* If there are any application leases or delegations, recall them */
1085 break_lease(inode, O_WRONLY | O_RDWR | O_NONBLOCK);
1086
1087 nfs_delegation_run_state_manager(clp);
1088 return 0;
1089 out_enoent:
1090 rcu_read_unlock();
1091 return -ENOENT;
1092 }
1093
1094 static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
1096 const struct nfs_fh *fhandle)
1097 {
1098 struct nfs_delegation *delegation;
1099 struct super_block *freeme = NULL;
1100 struct inode *res = NULL;
1101
1102 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
1103 spin_lock(&delegation->lock);
1104 if (delegation->inode != NULL &&
1105 !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
1106 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
1107 if (nfs_sb_active(server->super)) {
1108 freeme = server->super;
1109 res = igrab(delegation->inode);
1110 }
1111 spin_unlock(&delegation->lock);
1112 if (res != NULL)
1113 return res;
1114 if (freeme) {
1115 rcu_read_unlock();
1116 nfs_sb_deactive(freeme);
1117 rcu_read_lock();
1118 }
1119 return ERR_PTR(-EAGAIN);
1120 }
1121 spin_unlock(&delegation->lock);
1122 }
1123 return ERR_PTR(-ENOENT);
1124 }
1125
1126 /**
1127 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
1128 * @clp: client state handle
1129 * @fhandle: filehandle from a delegation recall
1130 *
 * Returns pointer to inode matching "fhandle", or an ERR_PTR if a matching
 * inode cannot be found.
1133 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
1135 const struct nfs_fh *fhandle)
1136 {
1137 struct nfs_server *server;
1138 struct inode *res;
1139
1140 rcu_read_lock();
1141 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1142 res = nfs_delegation_find_inode_server(server, fhandle);
1143 if (res != ERR_PTR(-ENOENT)) {
1144 rcu_read_unlock();
1145 return res;
1146 }
1147 }
1148 rcu_read_unlock();
1149 return ERR_PTR(-ENOENT);
1150 }
1151
static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
1153 {
1154 struct nfs_delegation *delegation;
1155
1156 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
1157 /*
1158 * If the delegation may have been admin revoked, then we
1159 * cannot reclaim it.
1160 */
1161 if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags))
1162 continue;
1163 set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
1164 }
1165 }
1166
1167 /**
1168 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
1169 * @clp: nfs_client to process
1170 *
1171 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
1173 {
1174 struct nfs_server *server;
1175
1176 rcu_read_lock();
1177 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1178 nfs_delegation_mark_reclaim_server(server);
1179 rcu_read_unlock();
1180 }
1181
static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
1183 void __always_unused *data)
1184 {
1185 struct nfs_delegation *delegation;
1186 struct inode *inode;
1187 restart:
1188 rcu_read_lock();
1189 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
1190 if (test_bit(NFS_DELEGATION_INODE_FREEING,
1191 &delegation->flags) ||
1192 test_bit(NFS_DELEGATION_RETURNING,
1193 &delegation->flags) ||
1194 test_bit(NFS_DELEGATION_NEED_RECLAIM,
1195 &delegation->flags) == 0)
1196 continue;
1197 inode = nfs_delegation_grab_inode(delegation);
1198 if (inode == NULL)
1199 continue;
1200 delegation = nfs_start_delegation_return_locked(NFS_I(inode));
1201 rcu_read_unlock();
1202 if (delegation != NULL) {
1203 if (nfs_detach_delegation(NFS_I(inode), delegation,
1204 server) != NULL)
1205 nfs_free_delegation(delegation);
1206 /* Match nfs_start_delegation_return_locked */
1207 nfs_put_delegation(delegation);
1208 }
1209 iput(inode);
1210 cond_resched();
1211 goto restart;
1212 }
1213 rcu_read_unlock();
1214 return 0;
1215 }
1216
1217 /**
1218 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
1219 * @clp: nfs_client to process
1220 *
1221 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
1223 {
1224 nfs_client_for_each_server(clp, nfs_server_reap_unclaimed_delegations,
1225 NULL);
1226 }
1227
static inline bool nfs4_server_rebooted(const struct nfs_client *clp)
1229 {
1230 return (clp->cl_state & (BIT(NFS4CLNT_CHECK_LEASE) |
1231 BIT(NFS4CLNT_LEASE_EXPIRED) |
1232 BIT(NFS4CLNT_SESSION_RESET))) != 0;
1233 }
1234
static void nfs_mark_test_expired_delegation(struct nfs_server *server,
1236 struct nfs_delegation *delegation)
1237 {
1238 if (delegation->stateid.type == NFS4_INVALID_STATEID_TYPE)
1239 return;
1240 clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
1241 set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
1242 set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
1243 }
1244
static void nfs_inode_mark_test_expired_delegation(struct nfs_server *server,
1246 struct inode *inode)
1247 {
1248 struct nfs_delegation *delegation;
1249
1250 rcu_read_lock();
1251 delegation = rcu_dereference(NFS_I(inode)->delegation);
1252 if (delegation)
1253 nfs_mark_test_expired_delegation(server, delegation);
1254 rcu_read_unlock();
1255
1256 }
1257
static void nfs_delegation_mark_test_expired_server(struct nfs_server *server)
1259 {
1260 struct nfs_delegation *delegation;
1261
1262 list_for_each_entry_rcu(delegation, &server->delegations, super_list)
1263 nfs_mark_test_expired_delegation(server, delegation);
1264 }
1265
1266 /**
1267 * nfs_mark_test_expired_all_delegations - mark all delegations for testing
1268 * @clp: nfs_client to process
1269 *
 * Iterates through all the delegations associated with this client and
1271 * marks them as needing to be checked for validity.
1272 */
void nfs_mark_test_expired_all_delegations(struct nfs_client *clp)
1274 {
1275 struct nfs_server *server;
1276
1277 rcu_read_lock();
1278 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1279 nfs_delegation_mark_test_expired_server(server);
1280 rcu_read_unlock();
1281 }
1282
1283 /**
1284 * nfs_test_expired_all_delegations - test all delegations for a client
1285 * @clp: nfs_client to process
1286 *
1287 * Helper for handling "recallable state revoked" status from server.
1288 */
void nfs_test_expired_all_delegations(struct nfs_client *clp)
1290 {
1291 nfs_mark_test_expired_all_delegations(clp);
1292 nfs4_schedule_state_manager(clp);
1293 }
1294
1295 static void
nfs_delegation_test_free_expired(struct inode *inode,
1297 nfs4_stateid *stateid,
1298 const struct cred *cred)
1299 {
1300 struct nfs_server *server = NFS_SERVER(inode);
1301 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
1302 int status;
1303
1304 if (!cred)
1305 return;
1306 status = ops->test_and_free_expired(server, stateid, cred);
1307 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
1308 nfs_remove_bad_delegation(inode, stateid);
1309 }
1310
static int nfs_server_reap_expired_delegations(struct nfs_server *server,
1312 void __always_unused *data)
1313 {
1314 struct nfs_delegation *delegation;
1315 struct inode *inode;
1316 const struct cred *cred;
1317 nfs4_stateid stateid;
1318 unsigned long gen = ++server->delegation_gen;
1319
1320 restart:
1321 rcu_read_lock();
1322 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
1323 if (test_bit(NFS_DELEGATION_INODE_FREEING,
1324 &delegation->flags) ||
1325 test_bit(NFS_DELEGATION_RETURNING,
1326 &delegation->flags) ||
1327 test_bit(NFS_DELEGATION_TEST_EXPIRED,
1328 &delegation->flags) == 0 ||
1329 delegation->test_gen == gen)
1330 continue;
1331 inode = nfs_delegation_grab_inode(delegation);
1332 if (inode == NULL)
1333 continue;
1334 spin_lock(&delegation->lock);
1335 cred = get_cred_rcu(delegation->cred);
1336 nfs4_stateid_copy(&stateid, &delegation->stateid);
1337 spin_unlock(&delegation->lock);
1338 delegation->test_gen = gen;
1339 clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
1340 rcu_read_unlock();
1341 nfs_delegation_test_free_expired(inode, &stateid, cred);
1342 put_cred(cred);
1343 if (!nfs4_server_rebooted(server->nfs_client)) {
1344 iput(inode);
1345 cond_resched();
1346 goto restart;
1347 }
nfs_inode_mark_test_expired_delegation(server, inode);
1349 iput(inode);
1350 return -EAGAIN;
1351 }
1352 rcu_read_unlock();
1353 return 0;
1354 }
1355
1356 /**
1357 * nfs_reap_expired_delegations - reap expired delegations
1358 * @clp: nfs_client to process
1359 *
 * Iterates through all the delegations associated with this client and
 * checks if they may have been revoked. This function is usually
1362 * expected to be called in cases where the server may have lost its
1363 * lease.
1364 */
void nfs_reap_expired_delegations(struct nfs_client *clp)
1366 {
1367 nfs_client_for_each_server(clp, nfs_server_reap_expired_delegations,
1368 NULL);
1369 }
1370
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
1372 const nfs4_stateid *stateid)
1373 {
1374 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1375 struct nfs_delegation *delegation;
1376 bool found = false;
1377
1378 rcu_read_lock();
1379 delegation = rcu_dereference(NFS_I(inode)->delegation);
1380 if (delegation &&
1381 nfs4_stateid_match_or_older(&delegation->stateid, stateid) &&
1382 !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
1383 nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation);
1384 found = true;
1385 }
1386 rcu_read_unlock();
1387 if (found)
1388 nfs4_schedule_state_manager(clp);
1389 }
1390
1391 /**
1392 * nfs_delegations_present - check for existence of delegations
1393 * @clp: client state handle
1394 *
1395 * Returns one if there are any nfs_delegation structures attached
1396 * to this nfs_client.
1397 */
int nfs_delegations_present(struct nfs_client *clp)
1399 {
1400 struct nfs_server *server;
1401 int ret = 0;
1402
1403 rcu_read_lock();
1404 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1405 if (!list_empty(&server->delegations)) {
1406 ret = 1;
1407 break;
1408 }
1409 rcu_read_unlock();
1410 return ret;
1411 }
1412
1413 /**
1414 * nfs4_refresh_delegation_stateid - Update delegation stateid seqid
1415 * @dst: stateid to refresh
1416 * @inode: inode to check
1417 *
1418 * Returns "true" and updates "dst->seqid" * if inode had a delegation
1419 * that matches our delegation stateid. Otherwise "false" is returned.
1420 */
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
1422 {
1423 struct nfs_delegation *delegation;
1424 bool ret = false;
1425 if (!inode)
1426 goto out;
1427
1428 rcu_read_lock();
1429 delegation = rcu_dereference(NFS_I(inode)->delegation);
1430 if (delegation != NULL &&
1431 nfs4_stateid_match_other(dst, &delegation->stateid) &&
1432 nfs4_stateid_is_newer(&delegation->stateid, dst) &&
1433 !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
1434 dst->seqid = delegation->stateid.seqid;
1435 ret = true;
1436 }
1437 rcu_read_unlock();
1438 out:
1439 return ret;
1440 }
1441
1442 /**
1443 * nfs4_copy_delegation_stateid - Copy inode's state ID information
1444 * @inode: inode to check
1445 * @flags: delegation type requirement
1446 * @dst: stateid data structure to fill in
1447 * @cred: optional argument to retrieve credential
1448 *
1449 * Returns "true" and fills in "dst->data" * if inode had a delegation,
1450 * otherwise "false" is returned.
1451 */
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
1453 nfs4_stateid *dst, const struct cred **cred)
1454 {
1455 struct nfs_inode *nfsi = NFS_I(inode);
1456 struct nfs_delegation *delegation;
1457 bool ret = false;
1458
1459 flags &= FMODE_READ|FMODE_WRITE;
1460 rcu_read_lock();
1461 delegation = rcu_dereference(nfsi->delegation);
1462 if (!delegation)
1463 goto out;
1464 spin_lock(&delegation->lock);
1465 ret = nfs4_is_valid_delegation(delegation, flags);
1466 if (ret) {
1467 nfs4_stateid_copy(dst, &delegation->stateid);
1468 nfs_mark_delegation_referenced(delegation);
1469 if (cred)
1470 *cred = get_cred(delegation->cred);
1471 }
1472 spin_unlock(&delegation->lock);
1473 out:
1474 rcu_read_unlock();
1475 return ret;
1476 }
1477
1478 /**
1479 * nfs4_delegation_flush_on_close - Check if we must flush file on close
1480 * @inode: inode to check
1481 *
1482 * This function checks the number of outstanding writes to the file
1483 * against the delegation 'space_limit' field to see if
1484 * the spec requires us to flush the file on close.
1485 */
bool nfs4_delegation_flush_on_close(const struct inode *inode)
1487 {
1488 struct nfs_inode *nfsi = NFS_I(inode);
1489 struct nfs_delegation *delegation;
1490 bool ret = true;
1491
1492 rcu_read_lock();
1493 delegation = rcu_dereference(nfsi->delegation);
1494 if (delegation == NULL || !(delegation->type & FMODE_WRITE))
1495 goto out;
1496 if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit)
1497 ret = false;
1498 out:
1499 rcu_read_unlock();
1500 return ret;
1501 }
1502
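/*
 * "delegation_watermark" is a runtime-tunable module parameter controlling
 * nfs_delegation_watermark (see nfs4_inode_return_delegation_on_close()).
 */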
1503 module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
1504