/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

static struct nfs_delegation *nfs_alloc_delegation(void)
{
	return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

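/*
 * Reclaim any POSIX or flock locks belonging to this open context that
 * were previously covered by the delegation being returned.
 */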
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

/*
 * Reclaim the open state (and any associated locks) for every open
 * context on this inode that is still marked as delegated.
 */
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Reclaim a delegation on an inode
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	/* Take a reference to the new credential; release the old one
	 * only after the updated delegation state is visible. */
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL) {
		/* The inode already held a delegation: drop our credential
		 * reference along with the unused structure. */
		nfs_free_delegation(delegation);
	}
	return status;
}

/* Return the delegation to the server and free it */
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

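/*
 * Kernel thread body: return every delegation held by this client once
 * the lease has expired, unless state recovery is already in progress.
 */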
int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs4_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	spin_lock(&clp->cl_lock);
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
out:
	spin_unlock(&clp->cl_lock);
	nfs4_put_client(clp);
	module_put_and_exit(0);
}

void nfs_expire_all_delegations(struct nfs4_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr));
	if (!IS_ERR(task))
		return;
	nfs4_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

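/*
 * Arguments handed to the delegation recall thread. 'started' lets the
 * caller wait until the delegation has been detached from the inode and
 * 'result' has been filled in.
 */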
struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);
	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}

/*
 * Copy the current delegation stateid for this inode.
 * Returns one if a delegation was found, zero otherwise.
 */
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	if (nfsi->delegation_state == 0)
		return 0;
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		res = 1;
	}
	spin_unlock(&clp->cl_lock);
	return res;
}