// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
				     u64 *copied);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset,
				 loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[NFS_BITMASK_SZ];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
				     lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
			 NFS_INO_INVALID_BLOCKS);

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		if (nfs_should_remove_suid(inode)) {
			spin_lock(&inode->i_lock);
			nfs_set_cache_invalid(inode,
				NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
			spin_unlock(&inode->i_lock);
		}
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);
	}
	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
		trace_nfs4_fallocate(inode, &args, status);
	else
		trace_nfs4_deallocate(inode, &args, status);
	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	nfs_file_block_o_direct(NFS_I(inode));
	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
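		/*
		 * The server does not implement this operation at all;
		 * report EOPNOTSUPP and don't bother retrying.
		 */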
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	loff_t oldsize = i_size_read(inode);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);

	if (err == 0)
		nfs_truncate_last_folio(inode->i_mapping, oldsize,
					offset + len);
	else if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
					     NFS_CAP_ZERO_RANGE);

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
					     NFS_CAP_ZERO_RANGE);

	inode_unlock(inode);
	return err;
}

int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
	};
	struct inode *inode = file_inode(filep);
	loff_t oldsize = i_size_read(inode);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0) {
		nfs_truncate_last_folio(inode->i_mapping, oldsize,
					offset + len);
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	} else if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;

	inode_unlock(inode);
	return err;
}

static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
				       struct nfs_server *src_server,
				       struct nfs4_copy_state *copy)
{
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);
	struct nfs_client *clp = dst_server->nfs_client;
	unsigned long timeout = 3 * HZ;
	int status = NFS4_OK;
	u64 copied;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
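	/*
	 * If the CB_OFFLOAD callback for this copy has already arrived, its
	 * state sits on the client's pending_cb_stateids list; reuse it and
	 * skip the wait below.
	 */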
	list_for_each_entry(iter,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &iter->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		tmp_copy = iter;
		list_del(&iter->copies);
		break;
	}
	if (tmp_copy) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

wait:
	status = wait_for_completion_interruptible_timeout(&copy->completion,
							   timeout);
	if (!status)
		goto timeout;
	nfs4_copy_dequeue_callback(dst_server, src_server, copy);
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	/* Copy out the updated write verifier provided by CB_OFFLOAD. */
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
timeout:
	timeout <<= 1;
	if (timeout > (clp->cl_lease_time >> 1))
		timeout = clp->cl_lease_time >> 1;
	status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
	if (status == -EINPROGRESS)
		goto wait;
	nfs4_copy_dequeue_callback(dst_server, src_server, copy);
	switch (status) {
	case 0:
		/* The server recognized the copy stateid, so it hasn't
		 * rebooted. Don't overwrite the verifier returned in the
		 * COPY result. */
		res->write_res.count = copied;
		goto out_free;
	case -EREMOTEIO:
		/* COPY operation failed on the server. */
		status = -EOPNOTSUPP;
		res->write_res.count = copied;
		goto out_free;
	case -EBADF:
		/* Server did not recognize the copy stateid. It has
		 * probably restarted and lost the plot. */
		res->write_res.count = 0;
		status = -EOPNOTSUPP;
		break;
	case -EOPNOTSUPP:
		/* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when
		 * it has signed up for an async COPY, so server is not
		 * spec-compliant. */
		res->write_res.count = 0;
	}
	goto out_free;
}

static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @file: pointer to destination file
 * @pos: destination offset
 * @len: copy length
 * @oldsize: length of the file prior to clone/copy
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
				 loff_t oldsize)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = file->f_mapping;
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	nfs_truncate_last_folio(mapping, oldsize, pos);
	WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
						   end >> PAGE_SHIFT));

	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_MTIME |
				     NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	loff_t oldsize_dst = i_size_read(dst_inode);
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(src->f_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	nfs_file_block_o_direct(NFS_I(dst_inode));
	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;
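
	/*
	 * When a synchronous copy was requested, reserve room for the commit
	 * verifier so it can be compared against the write verifier returned
	 * by the COPY below.
	 */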
	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
					   dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				       dst, dst_lock,
				       &args, &res,
				       nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if ((err == -ENOTSUPP ||
		     err == -NFS4ERR_OFFLOAD_DENIED) &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
			   args.sync != res.synchronous) {
			args.sync = res.synchronous;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}
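
		/*
		 * Give both the source and destination exception handlers a
		 * look at the error; keep the destination's disposition
		 * unless it reports success.
		 */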
		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offload_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offload_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offload_data *data = calldata;

	trace_nfs4_offload_cancel(&data->args, task->tk_status);
	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_offload_release(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_offload_release,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offload_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int
_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
			   struct nfs42_offload_data *data)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = ctx->cred,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg,
				&data->args.osa_seq_args,
				&data->res.osr_seq_res, 1);
	trace_nfs4_offload_status(&data->args, status);
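	/*
	 * Translate NFS-level results into the errnos documented for
	 * nfs42_proc_offload_status() below.
	 */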
	switch (status) {
	case 0:
		break;

	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OLD_STATEID:
		/*
		 * Server does not recognize the COPY stateid. CB_OFFLOAD
		 * could have purged it, or server might have rebooted.
		 * Since COPY stateids don't have an associated inode,
		 * avoid triggering state recovery.
		 */
		status = -EBADF;
		break;
	case -NFS4ERR_NOTSUPP:
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
		status = -EOPNOTSUPP;
		break;
	}

	return status;
}

/**
 * nfs42_proc_offload_status - Poll completion status of an async copy operation
 * @dst: handle of file being copied into
 * @stateid: copy stateid (from async COPY result)
 * @copied: OUT: number of bytes copied so far
 *
 * Return values:
 * %0: Server returned an NFS4_OK completion status
 * %-EINPROGRESS: Server returned no completion status
 * %-EREMOTEIO: Server returned an error completion status
 * %-EBADF: Server did not recognize the copy stateid
 * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
 * %-ERESTARTSYS: Wait interrupted by signal
 *
 * Other negative errnos indicate the client could not complete the
 * request.
 */
static int
nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
{
	struct inode *inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = {
		.inode = inode,
	};
	struct nfs42_offload_data *data;
	int status;

	if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->seq_server = server;
	data->args.osa_src_fh = NFS_FH(inode);
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	exception.stateid = &data->args.osa_stateid;
	do {
		status = _nfs42_proc_offload_status(server, dst, data);
		if (status == -EOPNOTSUPP)
			goto out;
		status = nfs4_handle_exception(server, status, &exception);
	} while (exception.retry);
	if (status)
		goto out;

	*copied = data->res.osr_count;
	if (!data->res.complete_count)
		status = -EINPROGRESS;
	else if (data->res.osr_complete != NFS_OK)
		status = -EREMOTEIO;

out:
	kfree(data);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		status = PTR_ERR(l_ctx);
		goto out;
	}

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		goto out;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	trace_nfs4_copy_notify(file_inode(src), args, res, status);
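	/*
	 * The server does not implement COPY_NOTIFY; clear the capability
	 * so it is not sent again.
	 */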
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

out:
	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset,
				 int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
				     lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
						  offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_llseek(inode, &args, &res, status);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};
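
/*
 * Fire off a LAYOUTSTATS report as an asynchronous RPC; the layoutstat
 * data is cleaned up in nfs42_layoutstat_release() once the task is done.
 */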
int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	__u32 dst_bitmask[NFS_BITMASK_SZ];
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = dst_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	loff_t oldsize_dst = i_size_read(dst_inode);
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;
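
	/*
	 * Request the cache consistency attributes (including space used)
	 * for the destination so the inode can be updated from the CLONE
	 * reply.
	 */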
	nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
			 dst_inode, NFS_INO_INVALID_BLOCKS);

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		/* a zero-length count means clone to EOF in src */
		if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
			count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
		nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	trace_nfs4_removexattr(inode, name, ret);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}
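
/*
 * Send a single SETXATTR call. The attribute value is passed in pages,
 * and on success the change attribute and post-op attributes from the
 * reply are applied to the inode cache.
 */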
static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	__u32 bitmask[NFS_BITMASK_SZ];
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.bitmask = bitmask,
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	res.fattr = nfs_alloc_fattr();
	if (!res.fattr)
		return -ENOMEM;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0) {
			ret = np;
			goto out;
		}
	} else
		np = 0;

	nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask,
			 inode, NFS_INO_INVALID_CHANGE);

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);
	trace_nfs4_setxattr(inode, name, ret);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret) {
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
		ret = nfs_post_op_update_inode(inode, res.fattr);
	}

out:
	kfree(res.fattr);
	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				    void *buf, size_t buflen,
				    struct page **pages, size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	trace_nfs4_getxattr(inode, name, ret);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	trace_nfs4_listxattr(inode, ret);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}
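
/*
 * Retrieve an xattr value. Pages are allocated up front for the reply
 * (rounded up as described in the comment below), and the request is
 * retried through the usual NFSv4 exception handling.
 */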
1588 */ 1589 do { 1590 err = _nfs42_proc_getxattr(inode, name, buf, buflen, 1591 pages, np * PAGE_SIZE); 1592 if (err >= 0) 1593 break; 1594 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1595 &exception); 1596 } while (exception.retry); 1597 1598 out: 1599 while (--i >= 0) 1600 __free_page(pages[i]); 1601 kfree(pages); 1602 1603 return err; 1604 } 1605 1606 int nfs42_proc_setxattr(struct inode *inode, const char *name, 1607 const void *buf, size_t buflen, int flags) 1608 { 1609 struct nfs4_exception exception = { }; 1610 int err; 1611 1612 do { 1613 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags); 1614 if (!err) 1615 break; 1616 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1617 &exception); 1618 } while (exception.retry); 1619 1620 return err; 1621 } 1622 1623 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf, 1624 size_t buflen, u64 *cookiep, bool *eofp) 1625 { 1626 struct nfs4_exception exception = { }; 1627 ssize_t err; 1628 1629 do { 1630 err = _nfs42_proc_listxattrs(inode, buf, buflen, 1631 cookiep, eofp); 1632 if (err >= 0) 1633 break; 1634 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1635 &exception); 1636 } while (exception.retry); 1637 1638 return err; 1639 } 1640 1641 int nfs42_proc_removexattr(struct inode *inode, const char *name) 1642 { 1643 struct nfs4_exception exception = { }; 1644 int err; 1645 1646 do { 1647 err = _nfs42_proc_removexattr(inode, name); 1648 if (!err) 1649 break; 1650 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1651 &exception); 1652 } while (exception.retry); 1653 1654 return err; 1655 } 1656