// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC

static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
                                     u64 *copied);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
        struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
        unsigned short port = 2049;

        rcu_read_lock();
        naddr->netid_len = scnprintf(naddr->netid,
                                     sizeof(naddr->netid), "%s",
                                     rpc_peeraddr2str(clp->cl_rpcclient,
                                                      RPC_DISPLAY_NETID));
        naddr->addr_len = scnprintf(naddr->addr,
                                    sizeof(naddr->addr),
                                    "%s.%u.%u",
                                    rpc_peeraddr2str(clp->cl_rpcclient,
                                                     RPC_DISPLAY_ADDR),
                                    port >> 8, port & 255);
        rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        u32 bitmask[NFS_BITMASK_SZ];
        struct nfs42_falloc_args args = {
                .falloc_fh = NFS_FH(inode),
                .falloc_offset = offset,
                .falloc_length = len,
                .falloc_bitmask = bitmask,
        };
        struct nfs42_falloc_res res = {
                .falloc_server = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
                        lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
                         NFS_INO_INVALID_BLOCKS);

        res.falloc_fattr = nfs_alloc_fattr();
        if (!res.falloc_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == 0) {
                if (nfs_should_remove_suid(inode)) {
                        spin_lock(&inode->i_lock);
                        nfs_set_cache_invalid(inode,
                                NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
                        spin_unlock(&inode->i_lock);
                }
                status = nfs_post_op_update_inode_force_wcc(inode,
                                                            res.falloc_fattr);
        }
        if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
                trace_nfs4_fallocate(inode, &args, status);
        else
                trace_nfs4_deallocate(inode, &args, status);
        kfree(res.falloc_fattr);
        return status;
}
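
/*
 * Flush dirty data and call _nfs42_proc_fallocate() under the usual
 * NFSv4 exception handling, retrying until the error is either fatal
 * or the operation succeeds.  ALLOCATE, DEALLOCATE and ZERO_RANGE all
 * funnel through here with their respective rpc_message.
 */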
static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                                loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        int err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = inode;
        exception.state = lock->open_context->state;

        err = nfs_sync_inode(inode);
        if (err)
                goto out;

        do {
                err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);
out:
        nfs_put_lock_context(lock);
        return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        loff_t oldsize;
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
                return -EOPNOTSUPP;

        err = nfs_start_io_write(inode);
        if (err)
                return err;

        oldsize = i_size_read(inode);

        err = nfs42_proc_fallocate(&msg, filep, offset, len);

        if (err == 0)
                nfs_truncate_last_folio(inode->i_mapping, oldsize,
                                        offset + len);
        else if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
                                             NFS_CAP_ZERO_RANGE);

        nfs_end_io_write(inode);
        return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
                return -EOPNOTSUPP;

        err = nfs_start_io_write(inode);
        if (err)
                return err;

        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == 0)
                truncate_pagecache_range(inode, offset, (offset + len) - 1);
        if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
                                             NFS_CAP_ZERO_RANGE);

        nfs_end_io_write(inode);
        return err;
}

int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
        };
        struct inode *inode = file_inode(filep);
        loff_t oldsize;
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
                return -EOPNOTSUPP;

        err = nfs_start_io_write(inode);
        if (err)
                return err;

        oldsize = i_size_read(inode);
        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == 0) {
                nfs_truncate_last_folio(inode->i_mapping, oldsize,
                                        offset + len);
                truncate_pagecache_range(inode, offset, (offset + len) - 1);
        } else if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;

        nfs_end_io_write(inode);
        return err;
}

static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
                                       struct nfs_server *src_server,
                                       struct nfs4_copy_state *copy)
{
        spin_lock(&dst_server->nfs_client->cl_lock);
        list_del_init(&copy->copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);
        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_del_init(&copy->src_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }
}
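
/*
 * Wait for an asynchronous COPY to complete.  If the CB_OFFLOAD callback
 * has already arrived, reuse the copy state queued by the callback thread;
 * otherwise wait for the completion, polling OFFLOAD_STATUS with an
 * exponentially growing timeout (capped at half the lease time) in case
 * the callback was lost.
 */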
static int handle_async_copy(struct nfs42_copy_res *res,
                             struct nfs_server *dst_server,
                             struct nfs_server *src_server,
                             struct file *src,
                             struct file *dst,
                             nfs4_stateid *src_stateid,
                             bool *restart)
{
        struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
        struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
        struct nfs_open_context *src_ctx = nfs_file_open_context(src);
        struct nfs_client *clp = dst_server->nfs_client;
        unsigned long timeout = 3 * HZ;
        int status = NFS4_OK;
        u64 copied;

        copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        spin_lock(&dst_server->nfs_client->cl_lock);
        list_for_each_entry(iter,
                            &dst_server->nfs_client->pending_cb_stateids,
                            copies) {
                if (memcmp(&res->write_res.stateid, &iter->stateid,
                           NFS4_STATEID_SIZE))
                        continue;
                tmp_copy = iter;
                list_del(&iter->copies);
                break;
        }
        if (tmp_copy) {
                spin_unlock(&dst_server->nfs_client->cl_lock);
                kfree(copy);
                copy = tmp_copy;
                goto out;
        }

        memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
        init_completion(&copy->completion);
        copy->parent_dst_state = dst_ctx->state;
        copy->parent_src_state = src_ctx->state;

        list_add_tail(&copy->copies, &dst_server->ss_copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);

        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_add_tail(&copy->src_copies, &src_server->ss_src_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }

wait:
        status = wait_for_completion_interruptible_timeout(&copy->completion,
                                                           timeout);
        if (!status)
                goto timeout;
        nfs4_copy_dequeue_callback(dst_server, src_server, copy);
        if (status == -ERESTARTSYS) {
                goto out_cancel;
        } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
                status = -EAGAIN;
                *restart = true;
                goto out_cancel;
        }
out:
        res->write_res.count = copy->count;
        /* Copy out the updated write verifier provided by CB_OFFLOAD. */
        memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
        status = -copy->error;

out_free:
        kfree(copy);
        return status;
out_cancel:
        nfs42_do_offload_cancel_async(dst, &copy->stateid);
        if (!nfs42_files_from_same_server(src, dst))
                nfs42_do_offload_cancel_async(src, src_stateid);
        goto out_free;
timeout:
        timeout <<= 1;
        if (timeout > (clp->cl_lease_time >> 1))
                timeout = clp->cl_lease_time >> 1;
        status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
        if (status == -EINPROGRESS)
                goto wait;
        nfs4_copy_dequeue_callback(dst_server, src_server, copy);
        switch (status) {
        case 0:
                /* The server recognized the copy stateid, so it hasn't
                 * rebooted. Don't overwrite the verifier returned in the
                 * COPY result. */
                res->write_res.count = copied;
                goto out_free;
        case -EREMOTEIO:
                /* COPY operation failed on the server. */
                status = -EOPNOTSUPP;
                res->write_res.count = copied;
                goto out_free;
        case -EBADF:
                /* Server did not recognize the copy stateid. It has
                 * probably restarted and lost the plot. */
                res->write_res.count = 0;
                status = -EOPNOTSUPP;
                break;
        case -EOPNOTSUPP:
                /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when
                 * it has signed up for an async COPY, so server is not
                 * spec-compliant. */
                res->write_res.count = 0;
        }
        goto out_free;
}
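
/*
 * COMMIT data written by an uncommitted asynchronous COPY and compare the
 * commit verifier with the copy verifier; a mismatch is reported as -EAGAIN
 * so that the copy is redone.
 */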
static int process_copy_commit(struct file *dst, loff_t pos_dst,
                               struct nfs42_copy_res *res)
{
        struct nfs_commitres cres;
        int status = -ENOMEM;

        cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
        if (!cres.verf)
                goto out;

        status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
        if (status)
                goto out_free;
        if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                   &cres.verf->verifier)) {
                dprintk("commit verf differs from copy verf\n");
                status = -EAGAIN;
        }
out_free:
        kfree(cres.verf);
out:
        return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @file: pointer to destination file
 * @pos: destination offset
 * @len: copy length
 * @oldsize: length of the file prior to clone/copy
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
                                 loff_t oldsize)
{
        struct inode *inode = file_inode(file);
        struct address_space *mapping = file->f_mapping;
        loff_t newsize = pos + len;
        loff_t end = newsize - 1;

        nfs_truncate_last_folio(mapping, oldsize, pos);
        WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                                   end >> PAGE_SHIFT));

        spin_lock(&inode->i_lock);
        if (newsize > i_size_read(inode))
                i_size_write(inode, newsize);
        nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
                              NFS_INO_INVALID_CTIME |
                              NFS_INO_INVALID_MTIME |
                              NFS_INO_INVALID_BLOCKS);
        spin_unlock(&inode->i_lock);
}
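
/*
 * Issue a single COPY request.  The source range is flushed to the server
 * and the destination inode is synced first so the offloaded copy sees
 * current data.  Asynchronous replies are completed via handle_async_copy(),
 * and uncommitted data is committed through process_copy_commit() before
 * the number of bytes copied is returned.
 */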
static ssize_t _nfs42_proc_copy(struct file *src,
                                struct nfs_lock_context *src_lock,
                                struct file *dst,
                                struct nfs_lock_context *dst_lock,
                                struct nfs42_copy_args *args,
                                struct nfs42_copy_res *res,
                                struct nl4_server *nss,
                                nfs4_stateid *cnr_stateid,
                                bool *restart)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        struct inode *dst_inode = file_inode(dst);
        struct inode *src_inode = file_inode(src);
        struct nfs_server *dst_server = NFS_SERVER(dst_inode);
        struct nfs_server *src_server = NFS_SERVER(src_inode);
        loff_t pos_src = args->src_pos;
        loff_t pos_dst = args->dst_pos;
        loff_t oldsize_dst;
        size_t count = args->count;
        ssize_t status;

        if (nss) {
                args->cp_src = nss;
                nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
        } else {
                status = nfs4_set_rw_stateid(&args->src_stateid,
                                src_lock->open_context, src_lock, FMODE_READ);
                if (status) {
                        if (status == -EAGAIN)
                                status = -NFS4ERR_BAD_STATEID;
                        return status;
                }
        }
        status = nfs_filemap_write_and_wait_range(src->f_mapping,
                        pos_src, pos_src + (loff_t)count - 1);
        if (status)
                return status;

        status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        nfs_file_block_o_direct(NFS_I(dst_inode));
        status = nfs_sync_inode(dst_inode);
        if (status)
                return status;

        res->commit_res.verf = NULL;
        if (args->sync) {
                res->commit_res.verf =
                        kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
                if (!res->commit_res.verf)
                        return -ENOMEM;
        }
        set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
                &src_lock->open_context->state->flags);
        set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
                &dst_lock->open_context->state->flags);
        oldsize_dst = i_size_read(dst_inode);

        status = nfs4_call_sync(dst_server->client, dst_server, &msg,
                                &args->seq_args, &res->seq_res, 0);
        trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_COPY;
        if (status)
                goto out;

        if (args->sync &&
            nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                   &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
        }

        if (!res->synchronous) {
                status = handle_async_copy(res, dst_server, src_server, src,
                                           dst, &args->src_stateid, restart);
                if (status)
                        goto out;
        }

        if ((!res->synchronous || !args->sync) &&
            res->write_res.verifier.committed != NFS_FILE_SYNC) {
                status = process_copy_commit(dst, pos_dst, res);
                if (status)
                        goto out;
        }

        nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
        nfs_invalidate_atime(src_inode);
        status = res->write_res.count;
out:
        if (args->sync)
                kfree(res->commit_res.verf);
        return status;
}
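
/*
 * nfs42_proc_copy - issue a COPY (intra- or inter-server) and retry on
 * recoverable errors.  Errors such as ENOTSUPP or NFS4ERR_OFFLOAD_DENIED
 * are mapped to -EOPNOTSUPP so that the caller can fall back to a
 * generic copy.
 */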
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
                        struct file *dst, loff_t pos_dst, size_t count,
                        struct nl4_server *nss,
                        nfs4_stateid *cnr_stateid, bool sync)
{
        struct nfs_server *server = NFS_SERVER(file_inode(dst));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs42_copy_args args = {
                .src_fh = NFS_FH(file_inode(src)),
                .src_pos = pos_src,
                .dst_fh = NFS_FH(file_inode(dst)),
                .dst_pos = pos_dst,
                .count = count,
                .sync = sync,
        };
        struct nfs42_copy_res res;
        struct nfs4_exception src_exception = {
                .inode = file_inode(src),
                .stateid = &args.src_stateid,
        };
        struct nfs4_exception dst_exception = {
                .inode = file_inode(dst),
                .stateid = &args.dst_stateid,
        };
        ssize_t err, err2;
        bool restart = false;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.state = dst_lock->open_context->state;

        do {
                inode_lock(file_inode(dst));
                err = _nfs42_proc_copy(src, src_lock,
                                       dst, dst_lock,
                                       &args, &res,
                                       nss, cnr_stateid, &restart);
                inode_unlock(file_inode(dst));

                if (err >= 0)
                        break;
                if ((err == -ENOTSUPP ||
                     err == -NFS4ERR_OFFLOAD_DENIED) &&
                    nfs42_files_from_same_server(src, dst)) {
                        err = -EOPNOTSUPP;
                        break;
                } else if (err == -EAGAIN) {
                        if (!restart) {
                                dst_exception.retry = 1;
                                continue;
                        }
                        break;
                } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
                           args.sync != res.synchronous) {
                        args.sync = res.synchronous;
                        dst_exception.retry = 1;
                        continue;
                } else if ((err == -ESTALE ||
                            err == -NFS4ERR_OFFLOAD_DENIED ||
                            err == -ENOTSUPP) &&
                           !nfs42_files_from_same_server(src, dst)) {
                        nfs42_do_offload_cancel_async(src, &args.src_stateid);
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}

struct nfs42_offload_data {
        struct nfs_server *seq_server;
        struct nfs42_offload_status_args args;
        struct nfs42_offload_status_res res;
};

static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_offload_data *data = calldata;

        nfs4_setup_sequence(data->seq_server->nfs_client,
                            &data->args.osa_seq_args,
                            &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_offload_data *data = calldata;

        trace_nfs4_offload_cancel(&data->args, task->tk_status);
        nfs41_sequence_done(task, &data->res.osr_seq_res);
        if (task->tk_status &&
            nfs4_async_handle_error(task, data->seq_server, NULL,
                                    NULL) == -EAGAIN)
                rpc_restart_call_prepare(task);
}

static void nfs42_offload_release(void *data)
{
        kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
        .rpc_call_prepare = nfs42_offload_prepare,
        .rpc_call_done = nfs42_offload_cancel_done,
        .rpc_release = nfs42_offload_release,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
                                         nfs4_stateid *stateid)
{
        struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
        struct nfs42_offload_data *data = NULL;
        struct nfs_open_context *ctx = nfs_file_open_context(dst);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
                .rpc_cred = ctx->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = dst_server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_offload_cancel_ops,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
        };
        int status;

        if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
                return -EOPNOTSUPP;

        data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        data->seq_server = dst_server;
        data->args.osa_src_fh = NFS_FH(file_inode(dst));
        memcpy(&data->args.osa_stateid, stateid,
               sizeof(data->args.osa_stateid));
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
        task_setup_data.callback_data = data;
        nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
                           1, 0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
        rpc_put_task(task);
        return status;
}
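
/*
 * Issue a single OFFLOAD_STATUS request.  Stateid errors are mapped to
 * -EBADF rather than triggering state recovery, since copy stateids are
 * not associated with an open file on the client.
 */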
static int
_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
                           struct nfs42_offload_data *data)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = ctx->cred,
        };
        int status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &data->args.osa_seq_args,
                                &data->res.osr_seq_res, 1);
        trace_nfs4_offload_status(&data->args, status);
        switch (status) {
        case 0:
                break;

        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_OLD_STATEID:
                /*
                 * Server does not recognize the COPY stateid. CB_OFFLOAD
                 * could have purged it, or server might have rebooted.
                 * Since COPY stateids don't have an associated inode,
                 * avoid triggering state recovery.
                 */
                status = -EBADF;
                break;
        case -NFS4ERR_NOTSUPP:
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
                status = -EOPNOTSUPP;
                break;
        }

        return status;
}

/**
 * nfs42_proc_offload_status - Poll completion status of an async copy operation
 * @dst: handle of file being copied into
 * @stateid: copy stateid (from async COPY result)
 * @copied: OUT: number of bytes copied so far
 *
 * Return values:
 *   %0: Server returned an NFS4_OK completion status
 *   %-EINPROGRESS: Server returned no completion status
 *   %-EREMOTEIO: Server returned an error completion status
 *   %-EBADF: Server did not recognize the copy stateid
 *   %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
 *   %-ERESTARTSYS: Wait interrupted by signal
 *
 * Other negative errnos indicate the client could not complete the
 * request.
 */
static int
nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
{
        struct inode *inode = file_inode(dst);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_exception exception = {
                .inode = inode,
        };
        struct nfs42_offload_data *data;
        int status;

        if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
                return -EOPNOTSUPP;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->seq_server = server;
        data->args.osa_src_fh = NFS_FH(inode);
        memcpy(&data->args.osa_stateid, stateid,
               sizeof(data->args.osa_stateid));
        exception.stateid = &data->args.osa_stateid;
        do {
                status = _nfs42_proc_offload_status(server, dst, data);
                if (status == -EOPNOTSUPP)
                        goto out;
                status = nfs4_handle_exception(server, status, &exception);
        } while (exception.retry);
        if (status)
                goto out;

        *copied = data->res.osr_count;
        if (!data->res.complete_count)
                status = -EINPROGRESS;
        else if (data->res.osr_complete != NFS_OK)
                status = -EREMOTEIO;

out:
        kfree(data);
        return status;
}
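
/*
 * Send COPY_NOTIFY to the source server so that it will allow the
 * destination server identified by the supplied network address to read
 * the source file during a subsequent inter-server COPY.
 */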
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
                                   struct nfs42_copy_notify_args *args,
                                   struct nfs42_copy_notify_res *res)
{
        struct nfs_server *src_server = NFS_SERVER(file_inode(src));
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        int status;
        struct nfs_open_context *ctx;
        struct nfs_lock_context *l_ctx;

        ctx = get_nfs_open_context(nfs_file_open_context(src));
        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx)) {
                status = PTR_ERR(l_ctx);
                goto out;
        }

        status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
                                     FMODE_READ);
        nfs_put_lock_context(l_ctx);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                goto out;
        }

        status = nfs4_call_sync(src_server->client, src_server, &msg,
                                &args->cna_seq_args, &res->cnr_seq_res, 0);
        trace_nfs4_copy_notify(file_inode(src), args, res, status);
        if (status == -ENOTSUPP)
                src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

out:
        put_nfs_open_context(nfs_file_open_context(src));
        return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
                           struct nfs42_copy_notify_res *res)
{
        struct nfs_server *src_server = NFS_SERVER(file_inode(src));
        struct nfs42_copy_notify_args *args;
        struct nfs4_exception exception = {
                .inode = file_inode(src),
        };
        int status;

        if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
                return -EOPNOTSUPP;

        args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
        if (args == NULL)
                return -ENOMEM;

        args->cna_src_fh = NFS_FH(file_inode(src));
        args->cna_dst.nl4_type = NL4_NETADDR;
        nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
        exception.stateid = &args->cna_src_stateid;

        do {
                status = _nfs42_proc_copy_notify(src, dst, args, res);
                if (status == -ENOTSUPP) {
                        status = -EOPNOTSUPP;
                        goto out;
                }
                status = nfs4_handle_exception(src_server, status, &exception);
        } while (exception.retry);

out:
        kfree(args);
        return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, int whence)
{
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
                .sa_fh = NFS_FH(inode),
                .sa_offset = offset,
                .sa_what = (whence == SEEK_HOLE) ?
                        NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
        };
        struct nfs42_seek_res res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        struct nfs_server *server = NFS_SERVER(inode);
        int status;

        if (!nfs_server_capable(inode, NFS_CAP_SEEK))
                return -ENOTSUPP;

        status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
                                     lock, FMODE_READ);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        status = nfs_filemap_write_and_wait_range(inode->i_mapping,
                                                  offset, LLONG_MAX);
        if (status)
                return status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &args.seq_args, &res.seq_res, 0);
        trace_nfs4_llseek(inode, &args, &res, status);
        if (status == -ENOTSUPP)
                server->caps &= ~NFS_CAP_SEEK;
        if (status)
                return status;

        if (whence == SEEK_DATA && res.sr_eof)
                return -NFS4ERR_NXIO;
        else
                return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        loff_t err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = file_inode(filep);
        exception.state = lock->open_context->state;

        do {
                err = _nfs42_proc_llseek(filep, lock, offset, whence);
                if (err >= 0)
                        break;
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);

        nfs_put_lock_context(lock);
        return err;
}
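
/*
 * LAYOUTSTATS is sent as an asynchronous RPC.  The prepare callback
 * re-checks that the layout is still valid and copies the current layout
 * stateid into the arguments just before the call is transmitted.
 */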
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (!pnfs_layout_is_valid(lo)) {
                spin_unlock(&inode->i_lock);
                rpc_exit(task, 0);
                return;
        }
        nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
        spin_unlock(&inode->i_lock);
        nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
                            &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.stateid,
                                       &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                        nfs_commit_inode(inode, 0);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
        }

        trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
        int i;

        for (i = 0; i < data->args.num_dev; i++) {
                if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
                        devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
        }

        pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
        smp_mb__after_atomic();
        nfs_iput_and_deactive(data->inode);
        kfree(data->args.devinfo);
        kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
        .rpc_call_prepare = nfs42_layoutstat_prepare,
        .rpc_call_done = nfs42_layoutstat_done,
        .rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
                                   struct nfs42_layoutstat_data *data)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
        };
        struct rpc_task_setup task_setup = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_layoutstat_ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
        };
        struct rpc_task *task;

        data->inode = nfs_igrab_and_active(data->args.inode);
        if (!data->inode) {
                nfs42_layoutstat_release(data);
                return -EAGAIN;
        }
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
        struct nfs42_layouterror_data *data;
        struct inode *inode = lseg->pls_layout->plh_inode;

        data = kzalloc(sizeof(*data), gfp_flags);
        if (data) {
                data->args.inode = data->inode = nfs_igrab_and_active(inode);
                if (data->inode) {
                        data->lseg = pnfs_get_lseg(lseg);
                        if (data->lseg)
                                return data;
                        nfs_iput_and_deactive(data->inode);
                }
                kfree(data);
        }
        return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
        pnfs_put_lseg(data->lseg);
        nfs_iput_and_deactive(data->inode);
        kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;
        struct inode *inode = data->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
        unsigned i;

        spin_lock(&inode->i_lock);
        if (!pnfs_layout_is_valid(lo)) {
                spin_unlock(&inode->i_lock);
                rpc_exit(task, 0);
                return;
        }
        for (i = 0; i < data->args.num_errors; i++)
                nfs4_stateid_copy(&data->args.errors[i].stateid,
                                  &lo->plh_stateid);
        spin_unlock(&inode->i_lock);
        nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
                            &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.errors[0].stateid,
                                       &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                        nfs_commit_inode(inode, 0);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.errors[0].stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.errors[0].stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
        }

        trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
                               task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;

        nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
        .rpc_call_prepare = nfs42_layouterror_prepare,
        .rpc_call_done = nfs42_layouterror_done,
        .rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
                           const struct nfs42_layout_error *errors, size_t n)
{
        struct inode *inode = lseg->pls_layout->plh_inode;
        struct nfs42_layouterror_data *data;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
        };
        struct rpc_task_setup task_setup = {
                .rpc_message = &msg,
                .callback_ops = &nfs42_layouterror_ops,
                .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
        };
        unsigned int i;

        if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
                return -EOPNOTSUPP;
        if (n > NFS42_LAYOUTERROR_MAX)
                return -EINVAL;
        data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
        if (!data)
                return -ENOMEM;
        for (i = 0; i < n; i++) {
                data->args.errors[i] = errors[i];
                data->args.num_errors++;
                data->res.num_errors++;
        }
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
        task_setup.callback_data = data;
        task_setup.rpc_client = NFS_SERVER(inode)->client;
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
                struct file *dst_f, struct nfs_lock_context *src_lock,
                struct nfs_lock_context *dst_lock, loff_t src_offset,
                loff_t dst_offset, loff_t count)
{
        struct inode *src_inode = file_inode(src_f);
        struct inode *dst_inode = file_inode(dst_f);
        struct nfs_server *server = NFS_SERVER(dst_inode);
        __u32 dst_bitmask[NFS_BITMASK_SZ];
        struct nfs42_clone_args args = {
                .src_fh = NFS_FH(src_inode),
                .dst_fh = NFS_FH(dst_inode),
                .src_offset = src_offset,
                .dst_offset = dst_offset,
                .count = count,
                .dst_bitmask = dst_bitmask,
        };
        struct nfs42_clone_res res = {
                .server = server,
        };
        loff_t oldsize_dst = i_size_read(dst_inode);
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
                        src_lock, FMODE_READ);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }
        status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
                        dst_lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        res.dst_fattr = nfs_alloc_fattr();
        if (!res.dst_fattr)
                return -ENOMEM;

        nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
                         dst_inode, NFS_INO_INVALID_BLOCKS);

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        trace_nfs4_clone(src_inode, dst_inode, &args, status);
        if (status == 0) {
                /* a zero-length count means clone to EOF in src */
                if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
                        count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
                nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst);
                status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
        }

        kfree(res.dst_fattr);
        return status;
}
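
/*
 * nfs42_proc_clone - CLONE a range (or, when count is zero, everything up
 * to the source EOF) into the destination file, retrying recoverable
 * errors against both the source and destination open state.  CLONE is
 * dropped from the server capabilities if the server rejects it.
 */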
int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
                     loff_t src_offset, loff_t dst_offset, loff_t count)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
        };
        struct inode *inode = file_inode(src_f);
        struct nfs_server *server = NFS_SERVER(file_inode(src_f));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs4_exception src_exception = { };
        struct nfs4_exception dst_exception = { };
        int err, err2;

        if (!nfs_server_capable(inode, NFS_CAP_CLONE))
                return -EOPNOTSUPP;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.inode = file_inode(src_f);
        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.inode = file_inode(dst_f);
        dst_exception.state = dst_lock->open_context->state;

        do {
                err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
                                        src_offset, dst_offset, count);
                if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
                        NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs42_removexattrargs args = {
                .fh = NFS_FH(inode),
                .xattr_name = name,
        };
        struct nfs42_removexattrres res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        int ret;
        unsigned long timestamp = jiffies;

        ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
                             &res.seq_res, 1);
        trace_nfs4_removexattr(inode, name, ret);
        if (!ret)
                nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

        return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
                                const void *buf, size_t buflen, int flags)
{
        struct nfs_server *server = NFS_SERVER(inode);
        __u32 bitmask[NFS_BITMASK_SZ];
        struct page *pages[NFS4XATTR_MAXPAGES];
        struct nfs42_setxattrargs arg = {
                .fh = NFS_FH(inode),
                .bitmask = bitmask,
                .xattr_pages = pages,
                .xattr_len = buflen,
                .xattr_name = name,
                .xattr_flags = flags,
        };
        struct nfs42_setxattrres res = {
                .server = server,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        int ret, np;
        unsigned long timestamp = jiffies;

        if (buflen > server->sxasize)
                return -ERANGE;

        res.fattr = nfs_alloc_fattr();
        if (!res.fattr)
                return -ENOMEM;

        if (buflen > 0) {
                np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
                if (np < 0) {
                        ret = np;
                        goto out;
                }
        } else
                np = 0;

        nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask,
                         inode, NFS_INO_INVALID_CHANGE);

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 1);
        trace_nfs4_setxattr(inode, name, ret);

        for (; np > 0; np--)
                put_page(pages[np - 1]);

        if (!ret) {
                nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
                ret = nfs_post_op_update_inode(inode, res.fattr);
        }

out:
        kfree(res.fattr);
        return ret;
}
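
/*
 * Issue a single GETXATTR request.  The reply lands in the caller's page
 * array and is always added to the xattr cache; if a buffer was supplied,
 * the value is copied out, or -ERANGE returned when it does not fit.
 */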
static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
                                void *buf, size_t buflen, struct page **pages,
                                size_t plen)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs42_getxattrargs arg = {
                .fh = NFS_FH(inode),
                .xattr_name = name,
        };
        struct nfs42_getxattrres res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        ssize_t ret;

        arg.xattr_len = plen;
        arg.xattr_pages = pages;

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 0);
        trace_nfs4_getxattr(inode, name, ret);
        if (ret < 0)
                return ret;

        /*
         * Normally, the caching is done one layer up, but for successful
         * RPCs, always cache the result here, even if the caller was
         * just querying the length, or if the reply was too big for
         * the caller. This avoids a second RPC in the case of the
         * common query-alloc-retrieve cycle for xattrs.
         *
         * Note that xattr_len is always capped to XATTR_SIZE_MAX.
         */

        nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

        if (buflen) {
                if (res.xattr_len > buflen)
                        return -ERANGE;
                _copy_from_pages(buf, pages, 0, res.xattr_len);
        }

        return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
                                size_t buflen, u64 *cookiep, bool *eofp)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct page **pages;
        struct nfs42_listxattrsargs arg = {
                .fh = NFS_FH(inode),
                .cookie = *cookiep,
        };
        struct nfs42_listxattrsres res = {
                .eof = false,
                .xattr_buf = buf,
                .xattr_len = buflen,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        u32 xdrlen;
        int ret, np, i;

        ret = -ENOMEM;
        res.scratch = folio_alloc(GFP_KERNEL, 0);
        if (!res.scratch)
                goto out;

        xdrlen = nfs42_listxattr_xdrsize(buflen);
        if (xdrlen > server->lxasize)
                xdrlen = server->lxasize;
        np = xdrlen / PAGE_SIZE + 1;

        pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto out_free_scratch;
        for (i = 0; i < np; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out_free_pages;
        }

        arg.xattr_pages = pages;
        arg.count = xdrlen;

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 0);
        trace_nfs4_listxattr(inode, ret);

        if (ret >= 0) {
                ret = res.copied;
                *cookiep = res.cookie;
                *eofp = res.eof;
        }

out_free_pages:
        while (--np >= 0) {
                if (pages[np])
                        __free_page(pages[np]);
        }
        kfree(pages);
out_free_scratch:
        folio_put(res.scratch);
out:
        return ret;
}
1597 */ 1598 do { 1599 err = _nfs42_proc_getxattr(inode, name, buf, buflen, 1600 pages, np * PAGE_SIZE); 1601 if (err >= 0) 1602 break; 1603 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1604 &exception); 1605 } while (exception.retry); 1606 1607 out: 1608 while (--i >= 0) 1609 __free_page(pages[i]); 1610 kfree(pages); 1611 1612 return err; 1613 } 1614 1615 int nfs42_proc_setxattr(struct inode *inode, const char *name, 1616 const void *buf, size_t buflen, int flags) 1617 { 1618 struct nfs4_exception exception = { }; 1619 int err; 1620 1621 do { 1622 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags); 1623 if (!err) 1624 break; 1625 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1626 &exception); 1627 } while (exception.retry); 1628 1629 return err; 1630 } 1631 1632 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf, 1633 size_t buflen, u64 *cookiep, bool *eofp) 1634 { 1635 struct nfs4_exception exception = { }; 1636 ssize_t err; 1637 1638 do { 1639 err = _nfs42_proc_listxattrs(inode, buf, buflen, 1640 cookiep, eofp); 1641 if (err >= 0) 1642 break; 1643 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1644 &exception); 1645 } while (exception.retry); 1646 1647 return err; 1648 } 1649 1650 int nfs42_proc_removexattr(struct inode *inode, const char *name) 1651 { 1652 struct nfs4_exception exception = { }; 1653 int err; 1654 1655 do { 1656 err = _nfs42_proc_removexattr(inode, name); 1657 if (!err) 1658 break; 1659 err = nfs4_handle_exception(NFS_SERVER(inode), err, 1660 &exception); 1661 } while (exception.retry); 1662 1663 return err; 1664 } 1665