1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/net/sunrpc/clnt.c 4 * 5 * This file contains the high-level RPC interface. 6 * It is modeled as a finite state machine to support both synchronous 7 * and asynchronous requests. 8 * 9 * - RPC header generation and argument serialization. 10 * - Credential refresh. 11 * - TCP connect handling. 12 * - Retry of operation when it is suspected the operation failed because 13 * of uid squashing on the server, or when the credentials were stale 14 * and need to be refreshed, or when a packet was damaged in transit. 15 * This may be have to be moved to the VFS layer. 16 * 17 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 18 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 19 */ 20 21 22 #include <linux/module.h> 23 #include <linux/types.h> 24 #include <linux/kallsyms.h> 25 #include <linux/mm.h> 26 #include <linux/namei.h> 27 #include <linux/mount.h> 28 #include <linux/slab.h> 29 #include <linux/rcupdate.h> 30 #include <linux/utsname.h> 31 #include <linux/workqueue.h> 32 #include <linux/in.h> 33 #include <linux/in6.h> 34 #include <linux/un.h> 35 36 #include <linux/sunrpc/clnt.h> 37 #include <linux/sunrpc/addr.h> 38 #include <linux/sunrpc/rpc_pipe_fs.h> 39 #include <linux/sunrpc/metrics.h> 40 #include <linux/sunrpc/bc_xprt.h> 41 #include <trace/events/sunrpc.h> 42 43 #include "sunrpc.h" 44 #include "sysfs.h" 45 #include "netns.h" 46 47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 48 # define RPCDBG_FACILITY RPCDBG_CALL 49 #endif 50 51 /* 52 * All RPC clients are linked into this list 53 */ 54 55 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 56 57 58 static void call_start(struct rpc_task *task); 59 static void call_reserve(struct rpc_task *task); 60 static void call_reserveresult(struct rpc_task *task); 61 static void call_allocate(struct rpc_task *task); 62 static void call_encode(struct rpc_task *task); 63 static void call_decode(struct rpc_task *task); 64 static void call_bind(struct rpc_task *task); 65 static void call_bind_status(struct rpc_task *task); 66 static void call_transmit(struct rpc_task *task); 67 static void call_status(struct rpc_task *task); 68 static void call_transmit_status(struct rpc_task *task); 69 static void call_refresh(struct rpc_task *task); 70 static void call_refreshresult(struct rpc_task *task); 71 static void call_connect(struct rpc_task *task); 72 static void call_connect_status(struct rpc_task *task); 73 74 static int rpc_encode_header(struct rpc_task *task, 75 struct xdr_stream *xdr); 76 static int rpc_decode_header(struct rpc_task *task, 77 struct xdr_stream *xdr); 78 static int rpc_ping(struct rpc_clnt *clnt); 79 static int rpc_ping_noreply(struct rpc_clnt *clnt); 80 static void rpc_check_timeout(struct rpc_task *task); 81 82 static void rpc_register_client(struct rpc_clnt *clnt) 83 { 84 struct net *net = rpc_net_ns(clnt); 85 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 86 87 spin_lock(&sn->rpc_client_lock); 88 list_add(&clnt->cl_clients, &sn->all_clients); 89 spin_unlock(&sn->rpc_client_lock); 90 } 91 92 static void rpc_unregister_client(struct rpc_clnt *clnt) 93 { 94 struct net *net = rpc_net_ns(clnt); 95 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 96 97 spin_lock(&sn->rpc_client_lock); 98 list_del(&clnt->cl_clients); 99 spin_unlock(&sn->rpc_client_lock); 100 } 101 102 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) 103 { 104 rpc_remove_client_dir(clnt); 105 } 106 107 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) 108 { 109 struct net *net = rpc_net_ns(clnt); 110 struct super_block *pipefs_sb; 111 112 pipefs_sb = rpc_get_sb_net(net); 113 if (pipefs_sb) { 114 if (pipefs_sb == clnt->pipefs_sb) 115 __rpc_clnt_remove_pipedir(clnt); 116 rpc_put_sb_net(net); 117 } 118 } 119 120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, 121 struct rpc_clnt *clnt) 122 { 123 static uint32_t clntid; 124 const char *dir_name = clnt->cl_program->pipe_dir_name; 125 char name[15]; 126 struct dentry *dir, *dentry; 127 128 dir = rpc_d_lookup_sb(sb, dir_name); 129 if (dir == NULL) { 130 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name); 131 return dir; 132 } 133 for (;;) { 134 snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 135 name[sizeof(name) - 1] = '\0'; 136 dentry = rpc_create_client_dir(dir, name, clnt); 137 if (!IS_ERR(dentry)) 138 break; 139 if (dentry == ERR_PTR(-EEXIST)) 140 continue; 141 printk(KERN_INFO "RPC: Couldn't create pipefs entry" 142 " %s/%s, error %ld\n", 143 dir_name, name, PTR_ERR(dentry)); 144 break; 145 } 146 dput(dir); 147 return dentry; 148 } 149 150 static int 151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt) 152 { 153 struct dentry *dentry; 154 155 clnt->pipefs_sb = pipefs_sb; 156 157 if (clnt->cl_program->pipe_dir_name != NULL) { 158 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt); 159 if (IS_ERR(dentry)) 160 return PTR_ERR(dentry); 161 } 162 return 0; 163 } 164 165 static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) 166 { 167 if (clnt->cl_program->pipe_dir_name == NULL) 168 return 1; 169 170 switch (event) { 171 case RPC_PIPEFS_MOUNT: 172 if (clnt->cl_pipedir_objects.pdh_dentry != NULL) 173 return 1; 174 if (refcount_read(&clnt->cl_count) == 0) 175 return 1; 176 break; 177 case RPC_PIPEFS_UMOUNT: 178 if (clnt->cl_pipedir_objects.pdh_dentry == NULL) 179 return 1; 180 break; 181 } 182 return 0; 183 } 184 185 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, 186 struct super_block *sb) 187 { 188 struct dentry *dentry; 189 190 switch (event) { 191 case RPC_PIPEFS_MOUNT: 192 dentry = rpc_setup_pipedir_sb(sb, clnt); 193 if (!dentry) 194 return -ENOENT; 195 if (IS_ERR(dentry)) 196 return PTR_ERR(dentry); 197 break; 198 case RPC_PIPEFS_UMOUNT: 199 __rpc_clnt_remove_pipedir(clnt); 200 break; 201 default: 202 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); 203 return -ENOTSUPP; 204 } 205 return 0; 206 } 207 208 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, 209 struct super_block *sb) 210 { 211 int error = 0; 212 213 for (;; clnt = clnt->cl_parent) { 214 if (!rpc_clnt_skip_event(clnt, event)) 215 error = __rpc_clnt_handle_event(clnt, event, sb); 216 if (error || clnt == clnt->cl_parent) 217 break; 218 } 219 return error; 220 } 221 222 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) 223 { 224 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 225 struct rpc_clnt *clnt; 226 227 spin_lock(&sn->rpc_client_lock); 228 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 229 if (rpc_clnt_skip_event(clnt, event)) 230 continue; 231 spin_unlock(&sn->rpc_client_lock); 232 return clnt; 233 } 234 spin_unlock(&sn->rpc_client_lock); 235 return NULL; 236 } 237 238 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, 239 void *ptr) 240 { 241 struct super_block *sb = ptr; 242 struct rpc_clnt *clnt; 243 int error = 0; 244 245 while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) { 246 error = __rpc_pipefs_event(clnt, event, sb); 247 if (error) 248 break; 249 } 250 return error; 251 } 252 253 static struct notifier_block rpc_clients_block = { 254 .notifier_call = rpc_pipefs_event, 255 .priority = SUNRPC_PIPEFS_RPC_PRIO, 256 }; 257 258 int rpc_clients_notifier_register(void) 259 { 260 return rpc_pipefs_notifier_register(&rpc_clients_block); 261 } 262 263 void rpc_clients_notifier_unregister(void) 264 { 265 return rpc_pipefs_notifier_unregister(&rpc_clients_block); 266 } 267 268 static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, 269 struct rpc_xprt *xprt, 270 const struct rpc_timeout *timeout) 271 { 272 struct rpc_xprt *old; 273 274 spin_lock(&clnt->cl_lock); 275 old = rcu_dereference_protected(clnt->cl_xprt, 276 lockdep_is_held(&clnt->cl_lock)); 277 278 if (!xprt_bound(xprt)) 279 clnt->cl_autobind = 1; 280 281 clnt->cl_timeout = timeout; 282 rcu_assign_pointer(clnt->cl_xprt, xprt); 283 spin_unlock(&clnt->cl_lock); 284 285 return old; 286 } 287 288 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) 289 { 290 ssize_t copied; 291 292 copied = strscpy(clnt->cl_nodename, 293 nodename, sizeof(clnt->cl_nodename)); 294 295 clnt->cl_nodelen = copied < 0 296 ? sizeof(clnt->cl_nodename) - 1 297 : copied; 298 } 299 300 static int rpc_client_register(struct rpc_clnt *clnt, 301 rpc_authflavor_t pseudoflavor, 302 const char *client_name) 303 { 304 struct rpc_auth_create_args auth_args = { 305 .pseudoflavor = pseudoflavor, 306 .target_name = client_name, 307 }; 308 struct rpc_auth *auth; 309 struct net *net = rpc_net_ns(clnt); 310 struct super_block *pipefs_sb; 311 int err; 312 313 rpc_clnt_debugfs_register(clnt); 314 315 pipefs_sb = rpc_get_sb_net(net); 316 if (pipefs_sb) { 317 err = rpc_setup_pipedir(pipefs_sb, clnt); 318 if (err) 319 goto out; 320 } 321 322 rpc_register_client(clnt); 323 if (pipefs_sb) 324 rpc_put_sb_net(net); 325 326 auth = rpcauth_create(&auth_args, clnt); 327 if (IS_ERR(auth)) { 328 dprintk("RPC: Couldn't create auth handle (flavor %u)\n", 329 pseudoflavor); 330 err = PTR_ERR(auth); 331 goto err_auth; 332 } 333 return 0; 334 err_auth: 335 pipefs_sb = rpc_get_sb_net(net); 336 rpc_unregister_client(clnt); 337 __rpc_clnt_remove_pipedir(clnt); 338 out: 339 if (pipefs_sb) 340 rpc_put_sb_net(net); 341 rpc_sysfs_client_destroy(clnt); 342 rpc_clnt_debugfs_unregister(clnt); 343 return err; 344 } 345 346 static DEFINE_IDA(rpc_clids); 347 348 void rpc_cleanup_clids(void) 349 { 350 ida_destroy(&rpc_clids); 351 } 352 353 static int rpc_alloc_clid(struct rpc_clnt *clnt) 354 { 355 int clid; 356 357 clid = ida_alloc(&rpc_clids, GFP_KERNEL); 358 if (clid < 0) 359 return clid; 360 clnt->cl_clid = clid; 361 return 0; 362 } 363 364 static void rpc_free_clid(struct rpc_clnt *clnt) 365 { 366 ida_free(&rpc_clids, clnt->cl_clid); 367 } 368 369 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, 370 struct rpc_xprt_switch *xps, 371 struct rpc_xprt *xprt, 372 struct rpc_clnt *parent) 373 { 374 const struct rpc_program *program = args->program; 375 const struct rpc_version *version; 376 struct rpc_clnt *clnt = NULL; 377 const struct rpc_timeout *timeout; 378 const char *nodename = args->nodename; 379 int err; 380 381 err = rpciod_up(); 382 if (err) 383 goto out_no_rpciod; 384 385 err = -EINVAL; 386 if (args->version >= program->nrvers) 387 goto out_err; 388 version = program->version[args->version]; 389 if (version == NULL) 390 goto out_err; 391 392 err = -ENOMEM; 393 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 394 if (!clnt) 395 goto out_err; 396 clnt->cl_parent = parent ? : clnt; 397 clnt->cl_xprtsec = args->xprtsec; 398 399 err = rpc_alloc_clid(clnt); 400 if (err) 401 goto out_no_clid; 402 403 clnt->cl_cred = get_cred(args->cred); 404 clnt->cl_procinfo = version->procs; 405 clnt->cl_maxproc = version->nrprocs; 406 clnt->cl_prog = args->prognumber ? : program->number; 407 clnt->cl_vers = version->number; 408 clnt->cl_stats = program->stats; 409 clnt->cl_metrics = rpc_alloc_iostats(clnt); 410 rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects); 411 err = -ENOMEM; 412 if (clnt->cl_metrics == NULL) 413 goto out_no_stats; 414 clnt->cl_program = program; 415 INIT_LIST_HEAD(&clnt->cl_tasks); 416 spin_lock_init(&clnt->cl_lock); 417 418 timeout = xprt->timeout; 419 if (args->timeout != NULL) { 420 memcpy(&clnt->cl_timeout_default, args->timeout, 421 sizeof(clnt->cl_timeout_default)); 422 timeout = &clnt->cl_timeout_default; 423 } 424 425 rpc_clnt_set_transport(clnt, xprt, timeout); 426 xprt->main = true; 427 xprt_iter_init(&clnt->cl_xpi, xps); 428 xprt_switch_put(xps); 429 430 clnt->cl_rtt = &clnt->cl_rtt_default; 431 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); 432 433 refcount_set(&clnt->cl_count, 1); 434 435 if (nodename == NULL) 436 nodename = utsname()->nodename; 437 /* save the nodename */ 438 rpc_clnt_set_nodename(clnt, nodename); 439 440 rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt)); 441 err = rpc_client_register(clnt, args->authflavor, args->client_name); 442 if (err) 443 goto out_no_path; 444 if (parent) 445 refcount_inc(&parent->cl_count); 446 447 trace_rpc_clnt_new(clnt, xprt, args); 448 return clnt; 449 450 out_no_path: 451 rpc_free_iostats(clnt->cl_metrics); 452 out_no_stats: 453 put_cred(clnt->cl_cred); 454 rpc_free_clid(clnt); 455 out_no_clid: 456 kfree(clnt); 457 out_err: 458 rpciod_down(); 459 out_no_rpciod: 460 xprt_switch_put(xps); 461 xprt_put(xprt); 462 trace_rpc_clnt_new_err(program->name, args->servername, err); 463 return ERR_PTR(err); 464 } 465 466 static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, 467 struct rpc_xprt *xprt) 468 { 469 struct rpc_clnt *clnt = NULL; 470 struct rpc_xprt_switch *xps; 471 472 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { 473 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); 474 xps = args->bc_xprt->xpt_bc_xps; 475 xprt_switch_get(xps); 476 } else { 477 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 478 if (xps == NULL) { 479 xprt_put(xprt); 480 return ERR_PTR(-ENOMEM); 481 } 482 if (xprt->bc_xprt) { 483 xprt_switch_get(xps); 484 xprt->bc_xprt->xpt_bc_xps = xps; 485 } 486 } 487 clnt = rpc_new_client(args, xps, xprt, NULL); 488 if (IS_ERR(clnt)) 489 return clnt; 490 491 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 492 int err = rpc_ping(clnt); 493 if (err != 0) { 494 rpc_shutdown_client(clnt); 495 return ERR_PTR(err); 496 } 497 } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) { 498 int err = rpc_ping_noreply(clnt); 499 if (err != 0) { 500 rpc_shutdown_client(clnt); 501 return ERR_PTR(err); 502 } 503 } 504 505 clnt->cl_softrtry = 1; 506 if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) { 507 clnt->cl_softrtry = 0; 508 if (args->flags & RPC_CLNT_CREATE_SOFTERR) 509 clnt->cl_softerr = 1; 510 } 511 512 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 513 clnt->cl_autobind = 1; 514 if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT) 515 clnt->cl_noretranstimeo = 1; 516 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 517 clnt->cl_discrtry = 1; 518 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) 519 clnt->cl_chatty = 1; 520 521 return clnt; 522 } 523 524 /** 525 * rpc_create - create an RPC client and transport with one call 526 * @args: rpc_clnt create argument structure 527 * 528 * Creates and initializes an RPC transport and an RPC client. 529 * 530 * It can ping the server in order to determine if it is up, and to see if 531 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables 532 * this behavior so asynchronous tasks can also use rpc_create. 533 */ 534 struct rpc_clnt *rpc_create(struct rpc_create_args *args) 535 { 536 struct rpc_xprt *xprt; 537 struct xprt_create xprtargs = { 538 .net = args->net, 539 .ident = args->protocol, 540 .srcaddr = args->saddress, 541 .dstaddr = args->address, 542 .addrlen = args->addrsize, 543 .servername = args->servername, 544 .bc_xprt = args->bc_xprt, 545 .xprtsec = args->xprtsec, 546 .connect_timeout = args->connect_timeout, 547 .reconnect_timeout = args->reconnect_timeout, 548 }; 549 char servername[48]; 550 struct rpc_clnt *clnt; 551 int i; 552 553 if (args->bc_xprt) { 554 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); 555 xprt = args->bc_xprt->xpt_bc_xprt; 556 if (xprt) { 557 xprt_get(xprt); 558 return rpc_create_xprt(args, xprt); 559 } 560 } 561 562 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) 563 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; 564 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) 565 xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT; 566 /* 567 * If the caller chooses not to specify a hostname, whip 568 * up a string representation of the passed-in address. 569 */ 570 if (xprtargs.servername == NULL) { 571 struct sockaddr_un *sun = 572 (struct sockaddr_un *)args->address; 573 struct sockaddr_in *sin = 574 (struct sockaddr_in *)args->address; 575 struct sockaddr_in6 *sin6 = 576 (struct sockaddr_in6 *)args->address; 577 578 servername[0] = '\0'; 579 switch (args->address->sa_family) { 580 case AF_LOCAL: 581 if (sun->sun_path[0]) 582 snprintf(servername, sizeof(servername), "%s", 583 sun->sun_path); 584 else 585 snprintf(servername, sizeof(servername), "@%s", 586 sun->sun_path+1); 587 break; 588 case AF_INET: 589 snprintf(servername, sizeof(servername), "%pI4", 590 &sin->sin_addr.s_addr); 591 break; 592 case AF_INET6: 593 snprintf(servername, sizeof(servername), "%pI6", 594 &sin6->sin6_addr); 595 break; 596 default: 597 /* caller wants default server name, but 598 * address family isn't recognized. */ 599 return ERR_PTR(-EINVAL); 600 } 601 xprtargs.servername = servername; 602 } 603 604 xprt = xprt_create_transport(&xprtargs); 605 if (IS_ERR(xprt)) 606 return (struct rpc_clnt *)xprt; 607 608 /* 609 * By default, kernel RPC client connects from a reserved port. 610 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, 611 * but it is always enabled for rpciod, which handles the connect 612 * operation. 613 */ 614 xprt->resvport = 1; 615 if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) 616 xprt->resvport = 0; 617 xprt->reuseport = 0; 618 if (args->flags & RPC_CLNT_CREATE_REUSEPORT) 619 xprt->reuseport = 1; 620 621 clnt = rpc_create_xprt(args, xprt); 622 if (IS_ERR(clnt) || args->nconnect <= 1) 623 return clnt; 624 625 for (i = 0; i < args->nconnect - 1; i++) { 626 if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0) 627 break; 628 } 629 return clnt; 630 } 631 EXPORT_SYMBOL_GPL(rpc_create); 632 633 /* 634 * This function clones the RPC client structure. It allows us to share the 635 * same transport while varying parameters such as the authentication 636 * flavour. 637 */ 638 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, 639 struct rpc_clnt *clnt) 640 { 641 struct rpc_xprt_switch *xps; 642 struct rpc_xprt *xprt; 643 struct rpc_clnt *new; 644 int err; 645 646 err = -ENOMEM; 647 rcu_read_lock(); 648 xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 649 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 650 rcu_read_unlock(); 651 if (xprt == NULL || xps == NULL) { 652 xprt_put(xprt); 653 xprt_switch_put(xps); 654 goto out_err; 655 } 656 args->servername = xprt->servername; 657 args->nodename = clnt->cl_nodename; 658 659 new = rpc_new_client(args, xps, xprt, clnt); 660 if (IS_ERR(new)) 661 return new; 662 663 /* Turn off autobind on clones */ 664 new->cl_autobind = 0; 665 new->cl_softrtry = clnt->cl_softrtry; 666 new->cl_softerr = clnt->cl_softerr; 667 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 668 new->cl_discrtry = clnt->cl_discrtry; 669 new->cl_chatty = clnt->cl_chatty; 670 new->cl_principal = clnt->cl_principal; 671 new->cl_max_connect = clnt->cl_max_connect; 672 return new; 673 674 out_err: 675 trace_rpc_clnt_clone_err(clnt, err); 676 return ERR_PTR(err); 677 } 678 679 /** 680 * rpc_clone_client - Clone an RPC client structure 681 * 682 * @clnt: RPC client whose parameters are copied 683 * 684 * Returns a fresh RPC client or an ERR_PTR. 685 */ 686 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) 687 { 688 struct rpc_create_args args = { 689 .program = clnt->cl_program, 690 .prognumber = clnt->cl_prog, 691 .version = clnt->cl_vers, 692 .authflavor = clnt->cl_auth->au_flavor, 693 .cred = clnt->cl_cred, 694 }; 695 return __rpc_clone_client(&args, clnt); 696 } 697 EXPORT_SYMBOL_GPL(rpc_clone_client); 698 699 /** 700 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth 701 * 702 * @clnt: RPC client whose parameters are copied 703 * @flavor: security flavor for new client 704 * 705 * Returns a fresh RPC client or an ERR_PTR. 706 */ 707 struct rpc_clnt * 708 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) 709 { 710 struct rpc_create_args args = { 711 .program = clnt->cl_program, 712 .prognumber = clnt->cl_prog, 713 .version = clnt->cl_vers, 714 .authflavor = flavor, 715 .cred = clnt->cl_cred, 716 }; 717 return __rpc_clone_client(&args, clnt); 718 } 719 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); 720 721 /** 722 * rpc_switch_client_transport: switch the RPC transport on the fly 723 * @clnt: pointer to a struct rpc_clnt 724 * @args: pointer to the new transport arguments 725 * @timeout: pointer to the new timeout parameters 726 * 727 * This function allows the caller to switch the RPC transport for the 728 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS 729 * server, for instance. It assumes that the caller has ensured that 730 * there are no active RPC tasks by using some form of locking. 731 * 732 * Returns zero if "clnt" is now using the new xprt. Otherwise a 733 * negative errno is returned, and "clnt" continues to use the old 734 * xprt. 735 */ 736 int rpc_switch_client_transport(struct rpc_clnt *clnt, 737 struct xprt_create *args, 738 const struct rpc_timeout *timeout) 739 { 740 const struct rpc_timeout *old_timeo; 741 rpc_authflavor_t pseudoflavor; 742 struct rpc_xprt_switch *xps, *oldxps; 743 struct rpc_xprt *xprt, *old; 744 struct rpc_clnt *parent; 745 int err; 746 747 args->xprtsec = clnt->cl_xprtsec; 748 xprt = xprt_create_transport(args); 749 if (IS_ERR(xprt)) 750 return PTR_ERR(xprt); 751 752 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 753 if (xps == NULL) { 754 xprt_put(xprt); 755 return -ENOMEM; 756 } 757 758 pseudoflavor = clnt->cl_auth->au_flavor; 759 760 old_timeo = clnt->cl_timeout; 761 old = rpc_clnt_set_transport(clnt, xprt, timeout); 762 oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps); 763 764 rpc_unregister_client(clnt); 765 __rpc_clnt_remove_pipedir(clnt); 766 rpc_sysfs_client_destroy(clnt); 767 rpc_clnt_debugfs_unregister(clnt); 768 769 /* 770 * A new transport was created. "clnt" therefore 771 * becomes the root of a new cl_parent tree. clnt's 772 * children, if it has any, still point to the old xprt. 773 */ 774 parent = clnt->cl_parent; 775 clnt->cl_parent = clnt; 776 777 /* 778 * The old rpc_auth cache cannot be re-used. GSS 779 * contexts in particular are between a single 780 * client and server. 781 */ 782 err = rpc_client_register(clnt, pseudoflavor, NULL); 783 if (err) 784 goto out_revert; 785 786 synchronize_rcu(); 787 if (parent != clnt) 788 rpc_release_client(parent); 789 xprt_switch_put(oldxps); 790 xprt_put(old); 791 trace_rpc_clnt_replace_xprt(clnt); 792 return 0; 793 794 out_revert: 795 xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps); 796 rpc_clnt_set_transport(clnt, old, old_timeo); 797 clnt->cl_parent = parent; 798 rpc_client_register(clnt, pseudoflavor, NULL); 799 xprt_switch_put(xps); 800 xprt_put(xprt); 801 trace_rpc_clnt_replace_xprt_err(clnt); 802 return err; 803 } 804 EXPORT_SYMBOL_GPL(rpc_switch_client_transport); 805 806 static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt) 807 { 808 struct rpc_xprt_switch *xps; 809 810 rcu_read_lock(); 811 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 812 rcu_read_unlock(); 813 814 return xps; 815 } 816 817 static 818 int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, 819 void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) 820 { 821 struct rpc_xprt_switch *xps; 822 823 xps = rpc_clnt_xprt_switch_get(clnt); 824 if (xps == NULL) 825 return -EAGAIN; 826 func(xpi, xps); 827 xprt_switch_put(xps); 828 return 0; 829 } 830 831 static 832 int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) 833 { 834 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall); 835 } 836 837 static 838 int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, 839 struct rpc_xprt_iter *xpi) 840 { 841 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline); 842 } 843 844 /** 845 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports 846 * @clnt: pointer to client 847 * @fn: function to apply 848 * @data: void pointer to function data 849 * 850 * Iterates through the list of RPC transports currently attached to the 851 * client and applies the function fn(clnt, xprt, data). 852 * 853 * On error, the iteration stops, and the function returns the error value. 854 */ 855 int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, 856 int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), 857 void *data) 858 { 859 struct rpc_xprt_iter xpi; 860 int ret; 861 862 ret = rpc_clnt_xprt_iter_init(clnt, &xpi); 863 if (ret) 864 return ret; 865 for (;;) { 866 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 867 868 if (!xprt) 869 break; 870 ret = fn(clnt, xprt, data); 871 xprt_put(xprt); 872 if (ret < 0) 873 break; 874 } 875 xprt_iter_destroy(&xpi); 876 return ret; 877 } 878 EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); 879 880 /* 881 * Kill all tasks for the given client. 882 * XXX: kill their descendants as well? 883 */ 884 void rpc_killall_tasks(struct rpc_clnt *clnt) 885 { 886 struct rpc_task *rovr; 887 888 889 if (list_empty(&clnt->cl_tasks)) 890 return; 891 892 /* 893 * Spin lock all_tasks to prevent changes... 894 */ 895 trace_rpc_clnt_killall(clnt); 896 spin_lock(&clnt->cl_lock); 897 list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) 898 rpc_signal_task(rovr); 899 spin_unlock(&clnt->cl_lock); 900 } 901 EXPORT_SYMBOL_GPL(rpc_killall_tasks); 902 903 /** 904 * rpc_cancel_tasks - try to cancel a set of RPC tasks 905 * @clnt: Pointer to RPC client 906 * @error: RPC task error value to set 907 * @fnmatch: Pointer to selector function 908 * @data: User data 909 * 910 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. 911 * The argument @error must be a negative error value. 912 */ 913 unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, 914 bool (*fnmatch)(const struct rpc_task *, 915 const void *), 916 const void *data) 917 { 918 struct rpc_task *task; 919 unsigned long count = 0; 920 921 if (list_empty(&clnt->cl_tasks)) 922 return 0; 923 /* 924 * Spin lock all_tasks to prevent changes... 925 */ 926 spin_lock(&clnt->cl_lock); 927 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 928 if (!RPC_IS_ACTIVATED(task)) 929 continue; 930 if (!fnmatch(task, data)) 931 continue; 932 rpc_task_try_cancel(task, error); 933 count++; 934 } 935 spin_unlock(&clnt->cl_lock); 936 return count; 937 } 938 EXPORT_SYMBOL_GPL(rpc_cancel_tasks); 939 940 static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt, 941 struct rpc_xprt *xprt, void *dummy) 942 { 943 if (xprt_connected(xprt)) 944 xprt_force_disconnect(xprt); 945 return 0; 946 } 947 948 void rpc_clnt_disconnect(struct rpc_clnt *clnt) 949 { 950 rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); 951 } 952 EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); 953 954 /* 955 * Properly shut down an RPC client, terminating all outstanding 956 * requests. 957 */ 958 void rpc_shutdown_client(struct rpc_clnt *clnt) 959 { 960 might_sleep(); 961 962 trace_rpc_clnt_shutdown(clnt); 963 964 while (!list_empty(&clnt->cl_tasks)) { 965 rpc_killall_tasks(clnt); 966 wait_event_timeout(destroy_wait, 967 list_empty(&clnt->cl_tasks), 1*HZ); 968 } 969 970 rpc_release_client(clnt); 971 } 972 EXPORT_SYMBOL_GPL(rpc_shutdown_client); 973 974 /* 975 * Free an RPC client 976 */ 977 static void rpc_free_client_work(struct work_struct *work) 978 { 979 struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); 980 981 trace_rpc_clnt_free(clnt); 982 983 /* These might block on processes that might allocate memory, 984 * so they cannot be called in rpciod, so they are handled separately 985 * here. 986 */ 987 rpc_sysfs_client_destroy(clnt); 988 rpc_clnt_debugfs_unregister(clnt); 989 rpc_free_clid(clnt); 990 rpc_clnt_remove_pipedir(clnt); 991 xprt_put(rcu_dereference_raw(clnt->cl_xprt)); 992 993 kfree(clnt); 994 rpciod_down(); 995 } 996 static struct rpc_clnt * 997 rpc_free_client(struct rpc_clnt *clnt) 998 { 999 struct rpc_clnt *parent = NULL; 1000 1001 trace_rpc_clnt_release(clnt); 1002 if (clnt->cl_parent != clnt) 1003 parent = clnt->cl_parent; 1004 rpc_unregister_client(clnt); 1005 rpc_free_iostats(clnt->cl_metrics); 1006 clnt->cl_metrics = NULL; 1007 xprt_iter_destroy(&clnt->cl_xpi); 1008 put_cred(clnt->cl_cred); 1009 1010 INIT_WORK(&clnt->cl_work, rpc_free_client_work); 1011 schedule_work(&clnt->cl_work); 1012 return parent; 1013 } 1014 1015 /* 1016 * Free an RPC client 1017 */ 1018 static struct rpc_clnt * 1019 rpc_free_auth(struct rpc_clnt *clnt) 1020 { 1021 /* 1022 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to 1023 * release remaining GSS contexts. This mechanism ensures 1024 * that it can do so safely. 1025 */ 1026 if (clnt->cl_auth != NULL) { 1027 rpcauth_release(clnt->cl_auth); 1028 clnt->cl_auth = NULL; 1029 } 1030 if (refcount_dec_and_test(&clnt->cl_count)) 1031 return rpc_free_client(clnt); 1032 return NULL; 1033 } 1034 1035 /* 1036 * Release reference to the RPC client 1037 */ 1038 void 1039 rpc_release_client(struct rpc_clnt *clnt) 1040 { 1041 do { 1042 if (list_empty(&clnt->cl_tasks)) 1043 wake_up(&destroy_wait); 1044 if (refcount_dec_not_one(&clnt->cl_count)) 1045 break; 1046 clnt = rpc_free_auth(clnt); 1047 } while (clnt != NULL); 1048 } 1049 EXPORT_SYMBOL_GPL(rpc_release_client); 1050 1051 /** 1052 * rpc_bind_new_program - bind a new RPC program to an existing client 1053 * @old: old rpc_client 1054 * @program: rpc program to set 1055 * @vers: rpc program version 1056 * 1057 * Clones the rpc client and sets up a new RPC program. This is mainly 1058 * of use for enabling different RPC programs to share the same transport. 1059 * The Sun NFSv2/v3 ACL protocol can do this. 1060 */ 1061 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 1062 const struct rpc_program *program, 1063 u32 vers) 1064 { 1065 struct rpc_create_args args = { 1066 .program = program, 1067 .prognumber = program->number, 1068 .version = vers, 1069 .authflavor = old->cl_auth->au_flavor, 1070 .cred = old->cl_cred, 1071 }; 1072 struct rpc_clnt *clnt; 1073 int err; 1074 1075 clnt = __rpc_clone_client(&args, old); 1076 if (IS_ERR(clnt)) 1077 goto out; 1078 err = rpc_ping(clnt); 1079 if (err != 0) { 1080 rpc_shutdown_client(clnt); 1081 clnt = ERR_PTR(err); 1082 } 1083 out: 1084 return clnt; 1085 } 1086 EXPORT_SYMBOL_GPL(rpc_bind_new_program); 1087 1088 struct rpc_xprt * 1089 rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 1090 { 1091 struct rpc_xprt_switch *xps; 1092 1093 if (!xprt) 1094 return NULL; 1095 rcu_read_lock(); 1096 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 1097 atomic_long_inc(&xps->xps_queuelen); 1098 rcu_read_unlock(); 1099 atomic_long_inc(&xprt->queuelen); 1100 1101 return xprt; 1102 } 1103 1104 static void 1105 rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 1106 { 1107 struct rpc_xprt_switch *xps; 1108 1109 atomic_long_dec(&xprt->queuelen); 1110 rcu_read_lock(); 1111 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 1112 atomic_long_dec(&xps->xps_queuelen); 1113 rcu_read_unlock(); 1114 1115 xprt_put(xprt); 1116 } 1117 1118 void rpc_task_release_transport(struct rpc_task *task) 1119 { 1120 struct rpc_xprt *xprt = task->tk_xprt; 1121 1122 if (xprt) { 1123 task->tk_xprt = NULL; 1124 if (task->tk_client) 1125 rpc_task_release_xprt(task->tk_client, xprt); 1126 else 1127 xprt_put(xprt); 1128 } 1129 } 1130 EXPORT_SYMBOL_GPL(rpc_task_release_transport); 1131 1132 void rpc_task_release_client(struct rpc_task *task) 1133 { 1134 struct rpc_clnt *clnt = task->tk_client; 1135 1136 rpc_task_release_transport(task); 1137 if (clnt != NULL) { 1138 /* Remove from client task list */ 1139 spin_lock(&clnt->cl_lock); 1140 list_del(&task->tk_task); 1141 spin_unlock(&clnt->cl_lock); 1142 task->tk_client = NULL; 1143 1144 rpc_release_client(clnt); 1145 } 1146 } 1147 1148 static struct rpc_xprt * 1149 rpc_task_get_first_xprt(struct rpc_clnt *clnt) 1150 { 1151 struct rpc_xprt *xprt; 1152 1153 rcu_read_lock(); 1154 xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 1155 rcu_read_unlock(); 1156 return rpc_task_get_xprt(clnt, xprt); 1157 } 1158 1159 static struct rpc_xprt * 1160 rpc_task_get_next_xprt(struct rpc_clnt *clnt) 1161 { 1162 return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi)); 1163 } 1164 1165 static 1166 void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) 1167 { 1168 if (task->tk_xprt) { 1169 if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && 1170 (task->tk_flags & RPC_TASK_MOVEABLE))) 1171 return; 1172 xprt_release(task); 1173 xprt_put(task->tk_xprt); 1174 } 1175 if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) 1176 task->tk_xprt = rpc_task_get_first_xprt(clnt); 1177 else 1178 task->tk_xprt = rpc_task_get_next_xprt(clnt); 1179 } 1180 1181 static 1182 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) 1183 { 1184 rpc_task_set_transport(task, clnt); 1185 task->tk_client = clnt; 1186 refcount_inc(&clnt->cl_count); 1187 if (clnt->cl_softrtry) 1188 task->tk_flags |= RPC_TASK_SOFT; 1189 if (clnt->cl_softerr) 1190 task->tk_flags |= RPC_TASK_TIMEOUT; 1191 if (clnt->cl_noretranstimeo) 1192 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; 1193 /* Add to the client's list of all tasks */ 1194 spin_lock(&clnt->cl_lock); 1195 list_add_tail(&task->tk_task, &clnt->cl_tasks); 1196 spin_unlock(&clnt->cl_lock); 1197 } 1198 1199 static void 1200 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) 1201 { 1202 if (msg != NULL) { 1203 task->tk_msg.rpc_proc = msg->rpc_proc; 1204 task->tk_msg.rpc_argp = msg->rpc_argp; 1205 task->tk_msg.rpc_resp = msg->rpc_resp; 1206 task->tk_msg.rpc_cred = msg->rpc_cred; 1207 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1208 get_cred(task->tk_msg.rpc_cred); 1209 } 1210 } 1211 1212 /* 1213 * Default callback for async RPC calls 1214 */ 1215 static void 1216 rpc_default_callback(struct rpc_task *task, void *data) 1217 { 1218 } 1219 1220 static const struct rpc_call_ops rpc_default_ops = { 1221 .rpc_call_done = rpc_default_callback, 1222 }; 1223 1224 /** 1225 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 1226 * @task_setup_data: pointer to task initialisation data 1227 */ 1228 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) 1229 { 1230 struct rpc_task *task; 1231 1232 task = rpc_new_task(task_setup_data); 1233 if (IS_ERR(task)) 1234 return task; 1235 1236 if (!RPC_IS_ASYNC(task)) 1237 task->tk_flags |= RPC_TASK_CRED_NOREF; 1238 1239 rpc_task_set_client(task, task_setup_data->rpc_client); 1240 rpc_task_set_rpc_message(task, task_setup_data->rpc_message); 1241 1242 if (task->tk_action == NULL) 1243 rpc_call_start(task); 1244 1245 atomic_inc(&task->tk_count); 1246 rpc_execute(task); 1247 return task; 1248 } 1249 EXPORT_SYMBOL_GPL(rpc_run_task); 1250 1251 /** 1252 * rpc_call_sync - Perform a synchronous RPC call 1253 * @clnt: pointer to RPC client 1254 * @msg: RPC call parameters 1255 * @flags: RPC call flags 1256 */ 1257 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) 1258 { 1259 struct rpc_task *task; 1260 struct rpc_task_setup task_setup_data = { 1261 .rpc_client = clnt, 1262 .rpc_message = msg, 1263 .callback_ops = &rpc_default_ops, 1264 .flags = flags, 1265 }; 1266 int status; 1267 1268 WARN_ON_ONCE(flags & RPC_TASK_ASYNC); 1269 if (flags & RPC_TASK_ASYNC) { 1270 rpc_release_calldata(task_setup_data.callback_ops, 1271 task_setup_data.callback_data); 1272 return -EINVAL; 1273 } 1274 1275 task = rpc_run_task(&task_setup_data); 1276 if (IS_ERR(task)) 1277 return PTR_ERR(task); 1278 status = task->tk_status; 1279 rpc_put_task(task); 1280 return status; 1281 } 1282 EXPORT_SYMBOL_GPL(rpc_call_sync); 1283 1284 /** 1285 * rpc_call_async - Perform an asynchronous RPC call 1286 * @clnt: pointer to RPC client 1287 * @msg: RPC call parameters 1288 * @flags: RPC call flags 1289 * @tk_ops: RPC call ops 1290 * @data: user call data 1291 */ 1292 int 1293 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, 1294 const struct rpc_call_ops *tk_ops, void *data) 1295 { 1296 struct rpc_task *task; 1297 struct rpc_task_setup task_setup_data = { 1298 .rpc_client = clnt, 1299 .rpc_message = msg, 1300 .callback_ops = tk_ops, 1301 .callback_data = data, 1302 .flags = flags|RPC_TASK_ASYNC, 1303 }; 1304 1305 task = rpc_run_task(&task_setup_data); 1306 if (IS_ERR(task)) 1307 return PTR_ERR(task); 1308 rpc_put_task(task); 1309 return 0; 1310 } 1311 EXPORT_SYMBOL_GPL(rpc_call_async); 1312 1313 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1314 static void call_bc_encode(struct rpc_task *task); 1315 1316 /** 1317 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run 1318 * rpc_execute against it 1319 * @req: RPC request 1320 * @timeout: timeout values to use for this task 1321 */ 1322 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 1323 struct rpc_timeout *timeout) 1324 { 1325 struct rpc_task *task; 1326 struct rpc_task_setup task_setup_data = { 1327 .callback_ops = &rpc_default_ops, 1328 .flags = RPC_TASK_SOFTCONN | 1329 RPC_TASK_NO_RETRANS_TIMEOUT, 1330 }; 1331 1332 dprintk("RPC: rpc_run_bc_task req= %p\n", req); 1333 /* 1334 * Create an rpc_task to send the data 1335 */ 1336 task = rpc_new_task(&task_setup_data); 1337 if (IS_ERR(task)) { 1338 xprt_free_bc_request(req); 1339 return task; 1340 } 1341 1342 xprt_init_bc_request(req, task, timeout); 1343 1344 task->tk_action = call_bc_encode; 1345 atomic_inc(&task->tk_count); 1346 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); 1347 rpc_execute(task); 1348 1349 dprintk("RPC: rpc_run_bc_task: task= %p\n", task); 1350 return task; 1351 } 1352 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1353 1354 /** 1355 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages 1356 * @req: RPC request to prepare 1357 * @pages: vector of struct page pointers 1358 * @base: offset in first page where receive should start, in bytes 1359 * @len: expected size of the upper layer data payload, in bytes 1360 * @hdrsize: expected size of upper layer reply header, in XDR words 1361 * 1362 */ 1363 void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, 1364 unsigned int base, unsigned int len, 1365 unsigned int hdrsize) 1366 { 1367 hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; 1368 1369 xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); 1370 trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf); 1371 } 1372 EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages); 1373 1374 void 1375 rpc_call_start(struct rpc_task *task) 1376 { 1377 task->tk_action = call_start; 1378 } 1379 EXPORT_SYMBOL_GPL(rpc_call_start); 1380 1381 /** 1382 * rpc_peeraddr - extract remote peer address from clnt's xprt 1383 * @clnt: RPC client structure 1384 * @buf: target buffer 1385 * @bufsize: length of target buffer 1386 * 1387 * Returns the number of bytes that are actually in the stored address. 1388 */ 1389 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) 1390 { 1391 size_t bytes; 1392 struct rpc_xprt *xprt; 1393 1394 rcu_read_lock(); 1395 xprt = rcu_dereference(clnt->cl_xprt); 1396 1397 bytes = xprt->addrlen; 1398 if (bytes > bufsize) 1399 bytes = bufsize; 1400 memcpy(buf, &xprt->addr, bytes); 1401 rcu_read_unlock(); 1402 1403 return bytes; 1404 } 1405 EXPORT_SYMBOL_GPL(rpc_peeraddr); 1406 1407 /** 1408 * rpc_peeraddr2str - return remote peer address in printable format 1409 * @clnt: RPC client structure 1410 * @format: address format 1411 * 1412 * NB: the lifetime of the memory referenced by the returned pointer is 1413 * the same as the rpc_xprt itself. As long as the caller uses this 1414 * pointer, it must hold the RCU read lock. 1415 */ 1416 const char *rpc_peeraddr2str(struct rpc_clnt *clnt, 1417 enum rpc_display_format_t format) 1418 { 1419 struct rpc_xprt *xprt; 1420 1421 xprt = rcu_dereference(clnt->cl_xprt); 1422 1423 if (xprt->address_strings[format] != NULL) 1424 return xprt->address_strings[format]; 1425 else 1426 return "unprintable"; 1427 } 1428 EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 1429 1430 static const struct sockaddr_in rpc_inaddr_loopback = { 1431 .sin_family = AF_INET, 1432 .sin_addr.s_addr = htonl(INADDR_ANY), 1433 }; 1434 1435 static const struct sockaddr_in6 rpc_in6addr_loopback = { 1436 .sin6_family = AF_INET6, 1437 .sin6_addr = IN6ADDR_ANY_INIT, 1438 }; 1439 1440 /* 1441 * Try a getsockname() on a connected datagram socket. Using a 1442 * connected datagram socket prevents leaving a socket in TIME_WAIT. 1443 * This conserves the ephemeral port number space. 1444 * 1445 * Returns zero and fills in "buf" if successful; otherwise, a 1446 * negative errno is returned. 1447 */ 1448 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, 1449 struct sockaddr *buf) 1450 { 1451 struct socket *sock; 1452 int err; 1453 1454 err = __sock_create(net, sap->sa_family, 1455 SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1456 if (err < 0) { 1457 dprintk("RPC: can't create UDP socket (%d)\n", err); 1458 goto out; 1459 } 1460 1461 switch (sap->sa_family) { 1462 case AF_INET: 1463 err = kernel_bind(sock, 1464 (struct sockaddr *)&rpc_inaddr_loopback, 1465 sizeof(rpc_inaddr_loopback)); 1466 break; 1467 case AF_INET6: 1468 err = kernel_bind(sock, 1469 (struct sockaddr *)&rpc_in6addr_loopback, 1470 sizeof(rpc_in6addr_loopback)); 1471 break; 1472 default: 1473 err = -EAFNOSUPPORT; 1474 goto out_release; 1475 } 1476 if (err < 0) { 1477 dprintk("RPC: can't bind UDP socket (%d)\n", err); 1478 goto out_release; 1479 } 1480 1481 err = kernel_connect(sock, sap, salen, 0); 1482 if (err < 0) { 1483 dprintk("RPC: can't connect UDP socket (%d)\n", err); 1484 goto out_release; 1485 } 1486 1487 err = kernel_getsockname(sock, buf); 1488 if (err < 0) { 1489 dprintk("RPC: getsockname failed (%d)\n", err); 1490 goto out_release; 1491 } 1492 1493 err = 0; 1494 if (buf->sa_family == AF_INET6) { 1495 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; 1496 sin6->sin6_scope_id = 0; 1497 } 1498 dprintk("RPC: %s succeeded\n", __func__); 1499 1500 out_release: 1501 sock_release(sock); 1502 out: 1503 return err; 1504 } 1505 1506 /* 1507 * Scraping a connected socket failed, so we don't have a useable 1508 * local address. Fallback: generate an address that will prevent 1509 * the server from calling us back. 1510 * 1511 * Returns zero and fills in "buf" if successful; otherwise, a 1512 * negative errno is returned. 1513 */ 1514 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) 1515 { 1516 switch (family) { 1517 case AF_INET: 1518 if (buflen < sizeof(rpc_inaddr_loopback)) 1519 return -EINVAL; 1520 memcpy(buf, &rpc_inaddr_loopback, 1521 sizeof(rpc_inaddr_loopback)); 1522 break; 1523 case AF_INET6: 1524 if (buflen < sizeof(rpc_in6addr_loopback)) 1525 return -EINVAL; 1526 memcpy(buf, &rpc_in6addr_loopback, 1527 sizeof(rpc_in6addr_loopback)); 1528 break; 1529 default: 1530 dprintk("RPC: %s: address family not supported\n", 1531 __func__); 1532 return -EAFNOSUPPORT; 1533 } 1534 dprintk("RPC: %s: succeeded\n", __func__); 1535 return 0; 1536 } 1537 1538 /** 1539 * rpc_localaddr - discover local endpoint address for an RPC client 1540 * @clnt: RPC client structure 1541 * @buf: target buffer 1542 * @buflen: size of target buffer, in bytes 1543 * 1544 * Returns zero and fills in "buf" and "buflen" if successful; 1545 * otherwise, a negative errno is returned. 1546 * 1547 * This works even if the underlying transport is not currently connected, 1548 * or if the upper layer never previously provided a source address. 1549 * 1550 * The result of this function call is transient: multiple calls in 1551 * succession may give different results, depending on how local 1552 * networking configuration changes over time. 1553 */ 1554 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen) 1555 { 1556 struct sockaddr_storage address; 1557 struct sockaddr *sap = (struct sockaddr *)&address; 1558 struct rpc_xprt *xprt; 1559 struct net *net; 1560 size_t salen; 1561 int err; 1562 1563 rcu_read_lock(); 1564 xprt = rcu_dereference(clnt->cl_xprt); 1565 salen = xprt->addrlen; 1566 memcpy(sap, &xprt->addr, salen); 1567 net = get_net(xprt->xprt_net); 1568 rcu_read_unlock(); 1569 1570 rpc_set_port(sap, 0); 1571 err = rpc_sockname(net, sap, salen, buf); 1572 put_net(net); 1573 if (err != 0) 1574 /* Couldn't discover local address, return ANYADDR */ 1575 return rpc_anyaddr(sap->sa_family, buf, buflen); 1576 return 0; 1577 } 1578 EXPORT_SYMBOL_GPL(rpc_localaddr); 1579 1580 void 1581 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 1582 { 1583 struct rpc_xprt *xprt; 1584 1585 rcu_read_lock(); 1586 xprt = rcu_dereference(clnt->cl_xprt); 1587 if (xprt->ops->set_buffer_size) 1588 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 1589 rcu_read_unlock(); 1590 } 1591 EXPORT_SYMBOL_GPL(rpc_setbufsize); 1592 1593 /** 1594 * rpc_net_ns - Get the network namespace for this RPC client 1595 * @clnt: RPC client to query 1596 * 1597 */ 1598 struct net *rpc_net_ns(struct rpc_clnt *clnt) 1599 { 1600 struct net *ret; 1601 1602 rcu_read_lock(); 1603 ret = rcu_dereference(clnt->cl_xprt)->xprt_net; 1604 rcu_read_unlock(); 1605 return ret; 1606 } 1607 EXPORT_SYMBOL_GPL(rpc_net_ns); 1608 1609 /** 1610 * rpc_max_payload - Get maximum payload size for a transport, in bytes 1611 * @clnt: RPC client to query 1612 * 1613 * For stream transports, this is one RPC record fragment (see RFC 1614 * 1831), as we don't support multi-record requests yet. For datagram 1615 * transports, this is the size of an IP packet minus the IP, UDP, and 1616 * RPC header sizes. 1617 */ 1618 size_t rpc_max_payload(struct rpc_clnt *clnt) 1619 { 1620 size_t ret; 1621 1622 rcu_read_lock(); 1623 ret = rcu_dereference(clnt->cl_xprt)->max_payload; 1624 rcu_read_unlock(); 1625 return ret; 1626 } 1627 EXPORT_SYMBOL_GPL(rpc_max_payload); 1628 1629 /** 1630 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes 1631 * @clnt: RPC client to query 1632 */ 1633 size_t rpc_max_bc_payload(struct rpc_clnt *clnt) 1634 { 1635 struct rpc_xprt *xprt; 1636 size_t ret; 1637 1638 rcu_read_lock(); 1639 xprt = rcu_dereference(clnt->cl_xprt); 1640 ret = xprt->ops->bc_maxpayload(xprt); 1641 rcu_read_unlock(); 1642 return ret; 1643 } 1644 EXPORT_SYMBOL_GPL(rpc_max_bc_payload); 1645 1646 unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt) 1647 { 1648 struct rpc_xprt *xprt; 1649 unsigned int ret; 1650 1651 rcu_read_lock(); 1652 xprt = rcu_dereference(clnt->cl_xprt); 1653 ret = xprt->ops->bc_num_slots(xprt); 1654 rcu_read_unlock(); 1655 return ret; 1656 } 1657 EXPORT_SYMBOL_GPL(rpc_num_bc_slots); 1658 1659 /** 1660 * rpc_force_rebind - force transport to check that remote port is unchanged 1661 * @clnt: client to rebind 1662 * 1663 */ 1664 void rpc_force_rebind(struct rpc_clnt *clnt) 1665 { 1666 if (clnt->cl_autobind) { 1667 rcu_read_lock(); 1668 xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); 1669 rcu_read_unlock(); 1670 } 1671 } 1672 EXPORT_SYMBOL_GPL(rpc_force_rebind); 1673 1674 static int 1675 __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *)) 1676 { 1677 task->tk_status = 0; 1678 task->tk_rpc_status = 0; 1679 task->tk_action = action; 1680 return 1; 1681 } 1682 1683 /* 1684 * Restart an (async) RPC call. Usually called from within the 1685 * exit handler. 1686 */ 1687 int 1688 rpc_restart_call(struct rpc_task *task) 1689 { 1690 return __rpc_restart_call(task, call_start); 1691 } 1692 EXPORT_SYMBOL_GPL(rpc_restart_call); 1693 1694 /* 1695 * Restart an (async) RPC call from the call_prepare state. 1696 * Usually called from within the exit handler. 1697 */ 1698 int 1699 rpc_restart_call_prepare(struct rpc_task *task) 1700 { 1701 if (task->tk_ops->rpc_call_prepare != NULL) 1702 return __rpc_restart_call(task, rpc_prepare_task); 1703 return rpc_restart_call(task); 1704 } 1705 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); 1706 1707 const char 1708 *rpc_proc_name(const struct rpc_task *task) 1709 { 1710 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1711 1712 if (proc) { 1713 if (proc->p_name) 1714 return proc->p_name; 1715 else 1716 return "NULL"; 1717 } else 1718 return "no proc"; 1719 } 1720 1721 static void 1722 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) 1723 { 1724 trace_rpc_call_rpcerror(task, tk_status, rpc_status); 1725 rpc_task_set_rpc_status(task, rpc_status); 1726 rpc_exit(task, tk_status); 1727 } 1728 1729 static void 1730 rpc_call_rpcerror(struct rpc_task *task, int status) 1731 { 1732 __rpc_call_rpcerror(task, status, status); 1733 } 1734 1735 /* 1736 * 0. Initial state 1737 * 1738 * Other FSM states can be visited zero or more times, but 1739 * this state is visited exactly once for each RPC. 1740 */ 1741 static void 1742 call_start(struct rpc_task *task) 1743 { 1744 struct rpc_clnt *clnt = task->tk_client; 1745 int idx = task->tk_msg.rpc_proc->p_statidx; 1746 1747 trace_rpc_request(task); 1748 1749 if (task->tk_client->cl_shutdown) { 1750 rpc_call_rpcerror(task, -EIO); 1751 return; 1752 } 1753 1754 /* Increment call count (version might not be valid for ping) */ 1755 if (clnt->cl_program->version[clnt->cl_vers]) 1756 clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; 1757 clnt->cl_stats->rpccnt++; 1758 task->tk_action = call_reserve; 1759 rpc_task_set_transport(task, clnt); 1760 } 1761 1762 /* 1763 * 1. Reserve an RPC call slot 1764 */ 1765 static void 1766 call_reserve(struct rpc_task *task) 1767 { 1768 task->tk_status = 0; 1769 task->tk_action = call_reserveresult; 1770 xprt_reserve(task); 1771 } 1772 1773 static void call_retry_reserve(struct rpc_task *task); 1774 1775 /* 1776 * 1b. Grok the result of xprt_reserve() 1777 */ 1778 static void 1779 call_reserveresult(struct rpc_task *task) 1780 { 1781 int status = task->tk_status; 1782 1783 /* 1784 * After a call to xprt_reserve(), we must have either 1785 * a request slot or else an error status. 1786 */ 1787 task->tk_status = 0; 1788 if (status >= 0) { 1789 if (task->tk_rqstp) { 1790 task->tk_action = call_refresh; 1791 return; 1792 } 1793 1794 rpc_call_rpcerror(task, -EIO); 1795 return; 1796 } 1797 1798 switch (status) { 1799 case -ENOMEM: 1800 rpc_delay(task, HZ >> 2); 1801 fallthrough; 1802 case -EAGAIN: /* woken up; retry */ 1803 task->tk_action = call_retry_reserve; 1804 return; 1805 default: 1806 rpc_call_rpcerror(task, status); 1807 } 1808 } 1809 1810 /* 1811 * 1c. Retry reserving an RPC call slot 1812 */ 1813 static void 1814 call_retry_reserve(struct rpc_task *task) 1815 { 1816 task->tk_status = 0; 1817 task->tk_action = call_reserveresult; 1818 xprt_retry_reserve(task); 1819 } 1820 1821 /* 1822 * 2. Bind and/or refresh the credentials 1823 */ 1824 static void 1825 call_refresh(struct rpc_task *task) 1826 { 1827 task->tk_action = call_refreshresult; 1828 task->tk_status = 0; 1829 task->tk_client->cl_stats->rpcauthrefresh++; 1830 rpcauth_refreshcred(task); 1831 } 1832 1833 /* 1834 * 2a. Process the results of a credential refresh 1835 */ 1836 static void 1837 call_refreshresult(struct rpc_task *task) 1838 { 1839 int status = task->tk_status; 1840 1841 task->tk_status = 0; 1842 task->tk_action = call_refresh; 1843 switch (status) { 1844 case 0: 1845 if (rpcauth_uptodatecred(task)) { 1846 task->tk_action = call_allocate; 1847 return; 1848 } 1849 /* Use rate-limiting and a max number of retries if refresh 1850 * had status 0 but failed to update the cred. 1851 */ 1852 fallthrough; 1853 case -ETIMEDOUT: 1854 rpc_delay(task, 3*HZ); 1855 fallthrough; 1856 case -EAGAIN: 1857 status = -EACCES; 1858 fallthrough; 1859 case -EKEYEXPIRED: 1860 if (!task->tk_cred_retry) 1861 break; 1862 task->tk_cred_retry--; 1863 trace_rpc_retry_refresh_status(task); 1864 return; 1865 case -ENOMEM: 1866 rpc_delay(task, HZ >> 4); 1867 return; 1868 } 1869 trace_rpc_refresh_status(task); 1870 rpc_call_rpcerror(task, status); 1871 } 1872 1873 /* 1874 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. 1875 * (Note: buffer memory is freed in xprt_release). 1876 */ 1877 static void 1878 call_allocate(struct rpc_task *task) 1879 { 1880 const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; 1881 struct rpc_rqst *req = task->tk_rqstp; 1882 struct rpc_xprt *xprt = req->rq_xprt; 1883 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1884 int status; 1885 1886 task->tk_status = 0; 1887 task->tk_action = call_encode; 1888 1889 if (req->rq_buffer) 1890 return; 1891 1892 if (proc->p_proc != 0) { 1893 BUG_ON(proc->p_arglen == 0); 1894 if (proc->p_decode != NULL) 1895 BUG_ON(proc->p_replen == 0); 1896 } 1897 1898 /* 1899 * Calculate the size (in quads) of the RPC call 1900 * and reply headers, and convert both values 1901 * to byte sizes. 1902 */ 1903 req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) + 1904 proc->p_arglen; 1905 req->rq_callsize <<= 2; 1906 /* 1907 * Note: the reply buffer must at minimum allocate enough space 1908 * for the 'struct accepted_reply' from RFC5531. 1909 */ 1910 req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \ 1911 max_t(size_t, proc->p_replen, 2); 1912 req->rq_rcvsize <<= 2; 1913 1914 status = xprt->ops->buf_alloc(task); 1915 trace_rpc_buf_alloc(task, status); 1916 if (status == 0) 1917 return; 1918 if (status != -ENOMEM) { 1919 rpc_call_rpcerror(task, status); 1920 return; 1921 } 1922 1923 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { 1924 task->tk_action = call_allocate; 1925 rpc_delay(task, HZ>>4); 1926 return; 1927 } 1928 1929 rpc_call_rpcerror(task, -ERESTARTSYS); 1930 } 1931 1932 static int 1933 rpc_task_need_encode(struct rpc_task *task) 1934 { 1935 return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 && 1936 (!(task->tk_flags & RPC_TASK_SENT) || 1937 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) || 1938 xprt_request_need_retransmit(task)); 1939 } 1940 1941 static void 1942 rpc_xdr_encode(struct rpc_task *task) 1943 { 1944 struct rpc_rqst *req = task->tk_rqstp; 1945 struct xdr_stream xdr; 1946 1947 xdr_buf_init(&req->rq_snd_buf, 1948 req->rq_buffer, 1949 req->rq_callsize); 1950 xdr_buf_init(&req->rq_rcv_buf, 1951 req->rq_rbuffer, 1952 req->rq_rcvsize); 1953 1954 req->rq_reply_bytes_recvd = 0; 1955 req->rq_snd_buf.head[0].iov_len = 0; 1956 xdr_init_encode(&xdr, &req->rq_snd_buf, 1957 req->rq_snd_buf.head[0].iov_base, req); 1958 if (rpc_encode_header(task, &xdr)) 1959 return; 1960 1961 task->tk_status = rpcauth_wrap_req(task, &xdr); 1962 } 1963 1964 /* 1965 * 3. Encode arguments of an RPC call 1966 */ 1967 static void 1968 call_encode(struct rpc_task *task) 1969 { 1970 if (!rpc_task_need_encode(task)) 1971 goto out; 1972 1973 /* Dequeue task from the receive queue while we're encoding */ 1974 xprt_request_dequeue_xprt(task); 1975 /* Encode here so that rpcsec_gss can use correct sequence number. */ 1976 rpc_xdr_encode(task); 1977 /* Add task to reply queue before transmission to avoid races */ 1978 if (task->tk_status == 0 && rpc_reply_expected(task)) 1979 task->tk_status = xprt_request_enqueue_receive(task); 1980 /* Did the encode result in an error condition? */ 1981 if (task->tk_status != 0) { 1982 /* Was the error nonfatal? */ 1983 switch (task->tk_status) { 1984 case -EAGAIN: 1985 case -ENOMEM: 1986 rpc_delay(task, HZ >> 4); 1987 break; 1988 case -EKEYEXPIRED: 1989 if (!task->tk_cred_retry) { 1990 rpc_call_rpcerror(task, task->tk_status); 1991 } else { 1992 task->tk_action = call_refresh; 1993 task->tk_cred_retry--; 1994 trace_rpc_retry_refresh_status(task); 1995 } 1996 break; 1997 default: 1998 rpc_call_rpcerror(task, task->tk_status); 1999 } 2000 return; 2001 } 2002 2003 xprt_request_enqueue_transmit(task); 2004 out: 2005 task->tk_action = call_transmit; 2006 /* Check that the connection is OK */ 2007 if (!xprt_bound(task->tk_xprt)) 2008 task->tk_action = call_bind; 2009 else if (!xprt_connected(task->tk_xprt)) 2010 task->tk_action = call_connect; 2011 } 2012 2013 /* 2014 * Helpers to check if the task was already transmitted, and 2015 * to take action when that is the case. 2016 */ 2017 static bool 2018 rpc_task_transmitted(struct rpc_task *task) 2019 { 2020 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); 2021 } 2022 2023 static void 2024 rpc_task_handle_transmitted(struct rpc_task *task) 2025 { 2026 xprt_end_transmit(task); 2027 task->tk_action = call_transmit_status; 2028 } 2029 2030 /* 2031 * 4. Get the server port number if not yet set 2032 */ 2033 static void 2034 call_bind(struct rpc_task *task) 2035 { 2036 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2037 2038 if (rpc_task_transmitted(task)) { 2039 rpc_task_handle_transmitted(task); 2040 return; 2041 } 2042 2043 if (xprt_bound(xprt)) { 2044 task->tk_action = call_connect; 2045 return; 2046 } 2047 2048 task->tk_action = call_bind_status; 2049 if (!xprt_prepare_transmit(task)) 2050 return; 2051 2052 xprt->ops->rpcbind(task); 2053 } 2054 2055 /* 2056 * 4a. Sort out bind result 2057 */ 2058 static void 2059 call_bind_status(struct rpc_task *task) 2060 { 2061 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2062 int status = -EIO; 2063 2064 if (rpc_task_transmitted(task)) { 2065 rpc_task_handle_transmitted(task); 2066 return; 2067 } 2068 2069 if (task->tk_status >= 0) 2070 goto out_next; 2071 if (xprt_bound(xprt)) { 2072 task->tk_status = 0; 2073 goto out_next; 2074 } 2075 2076 switch (task->tk_status) { 2077 case -ENOMEM: 2078 rpc_delay(task, HZ >> 2); 2079 goto retry_timeout; 2080 case -EACCES: 2081 trace_rpcb_prog_unavail_err(task); 2082 /* fail immediately if this is an RPC ping */ 2083 if (task->tk_msg.rpc_proc->p_proc == 0) { 2084 status = -EOPNOTSUPP; 2085 break; 2086 } 2087 rpc_delay(task, 3*HZ); 2088 goto retry_timeout; 2089 case -ENOBUFS: 2090 rpc_delay(task, HZ >> 2); 2091 goto retry_timeout; 2092 case -EAGAIN: 2093 goto retry_timeout; 2094 case -ETIMEDOUT: 2095 trace_rpcb_timeout_err(task); 2096 goto retry_timeout; 2097 case -EPFNOSUPPORT: 2098 /* server doesn't support any rpcbind version we know of */ 2099 trace_rpcb_bind_version_err(task); 2100 break; 2101 case -EPROTONOSUPPORT: 2102 trace_rpcb_bind_version_err(task); 2103 goto retry_timeout; 2104 case -ECONNREFUSED: /* connection problems */ 2105 case -ECONNRESET: 2106 case -ECONNABORTED: 2107 case -ENOTCONN: 2108 case -EHOSTDOWN: 2109 case -ENETDOWN: 2110 case -EHOSTUNREACH: 2111 case -ENETUNREACH: 2112 case -EPIPE: 2113 trace_rpcb_unreachable_err(task); 2114 if (!RPC_IS_SOFTCONN(task)) { 2115 rpc_delay(task, 5*HZ); 2116 goto retry_timeout; 2117 } 2118 status = task->tk_status; 2119 break; 2120 default: 2121 trace_rpcb_unrecognized_err(task); 2122 } 2123 2124 rpc_call_rpcerror(task, status); 2125 return; 2126 out_next: 2127 task->tk_action = call_connect; 2128 return; 2129 retry_timeout: 2130 task->tk_status = 0; 2131 task->tk_action = call_bind; 2132 rpc_check_timeout(task); 2133 } 2134 2135 /* 2136 * 4b. Connect to the RPC server 2137 */ 2138 static void 2139 call_connect(struct rpc_task *task) 2140 { 2141 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2142 2143 if (rpc_task_transmitted(task)) { 2144 rpc_task_handle_transmitted(task); 2145 return; 2146 } 2147 2148 if (xprt_connected(xprt)) { 2149 task->tk_action = call_transmit; 2150 return; 2151 } 2152 2153 task->tk_action = call_connect_status; 2154 if (task->tk_status < 0) 2155 return; 2156 if (task->tk_flags & RPC_TASK_NOCONNECT) { 2157 rpc_call_rpcerror(task, -ENOTCONN); 2158 return; 2159 } 2160 if (!xprt_prepare_transmit(task)) 2161 return; 2162 xprt_connect(task); 2163 } 2164 2165 /* 2166 * 4c. Sort out connect result 2167 */ 2168 static void 2169 call_connect_status(struct rpc_task *task) 2170 { 2171 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2172 struct rpc_clnt *clnt = task->tk_client; 2173 int status = task->tk_status; 2174 2175 if (rpc_task_transmitted(task)) { 2176 rpc_task_handle_transmitted(task); 2177 return; 2178 } 2179 2180 trace_rpc_connect_status(task); 2181 2182 if (task->tk_status == 0) { 2183 clnt->cl_stats->netreconn++; 2184 goto out_next; 2185 } 2186 if (xprt_connected(xprt)) { 2187 task->tk_status = 0; 2188 goto out_next; 2189 } 2190 2191 task->tk_status = 0; 2192 switch (status) { 2193 case -ECONNREFUSED: 2194 case -ECONNRESET: 2195 /* A positive refusal suggests a rebind is needed. */ 2196 if (RPC_IS_SOFTCONN(task)) 2197 break; 2198 if (clnt->cl_autobind) { 2199 rpc_force_rebind(clnt); 2200 goto out_retry; 2201 } 2202 fallthrough; 2203 case -ECONNABORTED: 2204 case -ENETDOWN: 2205 case -ENETUNREACH: 2206 case -EHOSTUNREACH: 2207 case -EPIPE: 2208 case -EPROTO: 2209 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2210 task->tk_rqstp->rq_connect_cookie); 2211 if (RPC_IS_SOFTCONN(task)) 2212 break; 2213 /* retry with existing socket, after a delay */ 2214 rpc_delay(task, 3*HZ); 2215 fallthrough; 2216 case -EADDRINUSE: 2217 case -ENOTCONN: 2218 case -EAGAIN: 2219 case -ETIMEDOUT: 2220 if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && 2221 (task->tk_flags & RPC_TASK_MOVEABLE) && 2222 test_bit(XPRT_REMOVE, &xprt->state)) { 2223 struct rpc_xprt *saved = task->tk_xprt; 2224 struct rpc_xprt_switch *xps; 2225 2226 xps = rpc_clnt_xprt_switch_get(clnt); 2227 if (xps->xps_nxprts > 1) { 2228 long value; 2229 2230 xprt_release(task); 2231 value = atomic_long_dec_return(&xprt->queuelen); 2232 if (value == 0) 2233 rpc_xprt_switch_remove_xprt(xps, saved, 2234 true); 2235 xprt_put(saved); 2236 task->tk_xprt = NULL; 2237 task->tk_action = call_start; 2238 } 2239 xprt_switch_put(xps); 2240 if (!task->tk_xprt) 2241 goto out; 2242 } 2243 goto out_retry; 2244 case -ENOBUFS: 2245 rpc_delay(task, HZ >> 2); 2246 goto out_retry; 2247 } 2248 rpc_call_rpcerror(task, status); 2249 return; 2250 out_next: 2251 task->tk_action = call_transmit; 2252 return; 2253 out_retry: 2254 /* Check for timeouts before looping back to call_bind */ 2255 task->tk_action = call_bind; 2256 out: 2257 rpc_check_timeout(task); 2258 } 2259 2260 /* 2261 * 5. Transmit the RPC request, and wait for reply 2262 */ 2263 static void 2264 call_transmit(struct rpc_task *task) 2265 { 2266 if (rpc_task_transmitted(task)) { 2267 rpc_task_handle_transmitted(task); 2268 return; 2269 } 2270 2271 task->tk_action = call_transmit_status; 2272 if (!xprt_prepare_transmit(task)) 2273 return; 2274 task->tk_status = 0; 2275 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2276 if (!xprt_connected(task->tk_xprt)) { 2277 task->tk_status = -ENOTCONN; 2278 return; 2279 } 2280 xprt_transmit(task); 2281 } 2282 xprt_end_transmit(task); 2283 } 2284 2285 /* 2286 * 5a. Handle cleanup after a transmission 2287 */ 2288 static void 2289 call_transmit_status(struct rpc_task *task) 2290 { 2291 task->tk_action = call_status; 2292 2293 /* 2294 * Common case: success. Force the compiler to put this 2295 * test first. 2296 */ 2297 if (rpc_task_transmitted(task)) { 2298 task->tk_status = 0; 2299 xprt_request_wait_receive(task); 2300 return; 2301 } 2302 2303 switch (task->tk_status) { 2304 default: 2305 break; 2306 case -EBADMSG: 2307 task->tk_status = 0; 2308 task->tk_action = call_encode; 2309 break; 2310 /* 2311 * Special cases: if we've been waiting on the 2312 * socket's write_space() callback, or if the 2313 * socket just returned a connection error, 2314 * then hold onto the transport lock. 2315 */ 2316 case -ENOMEM: 2317 case -ENOBUFS: 2318 rpc_delay(task, HZ>>2); 2319 fallthrough; 2320 case -EBADSLT: 2321 case -EAGAIN: 2322 task->tk_action = call_transmit; 2323 task->tk_status = 0; 2324 break; 2325 case -ECONNREFUSED: 2326 case -EHOSTDOWN: 2327 case -ENETDOWN: 2328 case -EHOSTUNREACH: 2329 case -ENETUNREACH: 2330 case -EPERM: 2331 if (RPC_IS_SOFTCONN(task)) { 2332 if (!task->tk_msg.rpc_proc->p_proc) 2333 trace_xprt_ping(task->tk_xprt, 2334 task->tk_status); 2335 rpc_call_rpcerror(task, task->tk_status); 2336 return; 2337 } 2338 fallthrough; 2339 case -ECONNRESET: 2340 case -ECONNABORTED: 2341 case -EADDRINUSE: 2342 case -ENOTCONN: 2343 case -EPIPE: 2344 task->tk_action = call_bind; 2345 task->tk_status = 0; 2346 break; 2347 } 2348 rpc_check_timeout(task); 2349 } 2350 2351 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 2352 static void call_bc_transmit(struct rpc_task *task); 2353 static void call_bc_transmit_status(struct rpc_task *task); 2354 2355 static void 2356 call_bc_encode(struct rpc_task *task) 2357 { 2358 xprt_request_enqueue_transmit(task); 2359 task->tk_action = call_bc_transmit; 2360 } 2361 2362 /* 2363 * 5b. Send the backchannel RPC reply. On error, drop the reply. In 2364 * addition, disconnect on connectivity errors. 2365 */ 2366 static void 2367 call_bc_transmit(struct rpc_task *task) 2368 { 2369 task->tk_action = call_bc_transmit_status; 2370 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2371 if (!xprt_prepare_transmit(task)) 2372 return; 2373 task->tk_status = 0; 2374 xprt_transmit(task); 2375 } 2376 xprt_end_transmit(task); 2377 } 2378 2379 static void 2380 call_bc_transmit_status(struct rpc_task *task) 2381 { 2382 struct rpc_rqst *req = task->tk_rqstp; 2383 2384 if (rpc_task_transmitted(task)) 2385 task->tk_status = 0; 2386 2387 switch (task->tk_status) { 2388 case 0: 2389 /* Success */ 2390 case -ENETDOWN: 2391 case -EHOSTDOWN: 2392 case -EHOSTUNREACH: 2393 case -ENETUNREACH: 2394 case -ECONNRESET: 2395 case -ECONNREFUSED: 2396 case -EADDRINUSE: 2397 case -ENOTCONN: 2398 case -EPIPE: 2399 break; 2400 case -ENOMEM: 2401 case -ENOBUFS: 2402 rpc_delay(task, HZ>>2); 2403 fallthrough; 2404 case -EBADSLT: 2405 case -EAGAIN: 2406 task->tk_status = 0; 2407 task->tk_action = call_bc_transmit; 2408 return; 2409 case -ETIMEDOUT: 2410 /* 2411 * Problem reaching the server. Disconnect and let the 2412 * forechannel reestablish the connection. The server will 2413 * have to retransmit the backchannel request and we'll 2414 * reprocess it. Since these ops are idempotent, there's no 2415 * need to cache our reply at this time. 2416 */ 2417 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2418 "error: %d\n", task->tk_status); 2419 xprt_conditional_disconnect(req->rq_xprt, 2420 req->rq_connect_cookie); 2421 break; 2422 default: 2423 /* 2424 * We were unable to reply and will have to drop the 2425 * request. The server should reconnect and retransmit. 2426 */ 2427 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2428 "error: %d\n", task->tk_status); 2429 break; 2430 } 2431 task->tk_action = rpc_exit_task; 2432 } 2433 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 2434 2435 /* 2436 * 6. Sort out the RPC call status 2437 */ 2438 static void 2439 call_status(struct rpc_task *task) 2440 { 2441 struct rpc_clnt *clnt = task->tk_client; 2442 int status; 2443 2444 if (!task->tk_msg.rpc_proc->p_proc) 2445 trace_xprt_ping(task->tk_xprt, task->tk_status); 2446 2447 status = task->tk_status; 2448 if (status >= 0) { 2449 task->tk_action = call_decode; 2450 return; 2451 } 2452 2453 trace_rpc_call_status(task); 2454 task->tk_status = 0; 2455 switch(status) { 2456 case -EHOSTDOWN: 2457 case -ENETDOWN: 2458 case -EHOSTUNREACH: 2459 case -ENETUNREACH: 2460 case -EPERM: 2461 if (RPC_IS_SOFTCONN(task)) 2462 goto out_exit; 2463 /* 2464 * Delay any retries for 3 seconds, then handle as if it 2465 * were a timeout. 2466 */ 2467 rpc_delay(task, 3*HZ); 2468 fallthrough; 2469 case -ETIMEDOUT: 2470 break; 2471 case -ECONNREFUSED: 2472 case -ECONNRESET: 2473 case -ECONNABORTED: 2474 case -ENOTCONN: 2475 rpc_force_rebind(clnt); 2476 break; 2477 case -EADDRINUSE: 2478 rpc_delay(task, 3*HZ); 2479 fallthrough; 2480 case -EPIPE: 2481 case -EAGAIN: 2482 break; 2483 case -ENFILE: 2484 case -ENOBUFS: 2485 case -ENOMEM: 2486 rpc_delay(task, HZ>>2); 2487 break; 2488 case -EIO: 2489 /* shutdown or soft timeout */ 2490 goto out_exit; 2491 default: 2492 if (clnt->cl_chatty) 2493 printk("%s: RPC call returned error %d\n", 2494 clnt->cl_program->name, -status); 2495 goto out_exit; 2496 } 2497 task->tk_action = call_encode; 2498 rpc_check_timeout(task); 2499 return; 2500 out_exit: 2501 rpc_call_rpcerror(task, status); 2502 } 2503 2504 static bool 2505 rpc_check_connected(const struct rpc_rqst *req) 2506 { 2507 /* No allocated request or transport? return true */ 2508 if (!req || !req->rq_xprt) 2509 return true; 2510 return xprt_connected(req->rq_xprt); 2511 } 2512 2513 static void 2514 rpc_check_timeout(struct rpc_task *task) 2515 { 2516 struct rpc_clnt *clnt = task->tk_client; 2517 2518 if (RPC_SIGNALLED(task)) 2519 return; 2520 2521 if (xprt_adjust_timeout(task->tk_rqstp) == 0) 2522 return; 2523 2524 trace_rpc_timeout_status(task); 2525 task->tk_timeouts++; 2526 2527 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { 2528 rpc_call_rpcerror(task, -ETIMEDOUT); 2529 return; 2530 } 2531 2532 if (RPC_IS_SOFT(task)) { 2533 /* 2534 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has 2535 * been sent, it should time out only if the transport 2536 * connection gets terminally broken. 2537 */ 2538 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && 2539 rpc_check_connected(task->tk_rqstp)) 2540 return; 2541 2542 if (clnt->cl_chatty) { 2543 pr_notice_ratelimited( 2544 "%s: server %s not responding, timed out\n", 2545 clnt->cl_program->name, 2546 task->tk_xprt->servername); 2547 } 2548 if (task->tk_flags & RPC_TASK_TIMEOUT) 2549 rpc_call_rpcerror(task, -ETIMEDOUT); 2550 else 2551 __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); 2552 return; 2553 } 2554 2555 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 2556 task->tk_flags |= RPC_CALL_MAJORSEEN; 2557 if (clnt->cl_chatty) { 2558 pr_notice_ratelimited( 2559 "%s: server %s not responding, still trying\n", 2560 clnt->cl_program->name, 2561 task->tk_xprt->servername); 2562 } 2563 } 2564 rpc_force_rebind(clnt); 2565 /* 2566 * Did our request time out due to an RPCSEC_GSS out-of-sequence 2567 * event? RFC2203 requires the server to drop all such requests. 2568 */ 2569 rpcauth_invalcred(task); 2570 } 2571 2572 /* 2573 * 7. Decode the RPC reply 2574 */ 2575 static void 2576 call_decode(struct rpc_task *task) 2577 { 2578 struct rpc_clnt *clnt = task->tk_client; 2579 struct rpc_rqst *req = task->tk_rqstp; 2580 struct xdr_stream xdr; 2581 int err; 2582 2583 if (!task->tk_msg.rpc_proc->p_decode) { 2584 task->tk_action = rpc_exit_task; 2585 return; 2586 } 2587 2588 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 2589 if (clnt->cl_chatty) { 2590 pr_notice_ratelimited("%s: server %s OK\n", 2591 clnt->cl_program->name, 2592 task->tk_xprt->servername); 2593 } 2594 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 2595 } 2596 2597 /* 2598 * Did we ever call xprt_complete_rqst()? If not, we should assume 2599 * the message is incomplete. 2600 */ 2601 err = -EAGAIN; 2602 if (!req->rq_reply_bytes_recvd) 2603 goto out; 2604 2605 /* Ensure that we see all writes made by xprt_complete_rqst() 2606 * before it changed req->rq_reply_bytes_recvd. 2607 */ 2608 smp_rmb(); 2609 2610 req->rq_rcv_buf.len = req->rq_private_buf.len; 2611 trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); 2612 2613 /* Check that the softirq receive buffer is valid */ 2614 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 2615 sizeof(req->rq_rcv_buf)) != 0); 2616 2617 xdr_init_decode(&xdr, &req->rq_rcv_buf, 2618 req->rq_rcv_buf.head[0].iov_base, req); 2619 err = rpc_decode_header(task, &xdr); 2620 out: 2621 switch (err) { 2622 case 0: 2623 task->tk_action = rpc_exit_task; 2624 task->tk_status = rpcauth_unwrap_resp(task, &xdr); 2625 xdr_finish_decode(&xdr); 2626 return; 2627 case -EAGAIN: 2628 task->tk_status = 0; 2629 if (task->tk_client->cl_discrtry) 2630 xprt_conditional_disconnect(req->rq_xprt, 2631 req->rq_connect_cookie); 2632 task->tk_action = call_encode; 2633 rpc_check_timeout(task); 2634 break; 2635 case -EKEYREJECTED: 2636 task->tk_action = call_reserve; 2637 rpc_check_timeout(task); 2638 rpcauth_invalcred(task); 2639 /* Ensure we obtain a new XID if we retry! */ 2640 xprt_release(task); 2641 } 2642 } 2643 2644 static int 2645 rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) 2646 { 2647 struct rpc_clnt *clnt = task->tk_client; 2648 struct rpc_rqst *req = task->tk_rqstp; 2649 __be32 *p; 2650 int error; 2651 2652 error = -EMSGSIZE; 2653 p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); 2654 if (!p) 2655 goto out_fail; 2656 *p++ = req->rq_xid; 2657 *p++ = rpc_call; 2658 *p++ = cpu_to_be32(RPC_VERSION); 2659 *p++ = cpu_to_be32(clnt->cl_prog); 2660 *p++ = cpu_to_be32(clnt->cl_vers); 2661 *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); 2662 2663 error = rpcauth_marshcred(task, xdr); 2664 if (error < 0) 2665 goto out_fail; 2666 return 0; 2667 out_fail: 2668 trace_rpc_bad_callhdr(task); 2669 rpc_call_rpcerror(task, error); 2670 return error; 2671 } 2672 2673 static noinline int 2674 rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) 2675 { 2676 struct rpc_clnt *clnt = task->tk_client; 2677 int error; 2678 __be32 *p; 2679 2680 /* RFC-1014 says that the representation of XDR data must be a 2681 * multiple of four bytes 2682 * - if it isn't pointer subtraction in the NFS client may give 2683 * undefined results 2684 */ 2685 if (task->tk_rqstp->rq_rcv_buf.len & 3) 2686 goto out_unparsable; 2687 2688 p = xdr_inline_decode(xdr, 3 * sizeof(*p)); 2689 if (!p) 2690 goto out_unparsable; 2691 p++; /* skip XID */ 2692 if (*p++ != rpc_reply) 2693 goto out_unparsable; 2694 if (*p++ != rpc_msg_accepted) 2695 goto out_msg_denied; 2696 2697 error = rpcauth_checkverf(task, xdr); 2698 if (error) 2699 goto out_verifier; 2700 2701 p = xdr_inline_decode(xdr, sizeof(*p)); 2702 if (!p) 2703 goto out_unparsable; 2704 switch (*p) { 2705 case rpc_success: 2706 return 0; 2707 case rpc_prog_unavail: 2708 trace_rpc__prog_unavail(task); 2709 error = -EPFNOSUPPORT; 2710 goto out_err; 2711 case rpc_prog_mismatch: 2712 trace_rpc__prog_mismatch(task); 2713 error = -EPROTONOSUPPORT; 2714 goto out_err; 2715 case rpc_proc_unavail: 2716 trace_rpc__proc_unavail(task); 2717 error = -EOPNOTSUPP; 2718 goto out_err; 2719 case rpc_garbage_args: 2720 case rpc_system_err: 2721 trace_rpc__garbage_args(task); 2722 error = -EIO; 2723 break; 2724 default: 2725 goto out_unparsable; 2726 } 2727 2728 out_garbage: 2729 clnt->cl_stats->rpcgarbage++; 2730 if (task->tk_garb_retry) { 2731 task->tk_garb_retry--; 2732 task->tk_action = call_encode; 2733 return -EAGAIN; 2734 } 2735 out_err: 2736 rpc_call_rpcerror(task, error); 2737 return error; 2738 2739 out_unparsable: 2740 trace_rpc__unparsable(task); 2741 error = -EIO; 2742 goto out_garbage; 2743 2744 out_verifier: 2745 trace_rpc_bad_verifier(task); 2746 switch (error) { 2747 case -EPROTONOSUPPORT: 2748 goto out_err; 2749 case -EACCES: 2750 /* Re-encode with a fresh cred */ 2751 fallthrough; 2752 default: 2753 goto out_garbage; 2754 } 2755 2756 out_msg_denied: 2757 error = -EACCES; 2758 p = xdr_inline_decode(xdr, sizeof(*p)); 2759 if (!p) 2760 goto out_unparsable; 2761 switch (*p++) { 2762 case rpc_auth_error: 2763 break; 2764 case rpc_mismatch: 2765 trace_rpc__mismatch(task); 2766 error = -EPROTONOSUPPORT; 2767 goto out_err; 2768 default: 2769 goto out_unparsable; 2770 } 2771 2772 p = xdr_inline_decode(xdr, sizeof(*p)); 2773 if (!p) 2774 goto out_unparsable; 2775 switch (*p++) { 2776 case rpc_autherr_rejectedcred: 2777 case rpc_autherr_rejectedverf: 2778 case rpcsec_gsserr_credproblem: 2779 case rpcsec_gsserr_ctxproblem: 2780 rpcauth_invalcred(task); 2781 if (!task->tk_cred_retry) 2782 break; 2783 task->tk_cred_retry--; 2784 trace_rpc__stale_creds(task); 2785 return -EKEYREJECTED; 2786 case rpc_autherr_badcred: 2787 case rpc_autherr_badverf: 2788 /* possibly garbled cred/verf? */ 2789 if (!task->tk_garb_retry) 2790 break; 2791 task->tk_garb_retry--; 2792 trace_rpc__bad_creds(task); 2793 task->tk_action = call_encode; 2794 return -EAGAIN; 2795 case rpc_autherr_tooweak: 2796 trace_rpc__auth_tooweak(task); 2797 pr_warn("RPC: server %s requires stronger authentication.\n", 2798 task->tk_xprt->servername); 2799 break; 2800 default: 2801 goto out_unparsable; 2802 } 2803 goto out_err; 2804 } 2805 2806 static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2807 const void *obj) 2808 { 2809 } 2810 2811 static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2812 void *obj) 2813 { 2814 return 0; 2815 } 2816 2817 static const struct rpc_procinfo rpcproc_null = { 2818 .p_encode = rpcproc_encode_null, 2819 .p_decode = rpcproc_decode_null, 2820 }; 2821 2822 static const struct rpc_procinfo rpcproc_null_noreply = { 2823 .p_encode = rpcproc_encode_null, 2824 }; 2825 2826 static void 2827 rpc_null_call_prepare(struct rpc_task *task, void *data) 2828 { 2829 task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT; 2830 rpc_call_start(task); 2831 } 2832 2833 static const struct rpc_call_ops rpc_null_ops = { 2834 .rpc_call_prepare = rpc_null_call_prepare, 2835 .rpc_call_done = rpc_default_callback, 2836 }; 2837 2838 static 2839 struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, 2840 struct rpc_xprt *xprt, struct rpc_cred *cred, int flags, 2841 const struct rpc_call_ops *ops, void *data) 2842 { 2843 struct rpc_message msg = { 2844 .rpc_proc = &rpcproc_null, 2845 }; 2846 struct rpc_task_setup task_setup_data = { 2847 .rpc_client = clnt, 2848 .rpc_xprt = xprt, 2849 .rpc_message = &msg, 2850 .rpc_op_cred = cred, 2851 .callback_ops = ops ?: &rpc_null_ops, 2852 .callback_data = data, 2853 .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | 2854 RPC_TASK_NULLCREDS, 2855 }; 2856 2857 return rpc_run_task(&task_setup_data); 2858 } 2859 2860 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) 2861 { 2862 return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL); 2863 } 2864 EXPORT_SYMBOL_GPL(rpc_call_null); 2865 2866 static int rpc_ping(struct rpc_clnt *clnt) 2867 { 2868 struct rpc_task *task; 2869 int status; 2870 2871 if (clnt->cl_auth->au_ops->ping) 2872 return clnt->cl_auth->au_ops->ping(clnt); 2873 2874 task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL); 2875 if (IS_ERR(task)) 2876 return PTR_ERR(task); 2877 status = task->tk_status; 2878 rpc_put_task(task); 2879 return status; 2880 } 2881 2882 static int rpc_ping_noreply(struct rpc_clnt *clnt) 2883 { 2884 struct rpc_message msg = { 2885 .rpc_proc = &rpcproc_null_noreply, 2886 }; 2887 struct rpc_task_setup task_setup_data = { 2888 .rpc_client = clnt, 2889 .rpc_message = &msg, 2890 .callback_ops = &rpc_null_ops, 2891 .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, 2892 }; 2893 struct rpc_task *task; 2894 int status; 2895 2896 task = rpc_run_task(&task_setup_data); 2897 if (IS_ERR(task)) 2898 return PTR_ERR(task); 2899 status = task->tk_status; 2900 rpc_put_task(task); 2901 return status; 2902 } 2903 2904 struct rpc_cb_add_xprt_calldata { 2905 struct rpc_xprt_switch *xps; 2906 struct rpc_xprt *xprt; 2907 }; 2908 2909 static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) 2910 { 2911 struct rpc_cb_add_xprt_calldata *data = calldata; 2912 2913 if (task->tk_status == 0) 2914 rpc_xprt_switch_add_xprt(data->xps, data->xprt); 2915 } 2916 2917 static void rpc_cb_add_xprt_release(void *calldata) 2918 { 2919 struct rpc_cb_add_xprt_calldata *data = calldata; 2920 2921 xprt_put(data->xprt); 2922 xprt_switch_put(data->xps); 2923 kfree(data); 2924 } 2925 2926 static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = { 2927 .rpc_call_prepare = rpc_null_call_prepare, 2928 .rpc_call_done = rpc_cb_add_xprt_done, 2929 .rpc_release = rpc_cb_add_xprt_release, 2930 }; 2931 2932 /** 2933 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt 2934 * @clnt: pointer to struct rpc_clnt 2935 * @xps: pointer to struct rpc_xprt_switch, 2936 * @xprt: pointer struct rpc_xprt 2937 * @in_max_connect: pointer to the max_connect value for the passed in xprt transport 2938 */ 2939 int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, 2940 struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, 2941 void *in_max_connect) 2942 { 2943 struct rpc_cb_add_xprt_calldata *data; 2944 struct rpc_task *task; 2945 int max_connect = clnt->cl_max_connect; 2946 2947 if (in_max_connect) 2948 max_connect = *(int *)in_max_connect; 2949 if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { 2950 rcu_read_lock(); 2951 pr_warn("SUNRPC: reached max allowed number (%d) did not add " 2952 "transport to server: %s\n", max_connect, 2953 rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); 2954 rcu_read_unlock(); 2955 return -EINVAL; 2956 } 2957 2958 data = kmalloc(sizeof(*data), GFP_KERNEL); 2959 if (!data) 2960 return -ENOMEM; 2961 data->xps = xprt_switch_get(xps); 2962 data->xprt = xprt_get(xprt); 2963 if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) { 2964 rpc_cb_add_xprt_release(data); 2965 goto success; 2966 } 2967 2968 task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, 2969 &rpc_cb_add_xprt_call_ops, data); 2970 if (IS_ERR(task)) 2971 return PTR_ERR(task); 2972 2973 data->xps->xps_nunique_destaddr_xprts++; 2974 rpc_put_task(task); 2975 success: 2976 return 1; 2977 } 2978 EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); 2979 2980 static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, 2981 struct rpc_xprt *xprt, 2982 struct rpc_add_xprt_test *data) 2983 { 2984 struct rpc_task *task; 2985 int status = -EADDRINUSE; 2986 2987 /* Test the connection */ 2988 task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL); 2989 if (IS_ERR(task)) 2990 return PTR_ERR(task); 2991 2992 status = task->tk_status; 2993 rpc_put_task(task); 2994 2995 if (status < 0) 2996 return status; 2997 2998 /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */ 2999 data->add_xprt_test(clnt, xprt, data->data); 3000 3001 return 0; 3002 } 3003 3004 /** 3005 * rpc_clnt_setup_test_and_add_xprt() 3006 * 3007 * This is an rpc_clnt_add_xprt setup() function which returns 1 so: 3008 * 1) caller of the test function must dereference the rpc_xprt_switch 3009 * and the rpc_xprt. 3010 * 2) test function must call rpc_xprt_switch_add_xprt, usually in 3011 * the rpc_call_done routine. 3012 * 3013 * Upon success (return of 1), the test function adds the new 3014 * transport to the rpc_clnt xprt switch 3015 * 3016 * @clnt: struct rpc_clnt to get the new transport 3017 * @xps: the rpc_xprt_switch to hold the new transport 3018 * @xprt: the rpc_xprt to test 3019 * @data: a struct rpc_add_xprt_test pointer that holds the test function 3020 * and test function call data 3021 */ 3022 int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, 3023 struct rpc_xprt_switch *xps, 3024 struct rpc_xprt *xprt, 3025 void *data) 3026 { 3027 int status = -EADDRINUSE; 3028 3029 xprt = xprt_get(xprt); 3030 xprt_switch_get(xps); 3031 3032 if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) 3033 goto out_err; 3034 3035 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3036 if (status < 0) 3037 goto out_err; 3038 3039 status = 1; 3040 out_err: 3041 xprt_put(xprt); 3042 xprt_switch_put(xps); 3043 if (status < 0) 3044 pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not " 3045 "added\n", status, 3046 xprt->address_strings[RPC_DISPLAY_ADDR]); 3047 /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ 3048 return status; 3049 } 3050 EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); 3051 3052 /** 3053 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt 3054 * @clnt: pointer to struct rpc_clnt 3055 * @xprtargs: pointer to struct xprt_create 3056 * @setup: callback to test and/or set up the connection 3057 * @data: pointer to setup function data 3058 * 3059 * Creates a new transport using the parameters set in args and 3060 * adds it to clnt. 3061 * If ping is set, then test that connectivity succeeds before 3062 * adding the new transport. 3063 * 3064 */ 3065 int rpc_clnt_add_xprt(struct rpc_clnt *clnt, 3066 struct xprt_create *xprtargs, 3067 int (*setup)(struct rpc_clnt *, 3068 struct rpc_xprt_switch *, 3069 struct rpc_xprt *, 3070 void *), 3071 void *data) 3072 { 3073 struct rpc_xprt_switch *xps; 3074 struct rpc_xprt *xprt; 3075 unsigned long connect_timeout; 3076 unsigned long reconnect_timeout; 3077 unsigned char resvport, reuseport; 3078 int ret = 0, ident; 3079 3080 rcu_read_lock(); 3081 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3082 xprt = xprt_iter_xprt(&clnt->cl_xpi); 3083 if (xps == NULL || xprt == NULL) { 3084 rcu_read_unlock(); 3085 xprt_switch_put(xps); 3086 return -EAGAIN; 3087 } 3088 resvport = xprt->resvport; 3089 reuseport = xprt->reuseport; 3090 connect_timeout = xprt->connect_timeout; 3091 reconnect_timeout = xprt->max_reconnect_timeout; 3092 ident = xprt->xprt_class->ident; 3093 rcu_read_unlock(); 3094 3095 if (!xprtargs->ident) 3096 xprtargs->ident = ident; 3097 xprtargs->xprtsec = clnt->cl_xprtsec; 3098 xprt = xprt_create_transport(xprtargs); 3099 if (IS_ERR(xprt)) { 3100 ret = PTR_ERR(xprt); 3101 goto out_put_switch; 3102 } 3103 xprt->resvport = resvport; 3104 xprt->reuseport = reuseport; 3105 3106 if (xprtargs->connect_timeout) 3107 connect_timeout = xprtargs->connect_timeout; 3108 if (xprtargs->reconnect_timeout) 3109 reconnect_timeout = xprtargs->reconnect_timeout; 3110 if (xprt->ops->set_connect_timeout != NULL) 3111 xprt->ops->set_connect_timeout(xprt, 3112 connect_timeout, 3113 reconnect_timeout); 3114 3115 rpc_xprt_switch_set_roundrobin(xps); 3116 if (setup) { 3117 ret = setup(clnt, xps, xprt, data); 3118 if (ret != 0) 3119 goto out_put_xprt; 3120 } 3121 rpc_xprt_switch_add_xprt(xps, xprt); 3122 out_put_xprt: 3123 xprt_put(xprt); 3124 out_put_switch: 3125 xprt_switch_put(xps); 3126 return ret; 3127 } 3128 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 3129 3130 static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, 3131 struct rpc_xprt *xprt, 3132 struct rpc_add_xprt_test *data) 3133 { 3134 struct rpc_xprt *main_xprt; 3135 int status = 0; 3136 3137 xprt_get(xprt); 3138 3139 rcu_read_lock(); 3140 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3141 status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3142 (struct sockaddr *)&main_xprt->addr); 3143 rcu_read_unlock(); 3144 xprt_put(main_xprt); 3145 if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) 3146 goto out; 3147 3148 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3149 out: 3150 xprt_put(xprt); 3151 return status; 3152 } 3153 3154 /* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking 3155 * @clnt rpc_clnt structure 3156 * 3157 * For each offlined transport found in the rpc_clnt structure call 3158 * the function rpc_xprt_probe_trunked() which will determine if this 3159 * transport still belongs to the trunking group. 3160 */ 3161 void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, 3162 struct rpc_add_xprt_test *data) 3163 { 3164 struct rpc_xprt_iter xpi; 3165 int ret; 3166 3167 ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi); 3168 if (ret) 3169 return; 3170 for (;;) { 3171 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 3172 3173 if (!xprt) 3174 break; 3175 ret = rpc_xprt_probe_trunked(clnt, xprt, data); 3176 xprt_put(xprt); 3177 if (ret < 0) 3178 break; 3179 xprt_iter_rewind(&xpi); 3180 } 3181 xprt_iter_destroy(&xpi); 3182 } 3183 EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); 3184 3185 static int rpc_xprt_offline(struct rpc_clnt *clnt, 3186 struct rpc_xprt *xprt, 3187 void *data) 3188 { 3189 struct rpc_xprt *main_xprt; 3190 struct rpc_xprt_switch *xps; 3191 int err = 0; 3192 3193 xprt_get(xprt); 3194 3195 rcu_read_lock(); 3196 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3197 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3198 err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3199 (struct sockaddr *)&main_xprt->addr); 3200 rcu_read_unlock(); 3201 xprt_put(main_xprt); 3202 if (err) 3203 goto out; 3204 3205 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { 3206 err = -EINTR; 3207 goto out; 3208 } 3209 xprt_set_offline_locked(xprt, xps); 3210 3211 xprt_release_write(xprt, NULL); 3212 out: 3213 xprt_put(xprt); 3214 xprt_switch_put(xps); 3215 return err; 3216 } 3217 3218 /* rpc_clnt_manage_trunked_xprts -- offline trunked transports 3219 * @clnt rpc_clnt structure 3220 * 3221 * For each active transport found in the rpc_clnt structure call 3222 * the function rpc_xprt_offline() which will identify trunked transports 3223 * and will mark them offline. 3224 */ 3225 void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) 3226 { 3227 rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); 3228 } 3229 EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); 3230 3231 struct connect_timeout_data { 3232 unsigned long connect_timeout; 3233 unsigned long reconnect_timeout; 3234 }; 3235 3236 static int 3237 rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, 3238 struct rpc_xprt *xprt, 3239 void *data) 3240 { 3241 struct connect_timeout_data *timeo = data; 3242 3243 if (xprt->ops->set_connect_timeout) 3244 xprt->ops->set_connect_timeout(xprt, 3245 timeo->connect_timeout, 3246 timeo->reconnect_timeout); 3247 return 0; 3248 } 3249 3250 void 3251 rpc_set_connect_timeout(struct rpc_clnt *clnt, 3252 unsigned long connect_timeout, 3253 unsigned long reconnect_timeout) 3254 { 3255 struct connect_timeout_data timeout = { 3256 .connect_timeout = connect_timeout, 3257 .reconnect_timeout = reconnect_timeout, 3258 }; 3259 rpc_clnt_iterate_for_each_xprt(clnt, 3260 rpc_xprt_set_connect_timeout, 3261 &timeout); 3262 } 3263 EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); 3264 3265 void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3266 { 3267 struct rpc_xprt_switch *xps; 3268 3269 xps = rpc_clnt_xprt_switch_get(clnt); 3270 xprt_set_online_locked(xprt, xps); 3271 xprt_switch_put(xps); 3272 } 3273 3274 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3275 { 3276 struct rpc_xprt_switch *xps; 3277 3278 if (rpc_clnt_xprt_switch_has_addr(clnt, 3279 (const struct sockaddr *)&xprt->addr)) { 3280 return rpc_clnt_xprt_set_online(clnt, xprt); 3281 } 3282 3283 xps = rpc_clnt_xprt_switch_get(clnt); 3284 rpc_xprt_switch_add_xprt(xps, xprt); 3285 xprt_switch_put(xps); 3286 } 3287 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 3288 3289 void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3290 { 3291 struct rpc_xprt_switch *xps; 3292 3293 rcu_read_lock(); 3294 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3295 rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 3296 xprt, 0); 3297 xps->xps_nunique_destaddr_xprts--; 3298 rcu_read_unlock(); 3299 } 3300 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); 3301 3302 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 3303 const struct sockaddr *sap) 3304 { 3305 struct rpc_xprt_switch *xps; 3306 bool ret; 3307 3308 rcu_read_lock(); 3309 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3310 ret = rpc_xprt_switch_has_addr(xps, sap); 3311 rcu_read_unlock(); 3312 return ret; 3313 } 3314 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); 3315 3316 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3317 static void rpc_show_header(void) 3318 { 3319 printk(KERN_INFO "-pid- flgs status -client- --rqstp- " 3320 "-timeout ---ops--\n"); 3321 } 3322 3323 static void rpc_show_task(const struct rpc_clnt *clnt, 3324 const struct rpc_task *task) 3325 { 3326 const char *rpc_waitq = "none"; 3327 3328 if (RPC_IS_QUEUED(task)) 3329 rpc_waitq = rpc_qname(task->tk_waitqueue); 3330 3331 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", 3332 task->tk_pid, task->tk_flags, task->tk_status, 3333 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, 3334 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), 3335 task->tk_action, rpc_waitq); 3336 } 3337 3338 void rpc_show_tasks(struct net *net) 3339 { 3340 struct rpc_clnt *clnt; 3341 struct rpc_task *task; 3342 int header = 0; 3343 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 3344 3345 spin_lock(&sn->rpc_client_lock); 3346 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 3347 spin_lock(&clnt->cl_lock); 3348 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 3349 if (!header) { 3350 rpc_show_header(); 3351 header++; 3352 } 3353 rpc_show_task(clnt, task); 3354 } 3355 spin_unlock(&clnt->cl_lock); 3356 } 3357 spin_unlock(&sn->rpc_client_lock); 3358 } 3359 #endif 3360 3361 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 3362 static int 3363 rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, 3364 struct rpc_xprt *xprt, 3365 void *dummy) 3366 { 3367 return xprt_enable_swap(xprt); 3368 } 3369 3370 int 3371 rpc_clnt_swap_activate(struct rpc_clnt *clnt) 3372 { 3373 while (clnt != clnt->cl_parent) 3374 clnt = clnt->cl_parent; 3375 if (atomic_inc_return(&clnt->cl_swapper) == 1) 3376 return rpc_clnt_iterate_for_each_xprt(clnt, 3377 rpc_clnt_swap_activate_callback, NULL); 3378 return 0; 3379 } 3380 EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); 3381 3382 static int 3383 rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, 3384 struct rpc_xprt *xprt, 3385 void *dummy) 3386 { 3387 xprt_disable_swap(xprt); 3388 return 0; 3389 } 3390 3391 void 3392 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) 3393 { 3394 while (clnt != clnt->cl_parent) 3395 clnt = clnt->cl_parent; 3396 if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) 3397 rpc_clnt_iterate_for_each_xprt(clnt, 3398 rpc_clnt_swap_deactivate_callback, NULL); 3399 } 3400 EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); 3401 #endif /* CONFIG_SUNRPC_SWAP */ 3402