// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may be have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/*
 * Threads in rpc_shutdown_client() sleep on this queue until the
 * client's task list drains; rpc_release_client() wakes them.
 */
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

/* Steps of the per-task RPC call state machine (tk_action callbacks). */
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static int	rpc_ping_noreply(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);

/* Add @clnt to its network namespace's list of all RPC clients. */
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

/* Remove @clnt from its network namespace's list of all RPC clients. */
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

/*
 * Remove the client's rpc_pipefs directory, but only when the
 * currently-mounted pipefs superblock is the one the directory was
 * created on (clnt->pipefs_sb).
 */
static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		if (pipefs_sb == clnt->pipefs_sb)
			__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

/*
 * Create a "clnt%x" directory for @clnt under the program's pipe
 * directory on pipefs superblock @sb.  The static id counter is
 * bumped until an unused name is found (-EEXIST retries with the
 * next id).  Returns the new dentry, NULL when the parent directory
 * is missing, or an ERR_PTR on other failures.
 */
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
		struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

/* Record @pipefs_sb on the client and create its pipe directory, if any. */
static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	clnt->pipefs_sb = pipefs_sb;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

/*
 * Return 1 when the pipefs mount/umount @event is a no-op for @clnt:
 * the program has no pipe directory, the directory already exists
 * (or is already gone, for umount), or the client refcount has
 * dropped to zero (teardown in progress).
 */
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (refcount_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

/* Create or remove the client's pipefs directory for a single event. */
static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

/*
 * Apply a pipefs event to @clnt and each of its ancestors, walking
 * the cl_parent chain up to the root (a root client is its own
 * parent, which terminates the loop).
 */
static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

/* Find the first client in @net for which @event is not a no-op. */
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

/*
 * Pipefs mount/umount notifier callback: keep processing clients
 * until every one has seen the event or an error occurs.
 */
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

/*
 * Install a new transport and timeout on @clnt under cl_lock and
 * return the old transport; the caller is responsible for dropping
 * the returned reference.  An unbound transport turns on autobind.
 */
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}

/*
 * Cache the node name used in RPC headers.  A negative strscpy()
 * result means @nodename was truncated to fit cl_nodename.
 */
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	ssize_t copied;

	copied = strscpy(clnt->cl_nodename,
			 nodename, sizeof(clnt->cl_nodename));

	clnt->cl_nodelen = copied < 0
				? sizeof(clnt->cl_nodename) - 1
				: copied;
}

/*
 * Register @clnt with debugfs, pipefs and the per-net client list,
 * then create its RPC auth.  On auth creation failure everything is
 * unwound again.
 */
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

/* IDA of unique per-client identifiers (clnt->cl_clid). */
static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

/* Assign a unique client id to @clnt; returns 0 or a negative errno. */
static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_free(&rpc_clids, clnt->cl_clid);
}

/*
 * Allocate and initialise a struct rpc_clnt for @args, consuming the
 * caller's references to @xps and @xprt on both the success and the
 * failure paths.  @parent, if non-NULL, becomes the new client's
 * cl_parent and gains a reference on success.
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	/* a client that is not a clone is its own parent */
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = args->stats ? : program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Caller-supplied timeout overrides the transport's default. */
	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	refcount_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

/*
 * Wrap @xprt in a transport switch (reusing the back channel's switch
 * when one exists) and create a client for it, then apply the
 * create-time flags: optional (no-)ping, soft/hard retry semantics,
 * autobind, retrans timeout and chattiness.
 */
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt,
					GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
		.connect_timeout = args->connect_timeout,
		.reconnect_timeout = args->reconnect_timeout,
	};
	char servername[RPC_MAXNETNAMELEN];
	struct rpc_clnt *clnt;
	int i;

	/* A back-channel transport, if present, is reused directly. */
	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			/* abstract AF_LOCAL names start with a NUL byte */
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	/* Attach the remaining nconnect - 1 transports to the switch. */
	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	/* rpc_new_client() consumes the xps and xprt references. */
	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	new->cl_max_connect = clnt->cl_max_connect;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	args->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	/* Swap in the new transport and switch, keeping the old ones
	 * so they can be restored on failure. */
	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 * client and server.
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);
	if (err)
		goto out_revert;

	synchronize_rcu();
	if (parent != clnt)
		rpc_release_client(parent);
	xprt_switch_put(oldxps);
	xprt_put(old);
	trace_rpc_clnt_replace_xprt(clnt);
	return 0;

out_revert:
	/* Restore the old switch, transport, timeout and parent. */
	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_replace_xprt_err(clnt);
	return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);

/* Grab a reference to the client's current transport switch. */
static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();

	return xps;
}

/*
 * Initialise iterator @xpi against the client's transport switch
 * using the supplied iterator-init function.  Returns -EAGAIN when
 * the client currently has no switch.
 */
static
int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi,
		void func(struct rpc_xprt_iter *xpi,
			  struct rpc_xprt_switch *xps))
{
	struct rpc_xprt_switch *xps;

	xps = rpc_clnt_xprt_switch_get(clnt);
	if (xps == NULL)
		return -EAGAIN;
	func(xpi, xps);
	xprt_switch_put(xps);
	return 0;
}

/* Iterate over every transport attached to the client. */
static
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall);
}

/* Iterate over only the offline transports attached to the client. */
static
int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt,
				    struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline);
}

/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
		void *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
	if (ret)
		return ret;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = fn(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
	}
	xprt_iter_destroy(&xpi);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	if (list_empty(&clnt->cl_tasks))
		return;

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	trace_rpc_clnt_killall(clnt);
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

/**
 * rpc_cancel_tasks - try to cancel a set of RPC tasks
 * @clnt: Pointer to RPC client
 * @error: RPC task error value to set
 * @fnmatch: Pointer to selector function
 * @data: User data
 *
 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
 * The argument @error must be a negative error value.
 */
unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
			       bool (*fnmatch)(const struct rpc_task *,
					       const void *),
			       const void *data)
{
	struct rpc_task *task;
	unsigned long count = 0;

	if (list_empty(&clnt->cl_tasks))
		return 0;
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(task))
			continue;
		if (!fnmatch(task, data))
			continue;
		rpc_task_try_cancel(task, error);
		count++;
	}
	spin_unlock(&clnt->cl_lock);
	return count;
}
EXPORT_SYMBOL_GPL(rpc_cancel_tasks);

/* Force-disconnect one transport if it is currently connected. */
static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt, void *dummy)
{
	if (xprt_connected(xprt))
		xprt_force_disconnect(xprt);
	return 0;
}

/* Disconnect every transport attached to @clnt. */
void rpc_clnt_disconnect(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	might_sleep();

	trace_rpc_clnt_shutdown(clnt);

	clnt->cl_shutdown = 1;
	/* Keep killing tasks until the client's task list drains. */
	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	/* wait for tasks still in workqueue or waitqueue */
	wait_event_timeout(destroy_wait,
			   atomic_read(&clnt->cl_task_count) == 0, 1 * HZ);

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void rpc_free_client_work(struct work_struct *work)
{
	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

	trace_rpc_clnt_free(clnt);

	/* These might block on processes that might allocate memory,
	 * so they cannot be called in rpciod, so they are handled separately
	 * here.
	 */
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	rpc_free_clid(clnt);
	rpc_clnt_remove_pipedir(clnt);
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

	kfree(clnt);
	rpciod_down();
}

/*
 * Release the client's resources, deferring the potentially-blocking
 * part of teardown to a workqueue.  Returns the parent client (if
 * this was a clone) so the caller can drop its reference too.
 */
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	trace_rpc_clnt_release(clnt);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);

	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
	schedule_work(&clnt->cl_work);
	return parent;
}

/*
 * Free an RPC client
 */
static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts.  This mechanism ensures
	 * that it can do so safely.
	 */
	if (clnt->cl_auth != NULL) {
		rpcauth_release(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (refcount_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
	return NULL;
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (refcount_dec_not_one(&clnt->cl_count))
			break;
		/* Last reference: free this client, then loop to drop
		 * the reference it held on its parent, if any. */
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
		.stats		= old->cl_stats,
		.timeout	= old->cl_timeout,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	/* Verify the server supports the new program/version. */
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

/*
 * Account the task on both the transport's and the switch's queue
 * length counters, and hand the (already referenced) transport back
 * to the caller.
 */
struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}

/* Undo rpc_task_get_xprt(): drop the queue counts and the reference. */
static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}

/* Detach the task from its transport and drop the task's reference. */
void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt) {
		task->tk_xprt = NULL;
		if (task->tk_client)
			rpc_task_release_xprt(task->tk_client, xprt);
		else
			xprt_put(xprt);
	}
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

/* Unlink the task from its client and release transport and client. */
void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);
	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;
		atomic_dec(&clnt->cl_task_count);

		rpc_release_client(clnt);
	}
}

/* Always hand out the client's main transport. */
static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}

/* Hand out the next transport in the switch's rotation. */
static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}

/*
 * Attach a transport to the task.  A moveable task that is currently
 * bound to an offline transport is migrated to another one; any other
 * already-assigned transport is kept as-is.
 */
static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt) {
		if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
		      (task->tk_flags & RPC_TASK_MOVEABLE)))
			return;
		xprt_release(task);
		xprt_put(task->tk_xprt);
	}
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}

/* Bind the task to @clnt and inherit the client's retry semantics. */
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	refcount_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	atomic_inc(&clnt->cl_task_count);
}

/* Copy the caller's RPC message (proc, args, result, cred) into @task. */
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
task->tk_msg.rpc_cred = msg->rpc_cred; 1209 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1210 get_cred(task->tk_msg.rpc_cred); 1211 } 1212 } 1213 1214 /* 1215 * Default callback for async RPC calls 1216 */ 1217 static void 1218 rpc_default_callback(struct rpc_task *task, void *data) 1219 { 1220 } 1221 1222 static const struct rpc_call_ops rpc_default_ops = { 1223 .rpc_call_done = rpc_default_callback, 1224 }; 1225 1226 /** 1227 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 1228 * @task_setup_data: pointer to task initialisation data 1229 */ 1230 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) 1231 { 1232 struct rpc_task *task; 1233 1234 task = rpc_new_task(task_setup_data); 1235 if (IS_ERR(task)) 1236 return task; 1237 1238 if (!RPC_IS_ASYNC(task)) 1239 task->tk_flags |= RPC_TASK_CRED_NOREF; 1240 1241 rpc_task_set_client(task, task_setup_data->rpc_client); 1242 rpc_task_set_rpc_message(task, task_setup_data->rpc_message); 1243 1244 if (task->tk_action == NULL) 1245 rpc_call_start(task); 1246 1247 atomic_inc(&task->tk_count); 1248 rpc_execute(task); 1249 return task; 1250 } 1251 EXPORT_SYMBOL_GPL(rpc_run_task); 1252 1253 /** 1254 * rpc_call_sync - Perform a synchronous RPC call 1255 * @clnt: pointer to RPC client 1256 * @msg: RPC call parameters 1257 * @flags: RPC call flags 1258 */ 1259 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) 1260 { 1261 struct rpc_task *task; 1262 struct rpc_task_setup task_setup_data = { 1263 .rpc_client = clnt, 1264 .rpc_message = msg, 1265 .callback_ops = &rpc_default_ops, 1266 .flags = flags, 1267 }; 1268 int status; 1269 1270 WARN_ON_ONCE(flags & RPC_TASK_ASYNC); 1271 if (flags & RPC_TASK_ASYNC) { 1272 rpc_release_calldata(task_setup_data.callback_ops, 1273 task_setup_data.callback_data); 1274 return -EINVAL; 1275 } 1276 1277 task = rpc_run_task(&task_setup_data); 1278 if (IS_ERR(task)) 1279 return PTR_ERR(task); 1280 status = task->tk_status; 
1281 rpc_put_task(task); 1282 return status; 1283 } 1284 EXPORT_SYMBOL_GPL(rpc_call_sync); 1285 1286 /** 1287 * rpc_call_async - Perform an asynchronous RPC call 1288 * @clnt: pointer to RPC client 1289 * @msg: RPC call parameters 1290 * @flags: RPC call flags 1291 * @tk_ops: RPC call ops 1292 * @data: user call data 1293 */ 1294 int 1295 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, 1296 const struct rpc_call_ops *tk_ops, void *data) 1297 { 1298 struct rpc_task *task; 1299 struct rpc_task_setup task_setup_data = { 1300 .rpc_client = clnt, 1301 .rpc_message = msg, 1302 .callback_ops = tk_ops, 1303 .callback_data = data, 1304 .flags = flags|RPC_TASK_ASYNC, 1305 }; 1306 1307 task = rpc_run_task(&task_setup_data); 1308 if (IS_ERR(task)) 1309 return PTR_ERR(task); 1310 rpc_put_task(task); 1311 return 0; 1312 } 1313 EXPORT_SYMBOL_GPL(rpc_call_async); 1314 1315 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1316 static void call_bc_encode(struct rpc_task *task); 1317 1318 /** 1319 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run 1320 * rpc_execute against it 1321 * @req: RPC request 1322 * @timeout: timeout values to use for this task 1323 */ 1324 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 1325 struct rpc_timeout *timeout) 1326 { 1327 struct rpc_task *task; 1328 struct rpc_task_setup task_setup_data = { 1329 .callback_ops = &rpc_default_ops, 1330 .flags = RPC_TASK_SOFTCONN | 1331 RPC_TASK_NO_RETRANS_TIMEOUT, 1332 }; 1333 1334 dprintk("RPC: rpc_run_bc_task req= %p\n", req); 1335 /* 1336 * Create an rpc_task to send the data 1337 */ 1338 task = rpc_new_task(&task_setup_data); 1339 if (IS_ERR(task)) { 1340 xprt_free_bc_request(req); 1341 return task; 1342 } 1343 1344 xprt_init_bc_request(req, task, timeout); 1345 1346 task->tk_action = call_bc_encode; 1347 atomic_inc(&task->tk_count); 1348 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); 1349 rpc_execute(task); 1350 1351 dprintk("RPC: rpc_run_bc_task: task= 
%p\n", task); 1352 return task; 1353 } 1354 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1355 1356 /** 1357 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages 1358 * @req: RPC request to prepare 1359 * @pages: vector of struct page pointers 1360 * @base: offset in first page where receive should start, in bytes 1361 * @len: expected size of the upper layer data payload, in bytes 1362 * @hdrsize: expected size of upper layer reply header, in XDR words 1363 * 1364 */ 1365 void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, 1366 unsigned int base, unsigned int len, 1367 unsigned int hdrsize) 1368 { 1369 hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; 1370 1371 xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); 1372 trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf); 1373 } 1374 EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages); 1375 1376 void 1377 rpc_call_start(struct rpc_task *task) 1378 { 1379 task->tk_action = call_start; 1380 } 1381 EXPORT_SYMBOL_GPL(rpc_call_start); 1382 1383 /** 1384 * rpc_peeraddr - extract remote peer address from clnt's xprt 1385 * @clnt: RPC client structure 1386 * @buf: target buffer 1387 * @bufsize: length of target buffer 1388 * 1389 * Returns the number of bytes that are actually in the stored address. 
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	/* Copy is truncated to bufsize if the stored address is larger */
	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

/* Wildcard bind addresses used by rpc_sockname()/rpc_anyaddr() below */
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;
	int err;

	err = __sock_create(net, sap->sa_family,
			SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	if (err < 0) {
		dprintk("RPC:       can't create UDP socket (%d)\n", err);
		goto out;
	}

	/* Bind to the wildcard address for the peer's address family */
	switch (sap->sa_family) {
	case AF_INET:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		err = -EAFNOSUPPORT;
		goto out_release;
	}
	if (err < 0) {
		dprintk("RPC:       can't bind UDP socket (%d)\n", err);
		goto out_release;
	}

	/* "Connecting" a UDP socket just fixes the destination; the kernel
	 * then selects a routable source address we can read back. */
	err = kernel_connect(sock, sap, salen, 0);
	if (err < 0) {
		dprintk("RPC:       can't connect UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_getsockname(sock, buf);
	if (err < 0) {
		dprintk("RPC:       getsockname failed (%d)\n", err);
		goto out_release;
	}

	err = 0;
	/* The scope id is only meaningful on the local system */
	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC:       %s succeeded\n", __func__);

out_release:
	sock_release(sock);
out:
	return err;
}

/*
 * Scraping a connected socket failed, so we don't have a useable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	/* Copy the appropriate wildcard address, checking that the
	 * caller's buffer is large enough to hold it. */
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC:       %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC:       %s: succeeded\n", __func__);
	return 0;
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	/* Snapshot the peer address and pin the netns while under RCU */
	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);

/*
 * Pass new send/receive buffer size hints down to the transport,
 * if it supports them.
 */
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	size_t ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

/*
 * Number of backchannel slots provided by this client's transport.
 */
unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	unsigned int ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_num_slots(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_num_bc_slots);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	/* Only meaningful for clients that use the rpcbind service */
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Clear the task's status fields and restart its FSM at @action.
 * Always returns 1 so callers can use it as a tail expression.
 */
static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	/* Fall back to call_start when the caller has no prepare callback */
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

/*
 * Return a printable name for the task's RPC procedure, for tracing
 * and log messages. Never returns NULL.
 */
const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}

/*
 * Terminate the task with @tk_status, recording @rpc_status as the
 * RPC-level result.
 */
static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	rpc_task_set_rpc_status(task, rpc_status);
	rpc_exit(task, tk_status);
}

/*
 * Terminate the task with @status as both the task and RPC result.
 */
static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	/* Refuse new calls once the client is shutting down */
	if (task->tk_client->cl_shutdown) {
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}

/*
 * 1.
 *	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

static void call_retry_reserve(struct rpc_task *task);

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;

			/* Add to the client's list of all tasks */
			spin_lock(&task->tk_client->cl_lock);
			if (list_empty(&task->tk_task))
				list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
			spin_unlock(&task->tk_client->cl_lock);
			return;
		}
		/* Success status but no slot: internal inconsistency */
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	switch (status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	default:
		rpc_call_rpcerror(task, status);
	}
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.
 *	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	case -EKEYEXPIRED:
		break;
	case -ENOMEM:
		rpc_delay(task, HZ >> 4);
		return;
	}
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}

/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
	int status;

	task->tk_status = 0;
	task->tk_action = call_encode;

	/* Buffer already allocated on a previous pass through this state */
	if (req->rq_buffer)
		return;

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
			   proc->p_arglen;
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	trace_rpc_buf_alloc(task, status);
	if (status == 0)
		return;
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
		return;
	}

	/* On -ENOMEM, retry after a short delay unless a fatal signal is
	 * pending on a synchronous task */
	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}

/*
 * Does this request still need to be (re-)encoded? True when it has
 * never been transmitted, or when retransmission requires re-encoding.
 */
static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

/*
 * XDR-encode the RPC header and arguments into the send buffer, and
 * reset the receive buffer for the reply.
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
		     req->rq_buffer,
		     req->rq_callsize);
	xdr_buf_init(&req->rq_rcv_buf,
		     req->rq_rbuffer,
		     req->rq_rcvsize);

	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	if (rpc_encode_header(task, &xdr))
		return;

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
		goto out;

	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Add task to reply queue before transmission to avoid races */
	if (task->tk_status == 0 && rpc_reply_expected(task))
		task->tk_status = xprt_request_enqueue_receive(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.
 *	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	/* Query the rpcbind service on the server for the port number */
	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
retry_timeout:
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
		return;
	}

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
		return;
	}
	if (!xprt_prepare_transmit(task))
		return;
	xprt_connect(task);
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
		goto out_next;
	}
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	task->tk_status = 0;
	switch (status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		/* A positive refusal suggests a rebind is needed. */
		if (RPC_IS_SOFTCONN(task))
			break;
		if (clnt->cl_autobind) {
			rpc_force_rebind(clnt);
			goto out_retry;
		}
		fallthrough;
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EPROTO:
		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
					    task->tk_rqstp->rq_connect_cookie);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EAGAIN:
	case -ETIMEDOUT:
		/* A movable task on a transport marked for removal may
		 * migrate to another transport in the switch; the last
		 * queued task takes the transport out of the switch. */
		if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
		    (task->tk_flags & RPC_TASK_MOVEABLE) &&
		    test_bit(XPRT_REMOVE, &xprt->state)) {
			struct rpc_xprt *saved = task->tk_xprt;
			struct rpc_xprt_switch *xps;

			xps = rpc_clnt_xprt_switch_get(clnt);
			if (xps->xps_nxprts > 1) {
				long value;

				xprt_release(task);
				value = atomic_long_dec_return(&xprt->queuelen);
				if (value == 0)
					rpc_xprt_switch_remove_xprt(xps, saved,
								    true);
				xprt_put(saved);
				task->tk_xprt = NULL;
				task->tk_action = call_start;
			}
			xprt_switch_put(xps);
			if (!task->tk_xprt)
				goto out;
		}
		goto out_retry;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto out_retry;
	}
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_transmit;
	return;
out_retry:
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
out:
	rpc_check_timeout(task);
}

/*
 * 5.
 *	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
		return;
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
			return;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
		return;
	}

	switch (task->tk_status) {
	default:
		break;
	case -EBADMSG:
		/* Encoding produced a bad message: re-encode and resend */
		task->tk_status = 0;
		task->tk_action = call_encode;
		break;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_action = call_transmit;
		task->tk_status = 0;
		break;
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		break;
	case -ECONNREFUSED:
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
			return;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		/* Connection-level failure: go back through rebind/connect */
		task->tk_action = call_bind;
		task->tk_status = 0;
		break;
	}
	rpc_check_timeout(task);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

/*
 * Queue a backchannel reply for transmission; the payload was already
 * encoded by the upper layer.
 */
static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}

/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 * addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
			return;
		task->tk_status = 0;
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	switch (task->tk_status) {
	case 0:
		/* Success */
	case -ENETDOWN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		break;
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * 6.
 *	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	int		status;

	/* A NULL procedure (p_proc == 0) is a ping; trace its outcome. */
	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	trace_rpc_call_status(task);
	/* tk_status has been captured in 'status'; clear it so that
	 * whichever retry path we take below starts clean. */
	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task))
			goto out_exit;
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -ETIMEDOUT:
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
		/* Connection-level failure: force a fresh rpcbind lookup. */
		rpc_force_rebind(clnt);
		break;
	case -EADDRINUSE:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EPIPE:
	case -EAGAIN:
		break;
	case -ENFILE:
	case -ENOBUFS:
	case -ENOMEM:
		/* Resource exhaustion: short delay before retrying. */
		rpc_delay(task, HZ>>2);
		break;
	case -EIO:
		/* shutdown or soft timeout */
		goto out_exit;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_program->name, -status);
		goto out_exit;
	}
	/* Retry path: re-encode and retransmit the request. */
	task->tk_action = call_encode;
	rpc_check_timeout(task);
	return;
out_exit:
	rpc_call_rpcerror(task, status);
}

/* Report whether the request's transport is currently connected.
 * No allocated request or transport? return true */
static bool
rpc_check_connected(const struct rpc_rqst *req)
{
	if (!req || !req->rq_xprt)
		return true;
	return xprt_connected(req->rq_xprt);
}

/*
 * Handle a (major) retransmit timeout on the request: terminate soft
 * tasks, warn once for chatty clients, and invalidate the credential
 * in case the timeout was caused by an RPCSEC_GSS sequence problem.
 */
static void
rpc_check_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (RPC_SIGNALLED(task))
		return;

	/* Non-zero return means the major timeout has fired. */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0)
		return;

	trace_rpc_timeout_status(task);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
		rpc_call_rpcerror(task, -ETIMEDOUT);
		return;
	}

	if (RPC_IS_SOFT(task)) {
		/*
		 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has
		 * been sent, it should time out only if the transport
		 * connection gets terminally broken.
		 */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
		    rpc_check_connected(task->tk_rqstp))
			return;

		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_call_rpcerror(task, -ETIMEDOUT);
		else
			__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);
}

/*
 * 7.
 *	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;
	int err;

	/* No decode callback registered means the caller does not want
	 * the reply body; the task is done once the reply arrived. */
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		return;
	}

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			pr_notice_ratelimited("%s: server %s OK\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Did we ever call xprt_complete_rqst()? If not, we should assume
	 * the message is incomplete.
	 */
	err = -EAGAIN;
	if (!req->rq_reply_bytes_recvd)
		goto out;

	/* Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();

	req->rq_rcv_buf.len = req->rq_private_buf.len;
	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	err = rpc_decode_header(task, &xdr);
out:
	switch (err) {
	case 0:
		/* Header accepted: unwrap the body via the auth layer. */
		task->tk_action = rpc_exit_task;
		task->tk_status = rpcauth_unwrap_resp(task, &xdr);
		xdr_finish_decode(&xdr);
		return;
	case -EAGAIN:
		/* Incomplete or garbled reply: re-encode and retransmit. */
		task->tk_status = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
						    req->rq_connect_cookie);
		task->tk_action = call_encode;
		rpc_check_timeout(task);
		break;
	case -EKEYREJECTED:
		/* Stale credential: restart from slot reservation. */
		task->tk_action = call_reserve;
		rpc_check_timeout(task);
		rpcauth_invalcred(task);
		/* Ensure we obtain a new XID if we retry! */
		xprt_release(task);
	}
}

/*
 * Marshal the RPC call header (xid, call direction, RPC version,
 * program, version, procedure) followed by the credential.
 * Returns 0 on success or a negative errno, terminating the task
 * on failure.
 */
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32 *p;
	int error;

	error = -EMSGSIZE;
	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
	if (!p)
		goto out_fail;
	*p++ = req->rq_xid;
	*p++ = rpc_call;
	*p++ = cpu_to_be32(RPC_VERSION);
	*p++ = cpu_to_be32(clnt->cl_prog);
	*p++ = cpu_to_be32(clnt->cl_vers);
	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);

	error = rpcauth_marshcred(task, xdr);
	if (error < 0)
		goto out_fail;
	return 0;
out_fail:
	trace_rpc_bad_callhdr(task);
	rpc_call_rpcerror(task, error);
	return error;
}

/*
 * Parse the RPC reply header: verify the reply/accept status, check
 * the server's verifier, and map the accept or reject status to an
 * errno.  Returns 0 on success, -EAGAIN to trigger a re-encode,
 * -EKEYREJECTED for a stale credential, or a fatal negative errno.
 */
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	int error;
	__be32 *p;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't pointer subtraction in the NFS client may give
	 *   undefined results
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (!p)
		goto out_unparsable;
	p++;	/* skip XID */
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);
	if (error) {
		struct rpc_cred *cred = task->tk_rqstp->rq_cred;

		if (!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
			/* Credential went stale: refresh and retry. */
			rpcauth_invalcred(task);
			if (!task->tk_cred_retry)
				goto out_err;
			task->tk_cred_retry--;
			trace_rpc__stale_creds(task);
			return -EKEYREJECTED;
		}
		goto out_verifier;
	}

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_success:
		return 0;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
		goto out_err;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
		goto out_err;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		error = -EIO;
		break;
	default:
		goto out_unparsable;
	}

out_garbage:
	/* Garbled reply: retry a bounded number of times by re-encoding. */
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
		return -EAGAIN;
	}
out_err:
	rpc_call_rpcerror(task, error);
	return error;

out_unparsable:
	trace_rpc__unparsable(task);
	error = -EIO;
	goto out_garbage;

out_verifier:
	trace_rpc_bad_verifier(task);
	switch (error) {
	case -EPROTONOSUPPORT:
		goto out_err;
	case -EACCES:
		/* Re-encode with a fresh cred */
		fallthrough;
	default:
		goto out_garbage;
	}

out_msg_denied:
	error = -EACCES;
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_auth_error:
		break;
	case rpc_mismatch:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	default:
		goto out_unparsable;
	}

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		rpcauth_invalcred(task);
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
			break;
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
		return -EAGAIN;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		break;
	default:
		goto out_unparsable;
	}
	goto out_err;
}

/* Encoder for the NULL procedure: emits no arguments. */
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		const void *obj)
{
}

/* Decoder for the NULL procedure: consumes no reply body. */
static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		void *obj)
{
	return 0;
}

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

/* Variant with no decoder: used when no reply is expected. */
static const struct rpc_procinfo rpcproc_null_noreply = {
	.p_encode = rpcproc_encode_null,
};

static void
rpc_null_call_prepare(struct rpc_task *task, void *data)
{
	task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
	rpc_call_start(task);
}

static const struct rpc_call_ops rpc_null_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_default_callback,
};

/*
 * Run a NULL RPC (a "ping") on @clnt, optionally pinned to a specific
 * @xprt, using soft/softconn semantics and a null credential.
 */
static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = ops ?: &rpc_null_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			 RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);

/* Synchronous ping: returns the NULL call's status. */
static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_task	*task;
	int status;

	/* Let the auth flavor do its own ping when it provides one. */
	if (clnt->cl_auth->au_ops->ping)
		return clnt->cl_auth->au_ops->ping(clnt);

	task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}

/* Ping variant that does not wait for (or decode) a reply. */
static int rpc_ping_noreply(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null_noreply,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_null_ops,
		.flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
	};
	struct rpc_task	*task;
	int status;

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}

/* Calldata carried by the async "test then add transport" NULL call;
 * owns a reference on both xps and xprt until rpc_cb_add_xprt_release. */
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	/* Only add the transport if the test ping succeeded. */
	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};

/**
 * rpc_clnt_test_and_add_xprt - Test and add a new
 * transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch,
 * @xprt: pointer struct rpc_xprt
 * @in_max_connect: pointer to the max_connect value for the passed in xprt transport
 *
 * Returns 1 on success (the add is deferred to the async NULL call's
 * completion callback), -EINVAL when max_connect is exceeded, or
 * another negative errno on failure.
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *in_max_connect)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;
	int max_connect = clnt->cl_max_connect;

	if (in_max_connect)
		max_connect = *(int *)in_max_connect;
	if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
		rcu_read_lock();
		pr_warn("SUNRPC: reached max allowed number (%d) did not add "
			"transport to server: %s\n", max_connect,
			rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
		rcu_read_unlock();
		return -EINVAL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	/* data now owns a reference on xps and xprt; both are dropped
	 * by rpc_cb_add_xprt_release(). */
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
			&rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);

	data->xps->xps_nunique_destaddr_xprts++;
	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);

/*
 * Ping @xprt synchronously; on success hand it to the caller-supplied
 * add_xprt_test() callback.  Returns 0 on success or a negative errno.
 */
static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt,
				    struct rpc_add_xprt_test *data)
{
	struct rpc_task *task;
	int status = -EADDRINUSE;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);

	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		return status;

	/* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */
	data->add_xprt_test(clnt, xprt, data->data);

	return 0;
}

/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) caller of the test function must dereference the rpc_xprt_switch
 *   and the rpc_xprt.
 *   2) test function must call rpc_xprt_switch_add_xprt, usually in
 *   the rpc_call_done routine.
 *
 * Upon success (return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps:  the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *        and test function call data
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
	if (status < 0)
		goto out_err;

	status = 1;
out_err:
	xprt_put(xprt);
	xprt_switch_put(xps);
	if (status < 0)
		pr_info("RPC:   rpc_clnt_test_xprt failed: %d addr %s not "
			"added\n", status,
			xprt->address_strings[RPC_DISPLAY_ADDR]);
	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);

/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in args and
 * adds it to clnt.
 * If ping is set, then test that connectivity succeeds before
 * adding the new transport.
 *
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0, ident;

	/* Snapshot settings from the currently active transport under RCU
	 * so the new transport inherits them. */
	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	ident = xprt->xprt_class->ident;
	rcu_read_unlock();

	if (!xprtargs->ident)
		xprtargs->ident = ident;
	xprtargs->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;

	/* Explicit timeouts in xprtargs override the inherited ones. */
	if (xprtargs->connect_timeout)
		connect_timeout = xprtargs->connect_timeout;
	if (xprtargs->reconnect_timeout)
		reconnect_timeout = xprtargs->reconnect_timeout;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		/* A non-zero setup() result (e.g. 1 from
		 * rpc_clnt_setup_test_and_add_xprt) means the transport was
		 * handled (or rejected) by the callback; skip the add. */
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);

/*
 * Probe one offline transport: if it does not share the main
 * transport's address/port, ping it and let data->add_xprt_test()
 * decide whether it still belongs to the trunking group.
 */
static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt,
				  struct rpc_xprt *xprt,
				  struct rpc_add_xprt_test *data)
{
	struct rpc_xprt *main_xprt;
	int status = 0;

	xprt_get(xprt);

	rcu_read_lock();
	main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
				   (struct sockaddr *)&main_xprt->addr);
	rcu_read_unlock();
	xprt_put(main_xprt);
	/* Only probe transports that are offline and not the main one. */
	if (status || !test_bit(XPRT_OFFLINE, &xprt->state))
		goto out;

	status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
out:
	xprt_put(xprt);
	return status;
}

/* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking
 * @clnt rpc_clnt structure
 *
 * For each offlined transport found in the rpc_clnt structure call
 * the function rpc_xprt_probe_trunked() which will determine if this
 * transport still belongs to the trunking group.
 */
void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt,
				  struct rpc_add_xprt_test *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi);
	if (ret)
		return;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = rpc_xprt_probe_trunked(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
		/* Probing may change the offline set; restart the walk. */
		xprt_iter_rewind(&xpi);
	}
	xprt_iter_destroy(&xpi);
}
EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts);

/*
 * Mark @xprt offline unless it is the client's main transport.
 * Takes the transport's XPRT_LOCKED bit (killable wait) while
 * flipping the state.  Returns 0, -EINTR, or the address-compare
 * result treated as "not ours".
 */
static int rpc_xprt_offline(struct rpc_clnt *clnt,
			    struct rpc_xprt *xprt,
			    void *data)
{
	struct rpc_xprt *main_xprt;
	struct rpc_xprt_switch *xps;
	int err = 0;

	xprt_get(xprt);

	rcu_read_lock();
	main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
				(struct sockaddr *)&main_xprt->addr);
	rcu_read_unlock();
	xprt_put(main_xprt);
	/* Never offline the main transport itself. */
	if (err)
		goto out;

	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
		err = -EINTR;
		goto out;
	}
	xprt_set_offline_locked(xprt, xps);

	xprt_release_write(xprt, NULL);
out:
	xprt_put(xprt);
	xprt_switch_put(xps);
	return err;
}

/* rpc_clnt_manage_trunked_xprts -- offline trunked transports
 * @clnt rpc_clnt structure
 *
 * For each active transport found in the rpc_clnt structure call
 * the function rpc_xprt_offline() which will identify trunked transports
 * and will mark them offline.
3237 */ 3238 void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) 3239 { 3240 rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); 3241 } 3242 EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); 3243 3244 struct connect_timeout_data { 3245 unsigned long connect_timeout; 3246 unsigned long reconnect_timeout; 3247 }; 3248 3249 static int 3250 rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, 3251 struct rpc_xprt *xprt, 3252 void *data) 3253 { 3254 struct connect_timeout_data *timeo = data; 3255 3256 if (xprt->ops->set_connect_timeout) 3257 xprt->ops->set_connect_timeout(xprt, 3258 timeo->connect_timeout, 3259 timeo->reconnect_timeout); 3260 return 0; 3261 } 3262 3263 void 3264 rpc_set_connect_timeout(struct rpc_clnt *clnt, 3265 unsigned long connect_timeout, 3266 unsigned long reconnect_timeout) 3267 { 3268 struct connect_timeout_data timeout = { 3269 .connect_timeout = connect_timeout, 3270 .reconnect_timeout = reconnect_timeout, 3271 }; 3272 rpc_clnt_iterate_for_each_xprt(clnt, 3273 rpc_xprt_set_connect_timeout, 3274 &timeout); 3275 } 3276 EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); 3277 3278 void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3279 { 3280 struct rpc_xprt_switch *xps; 3281 3282 xps = rpc_clnt_xprt_switch_get(clnt); 3283 xprt_set_online_locked(xprt, xps); 3284 xprt_switch_put(xps); 3285 } 3286 3287 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3288 { 3289 struct rpc_xprt_switch *xps; 3290 3291 if (rpc_clnt_xprt_switch_has_addr(clnt, 3292 (const struct sockaddr *)&xprt->addr)) { 3293 return rpc_clnt_xprt_set_online(clnt, xprt); 3294 } 3295 3296 xps = rpc_clnt_xprt_switch_get(clnt); 3297 rpc_xprt_switch_add_xprt(xps, xprt); 3298 xprt_switch_put(xps); 3299 } 3300 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 3301 3302 void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3303 { 3304 struct rpc_xprt_switch *xps; 3305 3306 rcu_read_lock(); 3307 xps = 
rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3308 rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 3309 xprt, 0); 3310 xps->xps_nunique_destaddr_xprts--; 3311 rcu_read_unlock(); 3312 } 3313 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); 3314 3315 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 3316 const struct sockaddr *sap) 3317 { 3318 struct rpc_xprt_switch *xps; 3319 bool ret; 3320 3321 rcu_read_lock(); 3322 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3323 ret = rpc_xprt_switch_has_addr(xps, sap); 3324 rcu_read_unlock(); 3325 return ret; 3326 } 3327 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); 3328 3329 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3330 static void rpc_show_header(struct rpc_clnt *clnt) 3331 { 3332 printk(KERN_INFO "clnt[%pISpc] RPC tasks[%d]\n", 3333 (struct sockaddr *)&clnt->cl_xprt->addr, 3334 atomic_read(&clnt->cl_task_count)); 3335 printk(KERN_INFO "-pid- flgs status -client- --rqstp- " 3336 "-timeout ---ops--\n"); 3337 } 3338 3339 static void rpc_show_task(const struct rpc_clnt *clnt, 3340 const struct rpc_task *task) 3341 { 3342 const char *rpc_waitq = "none"; 3343 3344 if (RPC_IS_QUEUED(task)) 3345 rpc_waitq = rpc_qname(task->tk_waitqueue); 3346 3347 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", 3348 task->tk_pid, task->tk_flags, task->tk_status, 3349 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, 3350 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), 3351 task->tk_action, rpc_waitq); 3352 } 3353 3354 void rpc_show_tasks(struct net *net) 3355 { 3356 struct rpc_clnt *clnt; 3357 struct rpc_task *task; 3358 int header = 0; 3359 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 3360 3361 spin_lock(&sn->rpc_client_lock); 3362 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 3363 spin_lock(&clnt->cl_lock); 3364 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 3365 if (!header) { 3366 rpc_show_header(clnt); 3367 header++; 3368 } 3369 
rpc_show_task(clnt, task); 3370 } 3371 spin_unlock(&clnt->cl_lock); 3372 } 3373 spin_unlock(&sn->rpc_client_lock); 3374 } 3375 #endif 3376 3377 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 3378 static int 3379 rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, 3380 struct rpc_xprt *xprt, 3381 void *dummy) 3382 { 3383 return xprt_enable_swap(xprt); 3384 } 3385 3386 int 3387 rpc_clnt_swap_activate(struct rpc_clnt *clnt) 3388 { 3389 while (clnt != clnt->cl_parent) 3390 clnt = clnt->cl_parent; 3391 if (atomic_inc_return(&clnt->cl_swapper) == 1) 3392 return rpc_clnt_iterate_for_each_xprt(clnt, 3393 rpc_clnt_swap_activate_callback, NULL); 3394 return 0; 3395 } 3396 EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); 3397 3398 static int 3399 rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, 3400 struct rpc_xprt *xprt, 3401 void *dummy) 3402 { 3403 xprt_disable_swap(xprt); 3404 return 0; 3405 } 3406 3407 void 3408 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) 3409 { 3410 while (clnt != clnt->cl_parent) 3411 clnt = clnt->cl_parent; 3412 if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) 3413 rpc_clnt_iterate_for_each_xprt(clnt, 3414 rpc_clnt_swap_deactivate_callback, NULL); 3415 } 3416 EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); 3417 #endif /* CONFIG_SUNRPC_SWAP */ 3418