// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static int	rpc_ping_noreply(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);

static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		if (pipefs_sb == clnt->pipefs_sb)
			__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
					   struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	clnt->pipefs_sb = pipefs_sb;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (refcount_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	ssize_t copied;

	copied = strscpy(clnt->cl_nodename,
			 nodename, sizeof(clnt->cl_nodename));

	clnt->cl_nodelen = copied < 0
				? sizeof(clnt->cl_nodename) - 1
				: copied;
}

static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_free(&rpc_clids, clnt->cl_clid);
}

static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_prog = args->prognumber ? : program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = args->stats ? : program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	refcount_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;
	if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL)
		clnt->cl_netunreach_fatal = 1;

	return clnt;
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
		.connect_timeout = args->connect_timeout,
		.reconnect_timeout = args->reconnect_timeout,
	};
	char servername[RPC_MAXNETNAMELEN];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
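
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * how a kernel caller might fill in rpc_create_args and create a client.
 * "my_program" and "my_server_addr" are hypothetical; only rpc_create()
 * itself and the named fields are taken from this file.
 *
 *	struct rpc_create_args args = {
 *		.net		= &init_net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&my_server_addr,
 *		.addrsize	= sizeof(my_server_addr),
 *		.servername	= "my-server",
 *		.program	= &my_program,
 *		.version	= 0,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */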

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_netunreach_fatal = clnt->cl_netunreach_fatal;
	new->cl_principal = clnt->cl_principal;
	new->cl_max_connect = clnt->cl_max_connect;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
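
/*
 * Illustrative sketch (editor's example): cloning an existing client so
 * that the same transport is shared under a different security flavor,
 * as the comment above __rpc_clone_client() describes. RPC_AUTH_GSS is
 * a standard flavor; error handling follows the ERR_PTR convention used
 * throughout this file.
 *
 *	struct rpc_clnt *gss_clnt;
 *
 *	gss_clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_GSS);
 *	if (IS_ERR(gss_clnt))
 *		return PTR_ERR(gss_clnt);
 *	...
 *	rpc_shutdown_client(gss_clnt);
 */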

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	args->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 * client and server.
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);
	if (err)
		goto out_revert;

	synchronize_rcu();
	if (parent != clnt)
		rpc_release_client(parent);
	xprt_switch_put(oldxps);
	xprt_put(old);
	trace_rpc_clnt_replace_xprt(clnt);
	return 0;

out_revert:
	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_replace_xprt_err(clnt);
	return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);

static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();

	return xps;
}

static
int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi,
			     void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps))
{
	struct rpc_xprt_switch *xps;

	xps = rpc_clnt_xprt_switch_get(clnt);
	if (xps == NULL)
		return -EAGAIN;
	func(xpi, xps);
	xprt_switch_put(xps);
	return 0;
}

static
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall);
}

static
int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt,
				    struct rpc_xprt_iter *xpi)
{
	return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline);
}

/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
		void *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
	if (ret)
		return ret;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = fn(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
	}
	xprt_iter_destroy(&xpi);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
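
/*
 * Illustrative sketch (editor's example): a minimal callback for
 * rpc_clnt_iterate_for_each_xprt() that counts the attached transports.
 * rpc_clnt_disconnect_xprt() below is the in-file instance of the same
 * pattern; "count_xprt" is hypothetical.
 *
 *	static int count_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 *			      void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	rpc_clnt_iterate_for_each_xprt(clnt, count_xprt, &nr);
 */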

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;


	if (list_empty(&clnt->cl_tasks))
		return;

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	trace_rpc_clnt_killall(clnt);
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

/**
 * rpc_cancel_tasks - try to cancel a set of RPC tasks
 * @clnt: Pointer to RPC client
 * @error: RPC task error value to set
 * @fnmatch: Pointer to selector function
 * @data: User data
 *
 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
 * The argument @error must be a negative error value.
 */
unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
			       bool (*fnmatch)(const struct rpc_task *,
					       const void *),
			       const void *data)
{
	struct rpc_task *task;
	unsigned long count = 0;

	if (list_empty(&clnt->cl_tasks))
		return 0;
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(task))
			continue;
		if (!fnmatch(task, data))
			continue;
		rpc_task_try_cancel(task, error);
		count++;
	}
	spin_unlock(&clnt->cl_lock);
	return count;
}
EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
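
/*
 * Illustrative sketch (editor's example): a selector for
 * rpc_cancel_tasks() that matches every active task, cancelling them
 * all with -EIO. "match_all" is hypothetical; the fnmatch signature is
 * the one declared above.
 *
 *	static bool match_all(const struct rpc_task *task, const void *data)
 *	{
 *		return true;
 *	}
 *
 *	unsigned long n = rpc_cancel_tasks(clnt, -EIO, match_all, NULL);
 */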

static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt, void *dummy)
{
	if (xprt_connected(xprt))
		xprt_force_disconnect(xprt);
	return 0;
}

void rpc_clnt_disconnect(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	might_sleep();

	trace_rpc_clnt_shutdown(clnt);

	clnt->cl_shutdown = 1;
	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	/* wait for tasks still in workqueue or waitqueue */
	wait_event_timeout(destroy_wait,
			   atomic_read(&clnt->cl_task_count) == 0, 1 * HZ);

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void rpc_free_client_work(struct work_struct *work)
{
	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

	trace_rpc_clnt_free(clnt);

	/* These might block on processes that might allocate memory,
	 * so they cannot be called in rpciod, so they are handled separately
	 * here.
	 */
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	rpc_free_clid(clnt);
	rpc_clnt_remove_pipedir(clnt);
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

	kfree(clnt);
	rpciod_down();
}

static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	trace_rpc_clnt_release(clnt);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);

	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
	schedule_work(&clnt->cl_work);
	return parent;
}

/*
 * Free an RPC client
 */
static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	if (clnt->cl_auth != NULL) {
		rpcauth_release(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (refcount_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
	return NULL;
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (refcount_dec_not_one(&clnt->cl_count))
			break;
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
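
/*
 * Illustrative note (editor's example): the usual teardown pattern
 * following from the functions above. The owner that created a client
 * calls rpc_shutdown_client(), which kills outstanding tasks and drops
 * the initial reference; code that only took a temporary reference
 * drops it with rpc_release_client().
 *
 *	rpc_shutdown_client(clnt);	// owner: terminate and free
 *	rpc_release_client(clnt);	// borrower: just drop a reference
 */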

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
		.stats		= old->cl_stats,
		.timeout	= old->cl_timeout,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
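
/*
 * Illustrative sketch (editor's example): sharing an NFS client's
 * transport with a second program, in the spirit of the NFSv2/v3 ACL
 * case mentioned above. "nfs_clnt" and "nfsacl_program" are
 * hypothetical names.
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */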

struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}

static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}

void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt) {
		task->tk_xprt = NULL;
		if (task->tk_client)
			rpc_task_release_xprt(task->tk_client, xprt);
		else
			xprt_put(xprt);
	}
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);
	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;
		atomic_dec(&clnt->cl_task_count);

		rpc_release_client(clnt);
	}
}

static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}

static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}

static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt) {
		if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
		      (task->tk_flags & RPC_TASK_MOVEABLE)))
			return;
		xprt_release(task);
		xprt_put(task->tk_xprt);
	}
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}

static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	refcount_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	if (clnt->cl_netunreach_fatal)
		task->tk_flags |= RPC_TASK_NETUNREACH_FATAL;
	atomic_inc(&clnt->cl_task_count);
}

static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		task->tk_msg.rpc_cred = msg->rpc_cred;
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			get_cred(task->tk_msg.rpc_cred);
	}
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		return task;

	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
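
/*
 * Illustrative sketch (editor's example): issuing a synchronous call
 * for procedure 0 (NULL), much as rpc_ping() does elsewhere in this
 * file. "my_null_proc" stands in for a struct rpc_procinfo that the
 * caller's program defines.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &my_null_proc,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 *
 *	if (status < 0)
 *		pr_info("call failed: %d\n", status);
 */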

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 * @timeout: timeout values to use for this task
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
		struct rpc_timeout *timeout)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		return task;
	}

	xprt_init_bc_request(req, task, timeout);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
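
/*
 * Illustrative sketch (editor's example): honoring the RCU rule stated
 * in the kernel-doc above; the returned string may only be used while
 * holding the RCU read lock. RPC_DISPLAY_ADDR is one of the standard
 * rpc_display_format_t values.
 *
 *	rcu_read_lock();
 *	pr_info("server is %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();
 */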

static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;
	int err;

	err = __sock_create(net, sap->sa_family,
				SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create UDP socket (%d)\n", err);
		goto out;
	}

	switch (sap->sa_family) {
	case AF_INET:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		err = -EAFNOSUPPORT;
		goto out_release;
	}
	if (err < 0) {
		dprintk("RPC: can't bind UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_connect(sock, sap, salen, 0);
	if (err < 0) {
		dprintk("RPC: can't connect UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_getsockname(sock, buf);
	if (err < 0) {
		dprintk("RPC: getsockname failed (%d)\n", err);
		goto out_release;
	}

	err = 0;
	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);

out_release:
	sock_release(sock);
out:
	return err;
}

/*
 * Scraping a connected socket failed, so we don't have a usable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC: %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC: %s: succeeded\n", __func__);
	return 0;
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);
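
/*
 * Illustrative sketch (editor's example): querying the local endpoint
 * address for a client; a sockaddr_storage is large enough for any
 * address family this file handles.
 *
 *	struct sockaddr_storage laddr;
 *	int err = rpc_localaddr(clnt, (struct sockaddr *)&laddr,
 *				sizeof(laddr));
 *	if (err)
 *		return err;
 */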

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	size_t ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	unsigned int ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_num_slots(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_num_bc_slots);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
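
/*
 * Illustrative sketch (editor's example): restarting a call from an
 * rpc_call_done exit handler, the usage the comments above describe.
 * "my_call_done" and the retry-on-EAGAIN policy are hypothetical.
 *
 *	static void my_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status == -EAGAIN)
 *			rpc_restart_call(task);
 *	}
 */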

const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}

static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	rpc_task_set_rpc_status(task, rpc_status);
	rpc_exit(task, tk_status);
}

static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	if (task->tk_client->cl_shutdown) {
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

static void call_retry_reserve(struct rpc_task *task);

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;

			/* Add to the client's list of all tasks */
			spin_lock(&task->tk_client->cl_lock);
			if (list_empty(&task->tk_task))
				list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
			spin_unlock(&task->tk_client->cl_lock);
			return;
		}
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	switch (status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	default:
		rpc_call_rpcerror(task, status);
	}
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	case -EKEYEXPIRED:
		break;
	case -ENOMEM:
		rpc_delay(task, HZ >> 4);
		return;
	}
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}

/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
	int status;

	task->tk_status = 0;
	task->tk_action = call_encode;

	if (req->rq_buffer)
		return;

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
			   proc->p_arglen;
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			  max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	trace_rpc_buf_alloc(task, status);
	if (status == 0)
		return;
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
		return;
	}

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}
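
/*
 * Illustrative arithmetic (editor's note): the sizes above are computed
 * in XDR quads (32-bit words) and converted to bytes with a left shift
 * by 2. For example, RPC_CALLHDRSIZE quads of header plus au_cslack
 * quads counted twice plus p_arglen quads of arguments totalling 100
 * quads becomes 100 << 2 = 400 bytes of call buffer.
 */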

static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
		     req->rq_buffer,
		     req->rq_callsize);
	xdr_buf_init(&req->rq_rcv_buf,
		     req->rq_rbuffer,
		     req->rq_rcvsize);

	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	if (rpc_encode_header(task, &xdr))
		return;

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
		goto out;

	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Add task to reply queue before transmission to avoid races */
	if (task->tk_status == 0 && rpc_reply_expected(task))
		task->tk_status = xprt_request_enqueue_receive(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ENETDOWN:
	case -ENETUNREACH:
		if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
			break;
		fallthrough;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
2131 return; 2132 out_next: 2133 task->tk_action = call_connect; 2134 return; 2135 retry_timeout: 2136 task->tk_status = 0; 2137 task->tk_action = call_bind; 2138 rpc_check_timeout(task); 2139 } 2140 2141 /* 2142 * 4b. Connect to the RPC server 2143 */ 2144 static void 2145 call_connect(struct rpc_task *task) 2146 { 2147 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2148 2149 if (rpc_task_transmitted(task)) { 2150 rpc_task_handle_transmitted(task); 2151 return; 2152 } 2153 2154 if (xprt_connected(xprt)) { 2155 task->tk_action = call_transmit; 2156 return; 2157 } 2158 2159 task->tk_action = call_connect_status; 2160 if (task->tk_status < 0) 2161 return; 2162 if (task->tk_flags & RPC_TASK_NOCONNECT) { 2163 rpc_call_rpcerror(task, -ENOTCONN); 2164 return; 2165 } 2166 if (!xprt_prepare_transmit(task)) 2167 return; 2168 xprt_connect(task); 2169 } 2170 2171 /* 2172 * 4c. Sort out connect result 2173 */ 2174 static void 2175 call_connect_status(struct rpc_task *task) 2176 { 2177 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2178 struct rpc_clnt *clnt = task->tk_client; 2179 int status = task->tk_status; 2180 2181 if (rpc_task_transmitted(task)) { 2182 rpc_task_handle_transmitted(task); 2183 return; 2184 } 2185 2186 trace_rpc_connect_status(task); 2187 2188 if (task->tk_status == 0) { 2189 clnt->cl_stats->netreconn++; 2190 goto out_next; 2191 } 2192 if (xprt_connected(xprt)) { 2193 task->tk_status = 0; 2194 goto out_next; 2195 } 2196 2197 task->tk_status = 0; 2198 switch (status) { 2199 case -ENETDOWN: 2200 case -ENETUNREACH: 2201 if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2202 break; 2203 fallthrough; 2204 case -ECONNREFUSED: 2205 case -ECONNRESET: 2206 /* A positive refusal suggests a rebind is needed. */ 2207 if (clnt->cl_autobind) { 2208 rpc_force_rebind(clnt); 2209 if (RPC_IS_SOFTCONN(task)) 2210 break; 2211 goto out_retry; 2212 } 2213 fallthrough; 2214 case -ECONNABORTED: 2215 case -EHOSTUNREACH: 2216 case -EPIPE: 2217 case -EPROTO: 2218 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2219 task->tk_rqstp->rq_connect_cookie); 2220 if (RPC_IS_SOFTCONN(task)) 2221 break; 2222 /* retry with existing socket, after a delay */ 2223 rpc_delay(task, 3*HZ); 2224 fallthrough; 2225 case -EADDRINUSE: 2226 case -ENOTCONN: 2227 case -EAGAIN: 2228 case -ETIMEDOUT: 2229 if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && 2230 (task->tk_flags & RPC_TASK_MOVEABLE) && 2231 test_bit(XPRT_REMOVE, &xprt->state)) { 2232 struct rpc_xprt *saved = task->tk_xprt; 2233 struct rpc_xprt_switch *xps; 2234 2235 xps = rpc_clnt_xprt_switch_get(clnt); 2236 if (xps->xps_nxprts > 1) { 2237 long value; 2238 2239 xprt_release(task); 2240 value = atomic_long_dec_return(&xprt->queuelen); 2241 if (value == 0) 2242 rpc_xprt_switch_remove_xprt(xps, saved, 2243 true); 2244 xprt_put(saved); 2245 task->tk_xprt = NULL; 2246 task->tk_action = call_start; 2247 } 2248 xprt_switch_put(xps); 2249 if (!task->tk_xprt) 2250 goto out; 2251 } 2252 goto out_retry; 2253 case -ENOBUFS: 2254 rpc_delay(task, HZ >> 2); 2255 goto out_retry; 2256 } 2257 rpc_call_rpcerror(task, status); 2258 return; 2259 out_next: 2260 task->tk_action = call_transmit; 2261 return; 2262 out_retry: 2263 /* Check for timeouts before looping back to call_bind */ 2264 task->tk_action = call_bind; 2265 out: 2266 rpc_check_timeout(task); 2267 } 2268 2269 /* 2270 * 5. 
Transmit the RPC request, and wait for reply 2271 */ 2272 static void 2273 call_transmit(struct rpc_task *task) 2274 { 2275 if (rpc_task_transmitted(task)) { 2276 rpc_task_handle_transmitted(task); 2277 return; 2278 } 2279 2280 task->tk_action = call_transmit_status; 2281 if (!xprt_prepare_transmit(task)) 2282 return; 2283 task->tk_status = 0; 2284 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2285 if (!xprt_connected(task->tk_xprt)) { 2286 task->tk_status = -ENOTCONN; 2287 return; 2288 } 2289 xprt_transmit(task); 2290 } 2291 xprt_end_transmit(task); 2292 } 2293 2294 /* 2295 * 5a. Handle cleanup after a transmission 2296 */ 2297 static void 2298 call_transmit_status(struct rpc_task *task) 2299 { 2300 task->tk_action = call_status; 2301 2302 /* 2303 * Common case: success. Force the compiler to put this 2304 * test first. 2305 */ 2306 if (rpc_task_transmitted(task)) { 2307 task->tk_status = 0; 2308 xprt_request_wait_receive(task); 2309 return; 2310 } 2311 2312 switch (task->tk_status) { 2313 default: 2314 break; 2315 case -EBADMSG: 2316 task->tk_status = 0; 2317 task->tk_action = call_encode; 2318 break; 2319 /* 2320 * Special cases: if we've been waiting on the 2321 * socket's write_space() callback, or if the 2322 * socket just returned a connection error, 2323 * then hold onto the transport lock. 2324 */ 2325 case -ENOMEM: 2326 case -ENOBUFS: 2327 rpc_delay(task, HZ>>2); 2328 fallthrough; 2329 case -EBADSLT: 2330 case -EAGAIN: 2331 task->tk_action = call_transmit; 2332 task->tk_status = 0; 2333 break; 2334 case -EHOSTDOWN: 2335 case -ENETDOWN: 2336 case -EHOSTUNREACH: 2337 case -ENETUNREACH: 2338 case -EPERM: 2339 break; 2340 case -ECONNREFUSED: 2341 if (RPC_IS_SOFTCONN(task)) { 2342 if (!task->tk_msg.rpc_proc->p_proc) 2343 trace_xprt_ping(task->tk_xprt, 2344 task->tk_status); 2345 rpc_call_rpcerror(task, task->tk_status); 2346 return; 2347 } 2348 fallthrough; 2349 case -ECONNRESET: 2350 case -ECONNABORTED: 2351 case -EADDRINUSE: 2352 case -ENOTCONN: 2353 case -EPIPE: 2354 task->tk_action = call_bind; 2355 task->tk_status = 0; 2356 break; 2357 } 2358 rpc_check_timeout(task); 2359 } 2360 2361 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 2362 static void call_bc_transmit(struct rpc_task *task); 2363 static void call_bc_transmit_status(struct rpc_task *task); 2364 2365 static void 2366 call_bc_encode(struct rpc_task *task) 2367 { 2368 xprt_request_enqueue_transmit(task); 2369 task->tk_action = call_bc_transmit; 2370 } 2371 2372 /* 2373 * 5b. Send the backchannel RPC reply. On error, drop the reply. In 2374 * addition, disconnect on connectivity errors. 
2375 */ 2376 static void 2377 call_bc_transmit(struct rpc_task *task) 2378 { 2379 task->tk_action = call_bc_transmit_status; 2380 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2381 if (!xprt_prepare_transmit(task)) 2382 return; 2383 task->tk_status = 0; 2384 xprt_transmit(task); 2385 } 2386 xprt_end_transmit(task); 2387 } 2388 2389 static void 2390 call_bc_transmit_status(struct rpc_task *task) 2391 { 2392 struct rpc_rqst *req = task->tk_rqstp; 2393 2394 if (rpc_task_transmitted(task)) 2395 task->tk_status = 0; 2396 2397 switch (task->tk_status) { 2398 case 0: 2399 /* Success */ 2400 case -ENETDOWN: 2401 case -EHOSTDOWN: 2402 case -EHOSTUNREACH: 2403 case -ENETUNREACH: 2404 case -ECONNRESET: 2405 case -ECONNREFUSED: 2406 case -EADDRINUSE: 2407 case -ENOTCONN: 2408 case -EPIPE: 2409 break; 2410 case -ENOMEM: 2411 case -ENOBUFS: 2412 rpc_delay(task, HZ>>2); 2413 fallthrough; 2414 case -EBADSLT: 2415 case -EAGAIN: 2416 task->tk_status = 0; 2417 task->tk_action = call_bc_transmit; 2418 return; 2419 case -ETIMEDOUT: 2420 /* 2421 * Problem reaching the server. Disconnect and let the 2422 * forechannel reestablish the connection. The server will 2423 * have to retransmit the backchannel request and we'll 2424 * reprocess it. Since these ops are idempotent, there's no 2425 * need to cache our reply at this time. 2426 */ 2427 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2428 "error: %d\n", task->tk_status); 2429 xprt_conditional_disconnect(req->rq_xprt, 2430 req->rq_connect_cookie); 2431 break; 2432 default: 2433 /* 2434 * We were unable to reply and will have to drop the 2435 * request. The server should reconnect and retransmit. 2436 */ 2437 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2438 "error: %d\n", task->tk_status); 2439 break; 2440 } 2441 task->tk_action = rpc_exit_task; 2442 } 2443 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 2444 2445 /* 2446 * 6. Sort out the RPC call status 2447 */ 2448 static void 2449 call_status(struct rpc_task *task) 2450 { 2451 struct rpc_clnt *clnt = task->tk_client; 2452 int status; 2453 2454 if (!task->tk_msg.rpc_proc->p_proc) 2455 trace_xprt_ping(task->tk_xprt, task->tk_status); 2456 2457 status = task->tk_status; 2458 if (status >= 0) { 2459 task->tk_action = call_decode; 2460 return; 2461 } 2462 2463 trace_rpc_call_status(task); 2464 task->tk_status = 0; 2465 switch(status) { 2466 case -ENETDOWN: 2467 case -ENETUNREACH: 2468 if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2469 goto out_exit; 2470 fallthrough; 2471 case -EHOSTDOWN: 2472 case -EHOSTUNREACH: 2473 case -EPERM: 2474 if (RPC_IS_SOFTCONN(task)) 2475 goto out_exit; 2476 /* 2477 * Delay any retries for 3 seconds, then handle as if it 2478 * were a timeout. 
2479 */ 2480 rpc_delay(task, 3*HZ); 2481 fallthrough; 2482 case -ETIMEDOUT: 2483 break; 2484 case -ECONNREFUSED: 2485 case -ECONNRESET: 2486 case -ECONNABORTED: 2487 case -ENOTCONN: 2488 rpc_force_rebind(clnt); 2489 break; 2490 case -EADDRINUSE: 2491 rpc_delay(task, 3*HZ); 2492 fallthrough; 2493 case -EPIPE: 2494 case -EAGAIN: 2495 break; 2496 case -ENFILE: 2497 case -ENOBUFS: 2498 case -ENOMEM: 2499 rpc_delay(task, HZ>>2); 2500 break; 2501 case -EIO: 2502 /* shutdown or soft timeout */ 2503 goto out_exit; 2504 default: 2505 if (clnt->cl_chatty) 2506 printk("%s: RPC call returned error %d\n", 2507 clnt->cl_program->name, -status); 2508 goto out_exit; 2509 } 2510 task->tk_action = call_encode; 2511 rpc_check_timeout(task); 2512 return; 2513 out_exit: 2514 rpc_call_rpcerror(task, status); 2515 } 2516 2517 static bool 2518 rpc_check_connected(const struct rpc_rqst *req) 2519 { 2520 /* No allocated request or transport? return true */ 2521 if (!req || !req->rq_xprt) 2522 return true; 2523 return xprt_connected(req->rq_xprt); 2524 } 2525 2526 static void 2527 rpc_check_timeout(struct rpc_task *task) 2528 { 2529 struct rpc_clnt *clnt = task->tk_client; 2530 2531 if (RPC_SIGNALLED(task)) 2532 return; 2533 2534 if (xprt_adjust_timeout(task->tk_rqstp) == 0) 2535 return; 2536 2537 trace_rpc_timeout_status(task); 2538 task->tk_timeouts++; 2539 2540 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { 2541 rpc_call_rpcerror(task, -ETIMEDOUT); 2542 return; 2543 } 2544 2545 if (RPC_IS_SOFT(task)) { 2546 /* 2547 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has 2548 * been sent, it should time out only if the transport 2549 * connection gets terminally broken. 2550 */ 2551 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && 2552 rpc_check_connected(task->tk_rqstp)) 2553 return; 2554 2555 if (clnt->cl_chatty) { 2556 pr_notice_ratelimited( 2557 "%s: server %s not responding, timed out\n", 2558 clnt->cl_program->name, 2559 task->tk_xprt->servername); 2560 } 2561 if (task->tk_flags & RPC_TASK_TIMEOUT) 2562 rpc_call_rpcerror(task, -ETIMEDOUT); 2563 else 2564 __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); 2565 return; 2566 } 2567 2568 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 2569 task->tk_flags |= RPC_CALL_MAJORSEEN; 2570 if (clnt->cl_chatty) { 2571 pr_notice_ratelimited( 2572 "%s: server %s not responding, still trying\n", 2573 clnt->cl_program->name, 2574 task->tk_xprt->servername); 2575 } 2576 } 2577 rpc_force_rebind(clnt); 2578 /* 2579 * Did our request time out due to an RPCSEC_GSS out-of-sequence 2580 * event? RFC2203 requires the server to drop all such requests. 2581 */ 2582 rpcauth_invalcred(task); 2583 } 2584 2585 /* 2586 * 7. Decode the RPC reply 2587 */ 2588 static void 2589 call_decode(struct rpc_task *task) 2590 { 2591 struct rpc_clnt *clnt = task->tk_client; 2592 struct rpc_rqst *req = task->tk_rqstp; 2593 struct xdr_stream xdr; 2594 int err; 2595 2596 if (!task->tk_msg.rpc_proc->p_decode) { 2597 task->tk_action = rpc_exit_task; 2598 return; 2599 } 2600 2601 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 2602 if (clnt->cl_chatty) { 2603 pr_notice_ratelimited("%s: server %s OK\n", 2604 clnt->cl_program->name, 2605 task->tk_xprt->servername); 2606 } 2607 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 2608 } 2609 2610 /* 2611 * Did we ever call xprt_complete_rqst()? If not, we should assume 2612 * the message is incomplete. 
2613 */ 2614 err = -EAGAIN; 2615 if (!req->rq_reply_bytes_recvd) 2616 goto out; 2617 2618 /* Ensure that we see all writes made by xprt_complete_rqst() 2619 * before it changed req->rq_reply_bytes_recvd. 2620 */ 2621 smp_rmb(); 2622 2623 req->rq_rcv_buf.len = req->rq_private_buf.len; 2624 trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); 2625 2626 /* Check that the softirq receive buffer is valid */ 2627 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 2628 sizeof(req->rq_rcv_buf)) != 0); 2629 2630 xdr_init_decode(&xdr, &req->rq_rcv_buf, 2631 req->rq_rcv_buf.head[0].iov_base, req); 2632 err = rpc_decode_header(task, &xdr); 2633 out: 2634 switch (err) { 2635 case 0: 2636 task->tk_action = rpc_exit_task; 2637 task->tk_status = rpcauth_unwrap_resp(task, &xdr); 2638 xdr_finish_decode(&xdr); 2639 return; 2640 case -EAGAIN: 2641 task->tk_status = 0; 2642 if (task->tk_client->cl_discrtry) 2643 xprt_conditional_disconnect(req->rq_xprt, 2644 req->rq_connect_cookie); 2645 task->tk_action = call_encode; 2646 rpc_check_timeout(task); 2647 break; 2648 case -EKEYREJECTED: 2649 task->tk_action = call_reserve; 2650 rpc_check_timeout(task); 2651 rpcauth_invalcred(task); 2652 /* Ensure we obtain a new XID if we retry! */ 2653 xprt_release(task); 2654 } 2655 } 2656 2657 static int 2658 rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) 2659 { 2660 struct rpc_clnt *clnt = task->tk_client; 2661 struct rpc_rqst *req = task->tk_rqstp; 2662 __be32 *p; 2663 int error; 2664 2665 error = -EMSGSIZE; 2666 p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); 2667 if (!p) 2668 goto out_fail; 2669 *p++ = req->rq_xid; 2670 *p++ = rpc_call; 2671 *p++ = cpu_to_be32(RPC_VERSION); 2672 *p++ = cpu_to_be32(clnt->cl_prog); 2673 *p++ = cpu_to_be32(clnt->cl_vers); 2674 *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); 2675 2676 error = rpcauth_marshcred(task, xdr); 2677 if (error < 0) 2678 goto out_fail; 2679 return 0; 2680 out_fail: 2681 trace_rpc_bad_callhdr(task); 2682 rpc_call_rpcerror(task, error); 2683 return error; 2684 } 2685 2686 static noinline int 2687 rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) 2688 { 2689 struct rpc_clnt *clnt = task->tk_client; 2690 int error; 2691 __be32 *p; 2692 2693 /* RFC-1014 says that the representation of XDR data must be a 2694 * multiple of four bytes 2695 * - if it isn't pointer subtraction in the NFS client may give 2696 * undefined results 2697 */ 2698 if (task->tk_rqstp->rq_rcv_buf.len & 3) 2699 goto out_unparsable; 2700 2701 p = xdr_inline_decode(xdr, 3 * sizeof(*p)); 2702 if (!p) 2703 goto out_unparsable; 2704 p++; /* skip XID */ 2705 if (*p++ != rpc_reply) 2706 goto out_unparsable; 2707 if (*p++ != rpc_msg_accepted) 2708 goto out_msg_denied; 2709 2710 error = rpcauth_checkverf(task, xdr); 2711 if (error) { 2712 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 2713 2714 if (!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { 2715 rpcauth_invalcred(task); 2716 if (!task->tk_cred_retry) 2717 goto out_err; 2718 task->tk_cred_retry--; 2719 trace_rpc__stale_creds(task); 2720 return -EKEYREJECTED; 2721 } 2722 goto out_verifier; 2723 } 2724 2725 p = xdr_inline_decode(xdr, sizeof(*p)); 2726 if (!p) 2727 goto out_unparsable; 2728 switch (*p) { 2729 case rpc_success: 2730 return 0; 2731 case rpc_prog_unavail: 2732 trace_rpc__prog_unavail(task); 2733 error = -EPFNOSUPPORT; 2734 goto out_err; 2735 case rpc_prog_mismatch: 2736 trace_rpc__prog_mismatch(task); 2737 error = -EPROTONOSUPPORT; 2738 goto out_err; 2739 case rpc_proc_unavail: 2740 
trace_rpc__proc_unavail(task); 2741 error = -EOPNOTSUPP; 2742 goto out_err; 2743 case rpc_garbage_args: 2744 case rpc_system_err: 2745 trace_rpc__garbage_args(task); 2746 error = -EIO; 2747 break; 2748 default: 2749 goto out_unparsable; 2750 } 2751 2752 out_garbage: 2753 clnt->cl_stats->rpcgarbage++; 2754 if (task->tk_garb_retry) { 2755 task->tk_garb_retry--; 2756 task->tk_action = call_encode; 2757 return -EAGAIN; 2758 } 2759 out_err: 2760 rpc_call_rpcerror(task, error); 2761 return error; 2762 2763 out_unparsable: 2764 trace_rpc__unparsable(task); 2765 error = -EIO; 2766 goto out_garbage; 2767 2768 out_verifier: 2769 trace_rpc_bad_verifier(task); 2770 switch (error) { 2771 case -EPROTONOSUPPORT: 2772 goto out_err; 2773 case -EACCES: 2774 /* Re-encode with a fresh cred */ 2775 fallthrough; 2776 default: 2777 goto out_garbage; 2778 } 2779 2780 out_msg_denied: 2781 error = -EACCES; 2782 p = xdr_inline_decode(xdr, sizeof(*p)); 2783 if (!p) 2784 goto out_unparsable; 2785 switch (*p++) { 2786 case rpc_auth_error: 2787 break; 2788 case rpc_mismatch: 2789 trace_rpc__mismatch(task); 2790 error = -EPROTONOSUPPORT; 2791 goto out_err; 2792 default: 2793 goto out_unparsable; 2794 } 2795 2796 p = xdr_inline_decode(xdr, sizeof(*p)); 2797 if (!p) 2798 goto out_unparsable; 2799 switch (*p++) { 2800 case rpc_autherr_rejectedcred: 2801 case rpc_autherr_rejectedverf: 2802 case rpcsec_gsserr_credproblem: 2803 case rpcsec_gsserr_ctxproblem: 2804 rpcauth_invalcred(task); 2805 if (!task->tk_cred_retry) 2806 break; 2807 task->tk_cred_retry--; 2808 trace_rpc__stale_creds(task); 2809 return -EKEYREJECTED; 2810 case rpc_autherr_badcred: 2811 case rpc_autherr_badverf: 2812 /* possibly garbled cred/verf? */ 2813 if (!task->tk_garb_retry) 2814 break; 2815 task->tk_garb_retry--; 2816 trace_rpc__bad_creds(task); 2817 task->tk_action = call_encode; 2818 return -EAGAIN; 2819 case rpc_autherr_tooweak: 2820 trace_rpc__auth_tooweak(task); 2821 pr_warn("RPC: server %s requires stronger authentication.\n", 2822 task->tk_xprt->servername); 2823 break; 2824 default: 2825 goto out_unparsable; 2826 } 2827 goto out_err; 2828 } 2829 2830 static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2831 const void *obj) 2832 { 2833 } 2834 2835 static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2836 void *obj) 2837 { 2838 return 0; 2839 } 2840 2841 static const struct rpc_procinfo rpcproc_null = { 2842 .p_encode = rpcproc_encode_null, 2843 .p_decode = rpcproc_decode_null, 2844 }; 2845 2846 static const struct rpc_procinfo rpcproc_null_noreply = { 2847 .p_encode = rpcproc_encode_null, 2848 }; 2849 2850 static void 2851 rpc_null_call_prepare(struct rpc_task *task, void *data) 2852 { 2853 task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT; 2854 rpc_call_start(task); 2855 } 2856 2857 static const struct rpc_call_ops rpc_null_ops = { 2858 .rpc_call_prepare = rpc_null_call_prepare, 2859 .rpc_call_done = rpc_default_callback, 2860 }; 2861 2862 static 2863 struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, 2864 struct rpc_xprt *xprt, struct rpc_cred *cred, int flags, 2865 const struct rpc_call_ops *ops, void *data) 2866 { 2867 struct rpc_message msg = { 2868 .rpc_proc = &rpcproc_null, 2869 }; 2870 struct rpc_task_setup task_setup_data = { 2871 .rpc_client = clnt, 2872 .rpc_xprt = xprt, 2873 .rpc_message = &msg, 2874 .rpc_op_cred = cred, 2875 .callback_ops = ops ?: &rpc_null_ops, 2876 .callback_data = data, 2877 .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | 2878 RPC_TASK_NULLCREDS, 
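/* a NULL ping should fail fast (soft, soft-connect) and needs no real credentials */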
2879 }; 2880 2881 return rpc_run_task(&task_setup_data); 2882 } 2883 2884 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) 2885 { 2886 return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL); 2887 } 2888 EXPORT_SYMBOL_GPL(rpc_call_null); 2889 2890 static int rpc_ping(struct rpc_clnt *clnt) 2891 { 2892 struct rpc_task *task; 2893 int status; 2894 2895 if (clnt->cl_auth->au_ops->ping) 2896 return clnt->cl_auth->au_ops->ping(clnt); 2897 2898 task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL); 2899 if (IS_ERR(task)) 2900 return PTR_ERR(task); 2901 status = task->tk_status; 2902 rpc_put_task(task); 2903 return status; 2904 } 2905 2906 static int rpc_ping_noreply(struct rpc_clnt *clnt) 2907 { 2908 struct rpc_message msg = { 2909 .rpc_proc = &rpcproc_null_noreply, 2910 }; 2911 struct rpc_task_setup task_setup_data = { 2912 .rpc_client = clnt, 2913 .rpc_message = &msg, 2914 .callback_ops = &rpc_null_ops, 2915 .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, 2916 }; 2917 struct rpc_task *task; 2918 int status; 2919 2920 task = rpc_run_task(&task_setup_data); 2921 if (IS_ERR(task)) 2922 return PTR_ERR(task); 2923 status = task->tk_status; 2924 rpc_put_task(task); 2925 return status; 2926 } 2927 2928 struct rpc_cb_add_xprt_calldata { 2929 struct rpc_xprt_switch *xps; 2930 struct rpc_xprt *xprt; 2931 }; 2932 2933 static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) 2934 { 2935 struct rpc_cb_add_xprt_calldata *data = calldata; 2936 2937 if (task->tk_status == 0) 2938 rpc_xprt_switch_add_xprt(data->xps, data->xprt); 2939 } 2940 2941 static void rpc_cb_add_xprt_release(void *calldata) 2942 { 2943 struct rpc_cb_add_xprt_calldata *data = calldata; 2944 2945 xprt_put(data->xprt); 2946 xprt_switch_put(data->xps); 2947 kfree(data); 2948 } 2949 2950 static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = { 2951 .rpc_call_prepare = rpc_null_call_prepare, 2952 .rpc_call_done = rpc_cb_add_xprt_done, 2953 .rpc_release = rpc_cb_add_xprt_release, 2954 }; 2955 2956 /** 2957 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt 2958 * @clnt: pointer to struct rpc_clnt 2959 * @xps: pointer to struct rpc_xprt_switch, 2960 * @xprt: pointer struct rpc_xprt 2961 * @in_max_connect: pointer to the max_connect value for the passed in xprt transport 2962 */ 2963 int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, 2964 struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, 2965 void *in_max_connect) 2966 { 2967 struct rpc_cb_add_xprt_calldata *data; 2968 struct rpc_task *task; 2969 int max_connect = clnt->cl_max_connect; 2970 2971 if (in_max_connect) 2972 max_connect = *(int *)in_max_connect; 2973 if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { 2974 rcu_read_lock(); 2975 pr_warn("SUNRPC: reached max allowed number (%d) did not add " 2976 "transport to server: %s\n", max_connect, 2977 rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); 2978 rcu_read_unlock(); 2979 return -EINVAL; 2980 } 2981 2982 data = kmalloc(sizeof(*data), GFP_KERNEL); 2983 if (!data) 2984 return -ENOMEM; 2985 data->xps = xprt_switch_get(xps); 2986 data->xprt = xprt_get(xprt); 2987 if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) { 2988 rpc_cb_add_xprt_release(data); 2989 goto success; 2990 } 2991 2992 task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, 2993 &rpc_cb_add_xprt_call_ops, data); 2994 if (IS_ERR(task)) 2995 return PTR_ERR(task); 2996 2997 data->xps->xps_nunique_destaddr_xprts++; 2998 
rpc_put_task(task); 2999 success: 3000 return 1; 3001 } 3002 EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); 3003 3004 static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, 3005 struct rpc_xprt *xprt, 3006 struct rpc_add_xprt_test *data) 3007 { 3008 struct rpc_task *task; 3009 int status = -EADDRINUSE; 3010 3011 /* Test the connection */ 3012 task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL); 3013 if (IS_ERR(task)) 3014 return PTR_ERR(task); 3015 3016 status = task->tk_status; 3017 rpc_put_task(task); 3018 3019 if (status < 0) 3020 return status; 3021 3022 /* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */ 3023 data->add_xprt_test(clnt, xprt, data->data); 3024 3025 return 0; 3026 } 3027 3028 /** 3029 * rpc_clnt_setup_test_and_add_xprt() - test and add a new transport 3030 * 3031 * This is an rpc_clnt_add_xprt setup() function which returns 1 so: 3032 * 1) caller of the test function must dereference the rpc_xprt_switch 3033 * and the rpc_xprt. 3034 * 2) test function must call rpc_xprt_switch_add_xprt, usually in 3035 * the rpc_call_done routine. 3036 * 3037 * Upon success (return of 1), the test function adds the new 3038 * transport to the rpc_clnt xprt switch. 3039 * 3040 * @clnt: struct rpc_clnt to get the new transport 3041 * @xps: the rpc_xprt_switch to hold the new transport 3042 * @xprt: the rpc_xprt to test 3043 * @data: a struct rpc_add_xprt_test pointer that holds the test function 3044 * and test function call data 3045 */ 3046 int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, 3047 struct rpc_xprt_switch *xps, 3048 struct rpc_xprt *xprt, 3049 void *data) 3050 { 3051 int status = -EADDRINUSE; 3052 3053 xprt = xprt_get(xprt); 3054 xprt_switch_get(xps); 3055 3056 if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) 3057 goto out_err; 3058 3059 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3060 if (status < 0) 3061 goto out_err; 3062 3063 status = 1; 3064 out_err: 3065 xprt_put(xprt); 3066 xprt_switch_put(xps); 3067 if (status < 0) 3068 pr_info("RPC: rpc_clnt_setup_test_and_add_xprt failed: %d addr %s not " 3069 "added\n", status, 3070 xprt->address_strings[RPC_DISPLAY_ADDR]); 3071 /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ 3072 return status; 3073 } 3074 EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); 3075 3076 /** 3077 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt 3078 * @clnt: pointer to struct rpc_clnt 3079 * @xprtargs: pointer to struct xprt_create 3080 * @setup: callback to test and/or set up the connection 3081 * @data: pointer to setup function data 3082 * 3083 * Creates a new transport using the parameters set in @xprtargs and 3084 * adds it to @clnt. 3085 * If a @setup callback is supplied, use it to test that connectivity 3086 * succeeds before adding the new transport.
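 * (rpc_clnt_setup_test_and_add_xprt() above is one such @setup callback.)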
3087 * 3088 */ 3089 int rpc_clnt_add_xprt(struct rpc_clnt *clnt, 3090 struct xprt_create *xprtargs, 3091 int (*setup)(struct rpc_clnt *, 3092 struct rpc_xprt_switch *, 3093 struct rpc_xprt *, 3094 void *), 3095 void *data) 3096 { 3097 struct rpc_xprt_switch *xps; 3098 struct rpc_xprt *xprt; 3099 unsigned long connect_timeout; 3100 unsigned long reconnect_timeout; 3101 unsigned char resvport, reuseport; 3102 int ret = 0, ident; 3103 3104 rcu_read_lock(); 3105 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3106 xprt = xprt_iter_xprt(&clnt->cl_xpi); 3107 if (xps == NULL || xprt == NULL) { 3108 rcu_read_unlock(); 3109 xprt_switch_put(xps); 3110 return -EAGAIN; 3111 } 3112 resvport = xprt->resvport; 3113 reuseport = xprt->reuseport; 3114 connect_timeout = xprt->connect_timeout; 3115 reconnect_timeout = xprt->max_reconnect_timeout; 3116 ident = xprt->xprt_class->ident; 3117 rcu_read_unlock(); 3118 3119 if (!xprtargs->ident) 3120 xprtargs->ident = ident; 3121 xprtargs->xprtsec = clnt->cl_xprtsec; 3122 xprt = xprt_create_transport(xprtargs); 3123 if (IS_ERR(xprt)) { 3124 ret = PTR_ERR(xprt); 3125 goto out_put_switch; 3126 } 3127 xprt->resvport = resvport; 3128 xprt->reuseport = reuseport; 3129 3130 if (xprtargs->connect_timeout) 3131 connect_timeout = xprtargs->connect_timeout; 3132 if (xprtargs->reconnect_timeout) 3133 reconnect_timeout = xprtargs->reconnect_timeout; 3134 if (xprt->ops->set_connect_timeout != NULL) 3135 xprt->ops->set_connect_timeout(xprt, 3136 connect_timeout, 3137 reconnect_timeout); 3138 3139 rpc_xprt_switch_set_roundrobin(xps); 3140 if (setup) { 3141 ret = setup(clnt, xps, xprt, data); 3142 if (ret != 0) 3143 goto out_put_xprt; 3144 } 3145 rpc_xprt_switch_add_xprt(xps, xprt); 3146 out_put_xprt: 3147 xprt_put(xprt); 3148 out_put_switch: 3149 xprt_switch_put(xps); 3150 return ret; 3151 } 3152 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 3153 3154 static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, 3155 struct rpc_xprt *xprt, 3156 struct rpc_add_xprt_test *data) 3157 { 3158 struct rpc_xprt *main_xprt; 3159 int status = 0; 3160 3161 xprt_get(xprt); 3162 3163 rcu_read_lock(); 3164 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3165 status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3166 (struct sockaddr *)&main_xprt->addr); 3167 rcu_read_unlock(); 3168 xprt_put(main_xprt); 3169 if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) 3170 goto out; 3171 3172 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3173 out: 3174 xprt_put(xprt); 3175 return status; 3176 } 3177 3178 /* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking 3179 * @clnt rpc_clnt structure 3180 * 3181 * For each offlined transport found in the rpc_clnt structure call 3182 * the function rpc_xprt_probe_trunked() which will determine if this 3183 * transport still belongs to the trunking group. 
3184 */ 3185 void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, 3186 struct rpc_add_xprt_test *data) 3187 { 3188 struct rpc_xprt_iter xpi; 3189 int ret; 3190 3191 ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi); 3192 if (ret) 3193 return; 3194 for (;;) { 3195 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 3196 3197 if (!xprt) 3198 break; 3199 ret = rpc_xprt_probe_trunked(clnt, xprt, data); 3200 xprt_put(xprt); 3201 if (ret < 0) 3202 break; 3203 xprt_iter_rewind(&xpi); 3204 } 3205 xprt_iter_destroy(&xpi); 3206 } 3207 EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); 3208 3209 static int rpc_xprt_offline(struct rpc_clnt *clnt, 3210 struct rpc_xprt *xprt, 3211 void *data) 3212 { 3213 struct rpc_xprt *main_xprt; 3214 struct rpc_xprt_switch *xps; 3215 int err = 0; 3216 3217 xprt_get(xprt); 3218 3219 rcu_read_lock(); 3220 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3221 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3222 err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3223 (struct sockaddr *)&main_xprt->addr); 3224 rcu_read_unlock(); 3225 xprt_put(main_xprt); 3226 if (err) 3227 goto out; 3228 3229 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { 3230 err = -EINTR; 3231 goto out; 3232 } 3233 xprt_set_offline_locked(xprt, xps); 3234 3235 xprt_release_write(xprt, NULL); 3236 out: 3237 xprt_put(xprt); 3238 xprt_switch_put(xps); 3239 return err; 3240 } 3241 3242 /* rpc_clnt_manage_trunked_xprts -- offline trunked transports 3243 * @clnt rpc_clnt structure 3244 * 3245 * For each active transport found in the rpc_clnt structure call 3246 * the function rpc_xprt_offline() which will identify trunked transports 3247 * and will mark them offline. 3248 */ 3249 void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) 3250 { 3251 rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); 3252 } 3253 EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); 3254 3255 struct connect_timeout_data { 3256 unsigned long connect_timeout; 3257 unsigned long reconnect_timeout; 3258 }; 3259 3260 static int 3261 rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, 3262 struct rpc_xprt *xprt, 3263 void *data) 3264 { 3265 struct connect_timeout_data *timeo = data; 3266 3267 if (xprt->ops->set_connect_timeout) 3268 xprt->ops->set_connect_timeout(xprt, 3269 timeo->connect_timeout, 3270 timeo->reconnect_timeout); 3271 return 0; 3272 } 3273 3274 void 3275 rpc_set_connect_timeout(struct rpc_clnt *clnt, 3276 unsigned long connect_timeout, 3277 unsigned long reconnect_timeout) 3278 { 3279 struct connect_timeout_data timeout = { 3280 .connect_timeout = connect_timeout, 3281 .reconnect_timeout = reconnect_timeout, 3282 }; 3283 rpc_clnt_iterate_for_each_xprt(clnt, 3284 rpc_xprt_set_connect_timeout, 3285 &timeout); 3286 } 3287 EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); 3288 3289 void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3290 { 3291 struct rpc_xprt_switch *xps; 3292 3293 xps = rpc_clnt_xprt_switch_get(clnt); 3294 xprt_set_online_locked(xprt, xps); 3295 xprt_switch_put(xps); 3296 } 3297 3298 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3299 { 3300 struct rpc_xprt_switch *xps; 3301 3302 if (rpc_clnt_xprt_switch_has_addr(clnt, 3303 (const struct sockaddr *)&xprt->addr)) { 3304 return rpc_clnt_xprt_set_online(clnt, xprt); 3305 } 3306 3307 xps = rpc_clnt_xprt_switch_get(clnt); 3308 rpc_xprt_switch_add_xprt(xps, xprt); 3309 xprt_switch_put(xps); 3310 } 3311 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 
3312 3313 void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3314 { 3315 struct rpc_xprt_switch *xps; 3316 3317 rcu_read_lock(); 3318 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3319 rpc_xprt_switch_remove_xprt(xps, xprt, 0); 3320 3321 xps->xps_nunique_destaddr_xprts--; 3322 rcu_read_unlock(); 3323 } 3324 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); 3325 3326 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 3327 const struct sockaddr *sap) 3328 { 3329 struct rpc_xprt_switch *xps; 3330 bool ret; 3331 3332 rcu_read_lock(); 3333 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3334 ret = rpc_xprt_switch_has_addr(xps, sap); 3335 rcu_read_unlock(); 3336 return ret; 3337 } 3338 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); 3339 3340 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3341 static void rpc_show_header(struct rpc_clnt *clnt) 3342 { 3343 printk(KERN_INFO "clnt[%pISpc] RPC tasks[%d]\n", 3344 (struct sockaddr *)&clnt->cl_xprt->addr, 3345 atomic_read(&clnt->cl_task_count)); 3346 printk(KERN_INFO "-pid- flgs status -client- --rqstp- " 3347 "-timeout ---ops--\n"); 3348 } 3349 3350 static void rpc_show_task(const struct rpc_clnt *clnt, 3351 const struct rpc_task *task) 3352 { 3353 const char *rpc_waitq = "none"; 3354 3355 if (RPC_IS_QUEUED(task)) 3356 rpc_waitq = rpc_qname(task->tk_waitqueue); 3357 3358 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", 3359 task->tk_pid, task->tk_flags, task->tk_status, 3360 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, 3361 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), 3362 task->tk_action, rpc_waitq); 3363 } 3364 3365 void rpc_show_tasks(struct net *net) 3366 { 3367 struct rpc_clnt *clnt; 3368 struct rpc_task *task; 3369 int header = 0; 3370 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 3371 3372 spin_lock(&sn->rpc_client_lock); 3373 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 3374 spin_lock(&clnt->cl_lock); 3375 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 3376 if (!header) { 3377 rpc_show_header(clnt); 3378 header++; 3379 } 3380 rpc_show_task(clnt, task); 3381 } 3382 spin_unlock(&clnt->cl_lock); 3383 } 3384 spin_unlock(&sn->rpc_client_lock); 3385 } 3386 #endif 3387 3388 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 3389 static int 3390 rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, 3391 struct rpc_xprt *xprt, 3392 void *dummy) 3393 { 3394 return xprt_enable_swap(xprt); 3395 } 3396 3397 int 3398 rpc_clnt_swap_activate(struct rpc_clnt *clnt) 3399 { 3400 while (clnt != clnt->cl_parent) 3401 clnt = clnt->cl_parent; 3402 if (atomic_inc_return(&clnt->cl_swapper) == 1) 3403 return rpc_clnt_iterate_for_each_xprt(clnt, 3404 rpc_clnt_swap_activate_callback, NULL); 3405 return 0; 3406 } 3407 EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); 3408 3409 static int 3410 rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, 3411 struct rpc_xprt *xprt, 3412 void *dummy) 3413 { 3414 xprt_disable_swap(xprt); 3415 return 0; 3416 } 3417 3418 void 3419 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) 3420 { 3421 while (clnt != clnt->cl_parent) 3422 clnt = clnt->cl_parent; 3423 if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) 3424 rpc_clnt_iterate_for_each_xprt(clnt, 3425 rpc_clnt_swap_deactivate_callback, NULL); 3426 } 3427 EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); 3428 #endif /* CONFIG_SUNRPC_SWAP */ 3429
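/*
 * Illustrative sketch only (hypothetical caller, e.g. a filesystem that
 * backs a swapfile with RPC traffic): bracket the swapfile's lifetime
 * with the two calls below so every transport of the root client keeps
 * memory reserves enabled while swap is active. The wrapper names are
 * assumptions; only the rpc_clnt_swap_* calls come from this file.
 *
 *	static int example_swapfile_on(struct rpc_clnt *clnt)
 *	{
 *		return rpc_clnt_swap_activate(clnt);
 *	}
 *
 *	static void example_swapfile_off(struct rpc_clnt *clnt)
 *	{
 *		rpc_clnt_swap_deactivate(clnt);
 *	}
 */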