1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/net/sunrpc/clnt.c 4 * 5 * This file contains the high-level RPC interface. 6 * It is modeled as a finite state machine to support both synchronous 7 * and asynchronous requests. 8 * 9 * - RPC header generation and argument serialization. 10 * - Credential refresh. 11 * - TCP connect handling. 12 * - Retry of operation when it is suspected the operation failed because 13 * of uid squashing on the server, or when the credentials were stale 14 * and need to be refreshed, or when a packet was damaged in transit. 15 * This may be have to be moved to the VFS layer. 16 * 17 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 18 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 19 */ 20 21 22 #include <linux/module.h> 23 #include <linux/types.h> 24 #include <linux/kallsyms.h> 25 #include <linux/mm.h> 26 #include <linux/namei.h> 27 #include <linux/mount.h> 28 #include <linux/slab.h> 29 #include <linux/rcupdate.h> 30 #include <linux/utsname.h> 31 #include <linux/workqueue.h> 32 #include <linux/in.h> 33 #include <linux/in6.h> 34 #include <linux/un.h> 35 36 #include <linux/sunrpc/clnt.h> 37 #include <linux/sunrpc/addr.h> 38 #include <linux/sunrpc/rpc_pipe_fs.h> 39 #include <linux/sunrpc/metrics.h> 40 #include <linux/sunrpc/bc_xprt.h> 41 #include <trace/events/sunrpc.h> 42 43 #include "sunrpc.h" 44 #include "sysfs.h" 45 #include "netns.h" 46 47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 48 # define RPCDBG_FACILITY RPCDBG_CALL 49 #endif 50 51 /* 52 * All RPC clients are linked into this list 53 */ 54 55 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 56 57 58 static void call_start(struct rpc_task *task); 59 static void call_reserve(struct rpc_task *task); 60 static void call_reserveresult(struct rpc_task *task); 61 static void call_allocate(struct rpc_task *task); 62 static void call_encode(struct rpc_task *task); 63 static void call_decode(struct rpc_task *task); 64 static void call_bind(struct rpc_task *task); 65 static void call_bind_status(struct rpc_task *task); 66 static void call_transmit(struct rpc_task *task); 67 static void call_status(struct rpc_task *task); 68 static void call_transmit_status(struct rpc_task *task); 69 static void call_refresh(struct rpc_task *task); 70 static void call_refreshresult(struct rpc_task *task); 71 static void call_connect(struct rpc_task *task); 72 static void call_connect_status(struct rpc_task *task); 73 74 static int rpc_encode_header(struct rpc_task *task, 75 struct xdr_stream *xdr); 76 static int rpc_decode_header(struct rpc_task *task, 77 struct xdr_stream *xdr); 78 static int rpc_ping(struct rpc_clnt *clnt); 79 static int rpc_ping_noreply(struct rpc_clnt *clnt); 80 static void rpc_check_timeout(struct rpc_task *task); 81 82 static void rpc_register_client(struct rpc_clnt *clnt) 83 { 84 struct net *net = rpc_net_ns(clnt); 85 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 86 87 spin_lock(&sn->rpc_client_lock); 88 list_add(&clnt->cl_clients, &sn->all_clients); 89 spin_unlock(&sn->rpc_client_lock); 90 } 91 92 static void rpc_unregister_client(struct rpc_clnt *clnt) 93 { 94 struct net *net = rpc_net_ns(clnt); 95 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 96 97 spin_lock(&sn->rpc_client_lock); 98 list_del(&clnt->cl_clients); 99 spin_unlock(&sn->rpc_client_lock); 100 } 101 102 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) 103 { 104 rpc_remove_client_dir(clnt); 105 } 106 107 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) 108 { 109 struct net *net = rpc_net_ns(clnt); 110 struct super_block *pipefs_sb; 111 112 pipefs_sb = rpc_get_sb_net(net); 113 if (pipefs_sb) { 114 if (pipefs_sb == clnt->pipefs_sb) 115 __rpc_clnt_remove_pipedir(clnt); 116 rpc_put_sb_net(net); 117 } 118 } 119 120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, 121 struct rpc_clnt *clnt) 122 { 123 static uint32_t clntid; 124 const char *dir_name = clnt->cl_program->pipe_dir_name; 125 char name[15]; 126 struct dentry *dir, *dentry; 127 128 dir = rpc_d_lookup_sb(sb, dir_name); 129 if (dir == NULL) { 130 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name); 131 return dir; 132 } 133 for (;;) { 134 snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 135 name[sizeof(name) - 1] = '\0'; 136 dentry = rpc_create_client_dir(dir, name, clnt); 137 if (!IS_ERR(dentry)) 138 break; 139 if (dentry == ERR_PTR(-EEXIST)) 140 continue; 141 printk(KERN_INFO "RPC: Couldn't create pipefs entry" 142 " %s/%s, error %ld\n", 143 dir_name, name, PTR_ERR(dentry)); 144 break; 145 } 146 dput(dir); 147 return dentry; 148 } 149 150 static int 151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt) 152 { 153 struct dentry *dentry; 154 155 clnt->pipefs_sb = pipefs_sb; 156 157 if (clnt->cl_program->pipe_dir_name != NULL) { 158 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt); 159 if (IS_ERR(dentry)) 160 return PTR_ERR(dentry); 161 } 162 return 0; 163 } 164 165 static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) 166 { 167 if (clnt->cl_program->pipe_dir_name == NULL) 168 return 1; 169 170 switch (event) { 171 case RPC_PIPEFS_MOUNT: 172 if (clnt->cl_pipedir_objects.pdh_dentry != NULL) 173 return 1; 174 if (refcount_read(&clnt->cl_count) == 0) 175 return 1; 176 break; 177 case RPC_PIPEFS_UMOUNT: 178 if (clnt->cl_pipedir_objects.pdh_dentry == NULL) 179 return 1; 180 break; 181 } 182 return 0; 183 } 184 185 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, 186 struct super_block *sb) 187 { 188 struct dentry *dentry; 189 190 switch (event) { 191 case RPC_PIPEFS_MOUNT: 192 dentry = rpc_setup_pipedir_sb(sb, clnt); 193 if (!dentry) 194 return -ENOENT; 195 if (IS_ERR(dentry)) 196 return PTR_ERR(dentry); 197 break; 198 case RPC_PIPEFS_UMOUNT: 199 __rpc_clnt_remove_pipedir(clnt); 200 break; 201 default: 202 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); 203 return -ENOTSUPP; 204 } 205 return 0; 206 } 207 208 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, 209 struct super_block *sb) 210 { 211 int error = 0; 212 213 for (;; clnt = clnt->cl_parent) { 214 if (!rpc_clnt_skip_event(clnt, event)) 215 error = __rpc_clnt_handle_event(clnt, event, sb); 216 if (error || clnt == clnt->cl_parent) 217 break; 218 } 219 return error; 220 } 221 222 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) 223 { 224 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 225 struct rpc_clnt *clnt; 226 227 spin_lock(&sn->rpc_client_lock); 228 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 229 if (rpc_clnt_skip_event(clnt, event)) 230 continue; 231 spin_unlock(&sn->rpc_client_lock); 232 return clnt; 233 } 234 spin_unlock(&sn->rpc_client_lock); 235 return NULL; 236 } 237 238 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, 239 void *ptr) 240 { 241 struct super_block *sb = ptr; 242 struct rpc_clnt *clnt; 243 int error = 0; 244 245 while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) { 246 error = __rpc_pipefs_event(clnt, event, sb); 247 if (error) 248 break; 249 } 250 return error; 251 } 252 253 static struct notifier_block rpc_clients_block = { 254 .notifier_call = rpc_pipefs_event, 255 .priority = SUNRPC_PIPEFS_RPC_PRIO, 256 }; 257 258 int rpc_clients_notifier_register(void) 259 { 260 return rpc_pipefs_notifier_register(&rpc_clients_block); 261 } 262 263 void rpc_clients_notifier_unregister(void) 264 { 265 return rpc_pipefs_notifier_unregister(&rpc_clients_block); 266 } 267 268 static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, 269 struct rpc_xprt *xprt, 270 const struct rpc_timeout *timeout) 271 { 272 struct rpc_xprt *old; 273 274 spin_lock(&clnt->cl_lock); 275 old = rcu_dereference_protected(clnt->cl_xprt, 276 lockdep_is_held(&clnt->cl_lock)); 277 278 if (!xprt_bound(xprt)) 279 clnt->cl_autobind = 1; 280 281 clnt->cl_timeout = timeout; 282 rcu_assign_pointer(clnt->cl_xprt, xprt); 283 spin_unlock(&clnt->cl_lock); 284 285 return old; 286 } 287 288 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) 289 { 290 ssize_t copied; 291 292 copied = strscpy(clnt->cl_nodename, 293 nodename, sizeof(clnt->cl_nodename)); 294 295 clnt->cl_nodelen = copied < 0 296 ? sizeof(clnt->cl_nodename) - 1 297 : copied; 298 } 299 300 static int rpc_client_register(struct rpc_clnt *clnt, 301 rpc_authflavor_t pseudoflavor, 302 const char *client_name) 303 { 304 struct rpc_auth_create_args auth_args = { 305 .pseudoflavor = pseudoflavor, 306 .target_name = client_name, 307 }; 308 struct rpc_auth *auth; 309 struct net *net = rpc_net_ns(clnt); 310 struct super_block *pipefs_sb; 311 int err; 312 313 rpc_clnt_debugfs_register(clnt); 314 315 pipefs_sb = rpc_get_sb_net(net); 316 if (pipefs_sb) { 317 err = rpc_setup_pipedir(pipefs_sb, clnt); 318 if (err) 319 goto out; 320 } 321 322 rpc_register_client(clnt); 323 if (pipefs_sb) 324 rpc_put_sb_net(net); 325 326 auth = rpcauth_create(&auth_args, clnt); 327 if (IS_ERR(auth)) { 328 dprintk("RPC: Couldn't create auth handle (flavor %u)\n", 329 pseudoflavor); 330 err = PTR_ERR(auth); 331 goto err_auth; 332 } 333 return 0; 334 err_auth: 335 pipefs_sb = rpc_get_sb_net(net); 336 rpc_unregister_client(clnt); 337 __rpc_clnt_remove_pipedir(clnt); 338 out: 339 if (pipefs_sb) 340 rpc_put_sb_net(net); 341 rpc_sysfs_client_destroy(clnt); 342 rpc_clnt_debugfs_unregister(clnt); 343 return err; 344 } 345 346 static DEFINE_IDA(rpc_clids); 347 348 void rpc_cleanup_clids(void) 349 { 350 ida_destroy(&rpc_clids); 351 } 352 353 static int rpc_alloc_clid(struct rpc_clnt *clnt) 354 { 355 int clid; 356 357 clid = ida_alloc(&rpc_clids, GFP_KERNEL); 358 if (clid < 0) 359 return clid; 360 clnt->cl_clid = clid; 361 return 0; 362 } 363 364 static void rpc_free_clid(struct rpc_clnt *clnt) 365 { 366 ida_free(&rpc_clids, clnt->cl_clid); 367 } 368 369 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, 370 struct rpc_xprt_switch *xps, 371 struct rpc_xprt *xprt, 372 struct rpc_clnt *parent) 373 { 374 const struct rpc_program *program = args->program; 375 const struct rpc_version *version; 376 struct rpc_clnt *clnt = NULL; 377 const struct rpc_timeout *timeout; 378 const char *nodename = args->nodename; 379 int err; 380 381 err = rpciod_up(); 382 if (err) 383 goto out_no_rpciod; 384 385 err = -EINVAL; 386 if (args->version >= program->nrvers) 387 goto out_err; 388 version = program->version[args->version]; 389 if (version == NULL) 390 goto out_err; 391 392 err = -ENOMEM; 393 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 394 if (!clnt) 395 goto out_err; 396 clnt->cl_parent = parent ? : clnt; 397 clnt->cl_xprtsec = args->xprtsec; 398 399 err = rpc_alloc_clid(clnt); 400 if (err) 401 goto out_no_clid; 402 403 clnt->cl_cred = get_cred(args->cred); 404 clnt->cl_procinfo = version->procs; 405 clnt->cl_maxproc = version->nrprocs; 406 clnt->cl_prog = args->prognumber ? : program->number; 407 clnt->cl_vers = version->number; 408 clnt->cl_stats = program->stats; 409 clnt->cl_metrics = rpc_alloc_iostats(clnt); 410 rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects); 411 err = -ENOMEM; 412 if (clnt->cl_metrics == NULL) 413 goto out_no_stats; 414 clnt->cl_program = program; 415 INIT_LIST_HEAD(&clnt->cl_tasks); 416 spin_lock_init(&clnt->cl_lock); 417 418 timeout = xprt->timeout; 419 if (args->timeout != NULL) { 420 memcpy(&clnt->cl_timeout_default, args->timeout, 421 sizeof(clnt->cl_timeout_default)); 422 timeout = &clnt->cl_timeout_default; 423 } 424 425 rpc_clnt_set_transport(clnt, xprt, timeout); 426 xprt->main = true; 427 xprt_iter_init(&clnt->cl_xpi, xps); 428 xprt_switch_put(xps); 429 430 clnt->cl_rtt = &clnt->cl_rtt_default; 431 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); 432 433 refcount_set(&clnt->cl_count, 1); 434 435 if (nodename == NULL) 436 nodename = utsname()->nodename; 437 /* save the nodename */ 438 rpc_clnt_set_nodename(clnt, nodename); 439 440 rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt)); 441 err = rpc_client_register(clnt, args->authflavor, args->client_name); 442 if (err) 443 goto out_no_path; 444 if (parent) 445 refcount_inc(&parent->cl_count); 446 447 trace_rpc_clnt_new(clnt, xprt, args); 448 return clnt; 449 450 out_no_path: 451 rpc_free_iostats(clnt->cl_metrics); 452 out_no_stats: 453 put_cred(clnt->cl_cred); 454 rpc_free_clid(clnt); 455 out_no_clid: 456 kfree(clnt); 457 out_err: 458 rpciod_down(); 459 out_no_rpciod: 460 xprt_switch_put(xps); 461 xprt_put(xprt); 462 trace_rpc_clnt_new_err(program->name, args->servername, err); 463 return ERR_PTR(err); 464 } 465 466 static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, 467 struct rpc_xprt *xprt) 468 { 469 struct rpc_clnt *clnt = NULL; 470 struct rpc_xprt_switch *xps; 471 472 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { 473 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); 474 xps = args->bc_xprt->xpt_bc_xps; 475 xprt_switch_get(xps); 476 } else { 477 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 478 if (xps == NULL) { 479 xprt_put(xprt); 480 return ERR_PTR(-ENOMEM); 481 } 482 if (xprt->bc_xprt) { 483 xprt_switch_get(xps); 484 xprt->bc_xprt->xpt_bc_xps = xps; 485 } 486 } 487 clnt = rpc_new_client(args, xps, xprt, NULL); 488 if (IS_ERR(clnt)) 489 return clnt; 490 491 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 492 int err = rpc_ping(clnt); 493 if (err != 0) { 494 rpc_shutdown_client(clnt); 495 return ERR_PTR(err); 496 } 497 } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) { 498 int err = rpc_ping_noreply(clnt); 499 if (err != 0) { 500 rpc_shutdown_client(clnt); 501 return ERR_PTR(err); 502 } 503 } 504 505 clnt->cl_softrtry = 1; 506 if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) { 507 clnt->cl_softrtry = 0; 508 if (args->flags & RPC_CLNT_CREATE_SOFTERR) 509 clnt->cl_softerr = 1; 510 } 511 512 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 513 clnt->cl_autobind = 1; 514 if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT) 515 clnt->cl_noretranstimeo = 1; 516 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 517 clnt->cl_discrtry = 1; 518 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) 519 clnt->cl_chatty = 1; 520 521 return clnt; 522 } 523 524 /** 525 * rpc_create - create an RPC client and transport with one call 526 * @args: rpc_clnt create argument structure 527 * 528 * Creates and initializes an RPC transport and an RPC client. 529 * 530 * It can ping the server in order to determine if it is up, and to see if 531 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables 532 * this behavior so asynchronous tasks can also use rpc_create. 533 */ 534 struct rpc_clnt *rpc_create(struct rpc_create_args *args) 535 { 536 struct rpc_xprt *xprt; 537 struct xprt_create xprtargs = { 538 .net = args->net, 539 .ident = args->protocol, 540 .srcaddr = args->saddress, 541 .dstaddr = args->address, 542 .addrlen = args->addrsize, 543 .servername = args->servername, 544 .bc_xprt = args->bc_xprt, 545 .xprtsec = args->xprtsec, 546 .connect_timeout = args->connect_timeout, 547 .reconnect_timeout = args->reconnect_timeout, 548 }; 549 char servername[48]; 550 struct rpc_clnt *clnt; 551 int i; 552 553 if (args->bc_xprt) { 554 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); 555 xprt = args->bc_xprt->xpt_bc_xprt; 556 if (xprt) { 557 xprt_get(xprt); 558 return rpc_create_xprt(args, xprt); 559 } 560 } 561 562 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) 563 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; 564 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) 565 xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT; 566 /* 567 * If the caller chooses not to specify a hostname, whip 568 * up a string representation of the passed-in address. 569 */ 570 if (xprtargs.servername == NULL) { 571 struct sockaddr_un *sun = 572 (struct sockaddr_un *)args->address; 573 struct sockaddr_in *sin = 574 (struct sockaddr_in *)args->address; 575 struct sockaddr_in6 *sin6 = 576 (struct sockaddr_in6 *)args->address; 577 578 servername[0] = '\0'; 579 switch (args->address->sa_family) { 580 case AF_LOCAL: 581 if (sun->sun_path[0]) 582 snprintf(servername, sizeof(servername), "%s", 583 sun->sun_path); 584 else 585 snprintf(servername, sizeof(servername), "@%s", 586 sun->sun_path+1); 587 break; 588 case AF_INET: 589 snprintf(servername, sizeof(servername), "%pI4", 590 &sin->sin_addr.s_addr); 591 break; 592 case AF_INET6: 593 snprintf(servername, sizeof(servername), "%pI6", 594 &sin6->sin6_addr); 595 break; 596 default: 597 /* caller wants default server name, but 598 * address family isn't recognized. */ 599 return ERR_PTR(-EINVAL); 600 } 601 xprtargs.servername = servername; 602 } 603 604 xprt = xprt_create_transport(&xprtargs); 605 if (IS_ERR(xprt)) 606 return (struct rpc_clnt *)xprt; 607 608 /* 609 * By default, kernel RPC client connects from a reserved port. 610 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, 611 * but it is always enabled for rpciod, which handles the connect 612 * operation. 613 */ 614 xprt->resvport = 1; 615 if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) 616 xprt->resvport = 0; 617 xprt->reuseport = 0; 618 if (args->flags & RPC_CLNT_CREATE_REUSEPORT) 619 xprt->reuseport = 1; 620 621 clnt = rpc_create_xprt(args, xprt); 622 if (IS_ERR(clnt) || args->nconnect <= 1) 623 return clnt; 624 625 for (i = 0; i < args->nconnect - 1; i++) { 626 if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0) 627 break; 628 } 629 return clnt; 630 } 631 EXPORT_SYMBOL_GPL(rpc_create); 632 633 /* 634 * This function clones the RPC client structure. It allows us to share the 635 * same transport while varying parameters such as the authentication 636 * flavour. 637 */ 638 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, 639 struct rpc_clnt *clnt) 640 { 641 struct rpc_xprt_switch *xps; 642 struct rpc_xprt *xprt; 643 struct rpc_clnt *new; 644 int err; 645 646 err = -ENOMEM; 647 rcu_read_lock(); 648 xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 649 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 650 rcu_read_unlock(); 651 if (xprt == NULL || xps == NULL) { 652 xprt_put(xprt); 653 xprt_switch_put(xps); 654 goto out_err; 655 } 656 args->servername = xprt->servername; 657 args->nodename = clnt->cl_nodename; 658 659 new = rpc_new_client(args, xps, xprt, clnt); 660 if (IS_ERR(new)) 661 return new; 662 663 /* Turn off autobind on clones */ 664 new->cl_autobind = 0; 665 new->cl_softrtry = clnt->cl_softrtry; 666 new->cl_softerr = clnt->cl_softerr; 667 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 668 new->cl_discrtry = clnt->cl_discrtry; 669 new->cl_chatty = clnt->cl_chatty; 670 new->cl_principal = clnt->cl_principal; 671 new->cl_max_connect = clnt->cl_max_connect; 672 return new; 673 674 out_err: 675 trace_rpc_clnt_clone_err(clnt, err); 676 return ERR_PTR(err); 677 } 678 679 /** 680 * rpc_clone_client - Clone an RPC client structure 681 * 682 * @clnt: RPC client whose parameters are copied 683 * 684 * Returns a fresh RPC client or an ERR_PTR. 685 */ 686 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) 687 { 688 struct rpc_create_args args = { 689 .program = clnt->cl_program, 690 .prognumber = clnt->cl_prog, 691 .version = clnt->cl_vers, 692 .authflavor = clnt->cl_auth->au_flavor, 693 .cred = clnt->cl_cred, 694 }; 695 return __rpc_clone_client(&args, clnt); 696 } 697 EXPORT_SYMBOL_GPL(rpc_clone_client); 698 699 /** 700 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth 701 * 702 * @clnt: RPC client whose parameters are copied 703 * @flavor: security flavor for new client 704 * 705 * Returns a fresh RPC client or an ERR_PTR. 706 */ 707 struct rpc_clnt * 708 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) 709 { 710 struct rpc_create_args args = { 711 .program = clnt->cl_program, 712 .prognumber = clnt->cl_prog, 713 .version = clnt->cl_vers, 714 .authflavor = flavor, 715 .cred = clnt->cl_cred, 716 }; 717 return __rpc_clone_client(&args, clnt); 718 } 719 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); 720 721 /** 722 * rpc_switch_client_transport: switch the RPC transport on the fly 723 * @clnt: pointer to a struct rpc_clnt 724 * @args: pointer to the new transport arguments 725 * @timeout: pointer to the new timeout parameters 726 * 727 * This function allows the caller to switch the RPC transport for the 728 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS 729 * server, for instance. It assumes that the caller has ensured that 730 * there are no active RPC tasks by using some form of locking. 731 * 732 * Returns zero if "clnt" is now using the new xprt. Otherwise a 733 * negative errno is returned, and "clnt" continues to use the old 734 * xprt. 735 */ 736 int rpc_switch_client_transport(struct rpc_clnt *clnt, 737 struct xprt_create *args, 738 const struct rpc_timeout *timeout) 739 { 740 const struct rpc_timeout *old_timeo; 741 rpc_authflavor_t pseudoflavor; 742 struct rpc_xprt_switch *xps, *oldxps; 743 struct rpc_xprt *xprt, *old; 744 struct rpc_clnt *parent; 745 int err; 746 747 args->xprtsec = clnt->cl_xprtsec; 748 xprt = xprt_create_transport(args); 749 if (IS_ERR(xprt)) 750 return PTR_ERR(xprt); 751 752 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 753 if (xps == NULL) { 754 xprt_put(xprt); 755 return -ENOMEM; 756 } 757 758 pseudoflavor = clnt->cl_auth->au_flavor; 759 760 old_timeo = clnt->cl_timeout; 761 old = rpc_clnt_set_transport(clnt, xprt, timeout); 762 oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps); 763 764 rpc_unregister_client(clnt); 765 __rpc_clnt_remove_pipedir(clnt); 766 rpc_sysfs_client_destroy(clnt); 767 rpc_clnt_debugfs_unregister(clnt); 768 769 /* 770 * A new transport was created. "clnt" therefore 771 * becomes the root of a new cl_parent tree. clnt's 772 * children, if it has any, still point to the old xprt. 773 */ 774 parent = clnt->cl_parent; 775 clnt->cl_parent = clnt; 776 777 /* 778 * The old rpc_auth cache cannot be re-used. GSS 779 * contexts in particular are between a single 780 * client and server. 781 */ 782 err = rpc_client_register(clnt, pseudoflavor, NULL); 783 if (err) 784 goto out_revert; 785 786 synchronize_rcu(); 787 if (parent != clnt) 788 rpc_release_client(parent); 789 xprt_switch_put(oldxps); 790 xprt_put(old); 791 trace_rpc_clnt_replace_xprt(clnt); 792 return 0; 793 794 out_revert: 795 xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps); 796 rpc_clnt_set_transport(clnt, old, old_timeo); 797 clnt->cl_parent = parent; 798 rpc_client_register(clnt, pseudoflavor, NULL); 799 xprt_switch_put(xps); 800 xprt_put(xprt); 801 trace_rpc_clnt_replace_xprt_err(clnt); 802 return err; 803 } 804 EXPORT_SYMBOL_GPL(rpc_switch_client_transport); 805 806 static 807 int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, 808 void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) 809 { 810 struct rpc_xprt_switch *xps; 811 812 rcu_read_lock(); 813 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 814 rcu_read_unlock(); 815 if (xps == NULL) 816 return -EAGAIN; 817 func(xpi, xps); 818 xprt_switch_put(xps); 819 return 0; 820 } 821 822 static 823 int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) 824 { 825 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall); 826 } 827 828 static 829 int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, 830 struct rpc_xprt_iter *xpi) 831 { 832 return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline); 833 } 834 835 /** 836 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports 837 * @clnt: pointer to client 838 * @fn: function to apply 839 * @data: void pointer to function data 840 * 841 * Iterates through the list of RPC transports currently attached to the 842 * client and applies the function fn(clnt, xprt, data). 843 * 844 * On error, the iteration stops, and the function returns the error value. 845 */ 846 int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, 847 int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), 848 void *data) 849 { 850 struct rpc_xprt_iter xpi; 851 int ret; 852 853 ret = rpc_clnt_xprt_iter_init(clnt, &xpi); 854 if (ret) 855 return ret; 856 for (;;) { 857 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 858 859 if (!xprt) 860 break; 861 ret = fn(clnt, xprt, data); 862 xprt_put(xprt); 863 if (ret < 0) 864 break; 865 } 866 xprt_iter_destroy(&xpi); 867 return ret; 868 } 869 EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); 870 871 /* 872 * Kill all tasks for the given client. 873 * XXX: kill their descendants as well? 874 */ 875 void rpc_killall_tasks(struct rpc_clnt *clnt) 876 { 877 struct rpc_task *rovr; 878 879 880 if (list_empty(&clnt->cl_tasks)) 881 return; 882 883 /* 884 * Spin lock all_tasks to prevent changes... 885 */ 886 trace_rpc_clnt_killall(clnt); 887 spin_lock(&clnt->cl_lock); 888 list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) 889 rpc_signal_task(rovr); 890 spin_unlock(&clnt->cl_lock); 891 } 892 EXPORT_SYMBOL_GPL(rpc_killall_tasks); 893 894 /** 895 * rpc_cancel_tasks - try to cancel a set of RPC tasks 896 * @clnt: Pointer to RPC client 897 * @error: RPC task error value to set 898 * @fnmatch: Pointer to selector function 899 * @data: User data 900 * 901 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. 902 * The argument @error must be a negative error value. 903 */ 904 unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, 905 bool (*fnmatch)(const struct rpc_task *, 906 const void *), 907 const void *data) 908 { 909 struct rpc_task *task; 910 unsigned long count = 0; 911 912 if (list_empty(&clnt->cl_tasks)) 913 return 0; 914 /* 915 * Spin lock all_tasks to prevent changes... 916 */ 917 spin_lock(&clnt->cl_lock); 918 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 919 if (!RPC_IS_ACTIVATED(task)) 920 continue; 921 if (!fnmatch(task, data)) 922 continue; 923 rpc_task_try_cancel(task, error); 924 count++; 925 } 926 spin_unlock(&clnt->cl_lock); 927 return count; 928 } 929 EXPORT_SYMBOL_GPL(rpc_cancel_tasks); 930 931 static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt, 932 struct rpc_xprt *xprt, void *dummy) 933 { 934 if (xprt_connected(xprt)) 935 xprt_force_disconnect(xprt); 936 return 0; 937 } 938 939 void rpc_clnt_disconnect(struct rpc_clnt *clnt) 940 { 941 rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); 942 } 943 EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); 944 945 /* 946 * Properly shut down an RPC client, terminating all outstanding 947 * requests. 948 */ 949 void rpc_shutdown_client(struct rpc_clnt *clnt) 950 { 951 might_sleep(); 952 953 trace_rpc_clnt_shutdown(clnt); 954 955 while (!list_empty(&clnt->cl_tasks)) { 956 rpc_killall_tasks(clnt); 957 wait_event_timeout(destroy_wait, 958 list_empty(&clnt->cl_tasks), 1*HZ); 959 } 960 961 rpc_release_client(clnt); 962 } 963 EXPORT_SYMBOL_GPL(rpc_shutdown_client); 964 965 /* 966 * Free an RPC client 967 */ 968 static void rpc_free_client_work(struct work_struct *work) 969 { 970 struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); 971 972 trace_rpc_clnt_free(clnt); 973 974 /* These might block on processes that might allocate memory, 975 * so they cannot be called in rpciod, so they are handled separately 976 * here. 977 */ 978 rpc_sysfs_client_destroy(clnt); 979 rpc_clnt_debugfs_unregister(clnt); 980 rpc_free_clid(clnt); 981 rpc_clnt_remove_pipedir(clnt); 982 xprt_put(rcu_dereference_raw(clnt->cl_xprt)); 983 984 kfree(clnt); 985 rpciod_down(); 986 } 987 static struct rpc_clnt * 988 rpc_free_client(struct rpc_clnt *clnt) 989 { 990 struct rpc_clnt *parent = NULL; 991 992 trace_rpc_clnt_release(clnt); 993 if (clnt->cl_parent != clnt) 994 parent = clnt->cl_parent; 995 rpc_unregister_client(clnt); 996 rpc_free_iostats(clnt->cl_metrics); 997 clnt->cl_metrics = NULL; 998 xprt_iter_destroy(&clnt->cl_xpi); 999 put_cred(clnt->cl_cred); 1000 1001 INIT_WORK(&clnt->cl_work, rpc_free_client_work); 1002 schedule_work(&clnt->cl_work); 1003 return parent; 1004 } 1005 1006 /* 1007 * Free an RPC client 1008 */ 1009 static struct rpc_clnt * 1010 rpc_free_auth(struct rpc_clnt *clnt) 1011 { 1012 /* 1013 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to 1014 * release remaining GSS contexts. This mechanism ensures 1015 * that it can do so safely. 1016 */ 1017 if (clnt->cl_auth != NULL) { 1018 rpcauth_release(clnt->cl_auth); 1019 clnt->cl_auth = NULL; 1020 } 1021 if (refcount_dec_and_test(&clnt->cl_count)) 1022 return rpc_free_client(clnt); 1023 return NULL; 1024 } 1025 1026 /* 1027 * Release reference to the RPC client 1028 */ 1029 void 1030 rpc_release_client(struct rpc_clnt *clnt) 1031 { 1032 do { 1033 if (list_empty(&clnt->cl_tasks)) 1034 wake_up(&destroy_wait); 1035 if (refcount_dec_not_one(&clnt->cl_count)) 1036 break; 1037 clnt = rpc_free_auth(clnt); 1038 } while (clnt != NULL); 1039 } 1040 EXPORT_SYMBOL_GPL(rpc_release_client); 1041 1042 /** 1043 * rpc_bind_new_program - bind a new RPC program to an existing client 1044 * @old: old rpc_client 1045 * @program: rpc program to set 1046 * @vers: rpc program version 1047 * 1048 * Clones the rpc client and sets up a new RPC program. This is mainly 1049 * of use for enabling different RPC programs to share the same transport. 1050 * The Sun NFSv2/v3 ACL protocol can do this. 1051 */ 1052 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 1053 const struct rpc_program *program, 1054 u32 vers) 1055 { 1056 struct rpc_create_args args = { 1057 .program = program, 1058 .prognumber = program->number, 1059 .version = vers, 1060 .authflavor = old->cl_auth->au_flavor, 1061 .cred = old->cl_cred, 1062 }; 1063 struct rpc_clnt *clnt; 1064 int err; 1065 1066 clnt = __rpc_clone_client(&args, old); 1067 if (IS_ERR(clnt)) 1068 goto out; 1069 err = rpc_ping(clnt); 1070 if (err != 0) { 1071 rpc_shutdown_client(clnt); 1072 clnt = ERR_PTR(err); 1073 } 1074 out: 1075 return clnt; 1076 } 1077 EXPORT_SYMBOL_GPL(rpc_bind_new_program); 1078 1079 struct rpc_xprt * 1080 rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 1081 { 1082 struct rpc_xprt_switch *xps; 1083 1084 if (!xprt) 1085 return NULL; 1086 rcu_read_lock(); 1087 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 1088 atomic_long_inc(&xps->xps_queuelen); 1089 rcu_read_unlock(); 1090 atomic_long_inc(&xprt->queuelen); 1091 1092 return xprt; 1093 } 1094 1095 static void 1096 rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 1097 { 1098 struct rpc_xprt_switch *xps; 1099 1100 atomic_long_dec(&xprt->queuelen); 1101 rcu_read_lock(); 1102 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 1103 atomic_long_dec(&xps->xps_queuelen); 1104 rcu_read_unlock(); 1105 1106 xprt_put(xprt); 1107 } 1108 1109 void rpc_task_release_transport(struct rpc_task *task) 1110 { 1111 struct rpc_xprt *xprt = task->tk_xprt; 1112 1113 if (xprt) { 1114 task->tk_xprt = NULL; 1115 if (task->tk_client) 1116 rpc_task_release_xprt(task->tk_client, xprt); 1117 else 1118 xprt_put(xprt); 1119 } 1120 } 1121 EXPORT_SYMBOL_GPL(rpc_task_release_transport); 1122 1123 void rpc_task_release_client(struct rpc_task *task) 1124 { 1125 struct rpc_clnt *clnt = task->tk_client; 1126 1127 rpc_task_release_transport(task); 1128 if (clnt != NULL) { 1129 /* Remove from client task list */ 1130 spin_lock(&clnt->cl_lock); 1131 list_del(&task->tk_task); 1132 spin_unlock(&clnt->cl_lock); 1133 task->tk_client = NULL; 1134 1135 rpc_release_client(clnt); 1136 } 1137 } 1138 1139 static struct rpc_xprt * 1140 rpc_task_get_first_xprt(struct rpc_clnt *clnt) 1141 { 1142 struct rpc_xprt *xprt; 1143 1144 rcu_read_lock(); 1145 xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 1146 rcu_read_unlock(); 1147 return rpc_task_get_xprt(clnt, xprt); 1148 } 1149 1150 static struct rpc_xprt * 1151 rpc_task_get_next_xprt(struct rpc_clnt *clnt) 1152 { 1153 return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi)); 1154 } 1155 1156 static 1157 void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) 1158 { 1159 if (task->tk_xprt) { 1160 if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && 1161 (task->tk_flags & RPC_TASK_MOVEABLE))) 1162 return; 1163 xprt_release(task); 1164 xprt_put(task->tk_xprt); 1165 } 1166 if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) 1167 task->tk_xprt = rpc_task_get_first_xprt(clnt); 1168 else 1169 task->tk_xprt = rpc_task_get_next_xprt(clnt); 1170 } 1171 1172 static 1173 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) 1174 { 1175 rpc_task_set_transport(task, clnt); 1176 task->tk_client = clnt; 1177 refcount_inc(&clnt->cl_count); 1178 if (clnt->cl_softrtry) 1179 task->tk_flags |= RPC_TASK_SOFT; 1180 if (clnt->cl_softerr) 1181 task->tk_flags |= RPC_TASK_TIMEOUT; 1182 if (clnt->cl_noretranstimeo) 1183 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; 1184 /* Add to the client's list of all tasks */ 1185 spin_lock(&clnt->cl_lock); 1186 list_add_tail(&task->tk_task, &clnt->cl_tasks); 1187 spin_unlock(&clnt->cl_lock); 1188 } 1189 1190 static void 1191 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) 1192 { 1193 if (msg != NULL) { 1194 task->tk_msg.rpc_proc = msg->rpc_proc; 1195 task->tk_msg.rpc_argp = msg->rpc_argp; 1196 task->tk_msg.rpc_resp = msg->rpc_resp; 1197 task->tk_msg.rpc_cred = msg->rpc_cred; 1198 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1199 get_cred(task->tk_msg.rpc_cred); 1200 } 1201 } 1202 1203 /* 1204 * Default callback for async RPC calls 1205 */ 1206 static void 1207 rpc_default_callback(struct rpc_task *task, void *data) 1208 { 1209 } 1210 1211 static const struct rpc_call_ops rpc_default_ops = { 1212 .rpc_call_done = rpc_default_callback, 1213 }; 1214 1215 /** 1216 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 1217 * @task_setup_data: pointer to task initialisation data 1218 */ 1219 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) 1220 { 1221 struct rpc_task *task; 1222 1223 task = rpc_new_task(task_setup_data); 1224 if (IS_ERR(task)) 1225 return task; 1226 1227 if (!RPC_IS_ASYNC(task)) 1228 task->tk_flags |= RPC_TASK_CRED_NOREF; 1229 1230 rpc_task_set_client(task, task_setup_data->rpc_client); 1231 rpc_task_set_rpc_message(task, task_setup_data->rpc_message); 1232 1233 if (task->tk_action == NULL) 1234 rpc_call_start(task); 1235 1236 atomic_inc(&task->tk_count); 1237 rpc_execute(task); 1238 return task; 1239 } 1240 EXPORT_SYMBOL_GPL(rpc_run_task); 1241 1242 /** 1243 * rpc_call_sync - Perform a synchronous RPC call 1244 * @clnt: pointer to RPC client 1245 * @msg: RPC call parameters 1246 * @flags: RPC call flags 1247 */ 1248 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) 1249 { 1250 struct rpc_task *task; 1251 struct rpc_task_setup task_setup_data = { 1252 .rpc_client = clnt, 1253 .rpc_message = msg, 1254 .callback_ops = &rpc_default_ops, 1255 .flags = flags, 1256 }; 1257 int status; 1258 1259 WARN_ON_ONCE(flags & RPC_TASK_ASYNC); 1260 if (flags & RPC_TASK_ASYNC) { 1261 rpc_release_calldata(task_setup_data.callback_ops, 1262 task_setup_data.callback_data); 1263 return -EINVAL; 1264 } 1265 1266 task = rpc_run_task(&task_setup_data); 1267 if (IS_ERR(task)) 1268 return PTR_ERR(task); 1269 status = task->tk_status; 1270 rpc_put_task(task); 1271 return status; 1272 } 1273 EXPORT_SYMBOL_GPL(rpc_call_sync); 1274 1275 /** 1276 * rpc_call_async - Perform an asynchronous RPC call 1277 * @clnt: pointer to RPC client 1278 * @msg: RPC call parameters 1279 * @flags: RPC call flags 1280 * @tk_ops: RPC call ops 1281 * @data: user call data 1282 */ 1283 int 1284 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, 1285 const struct rpc_call_ops *tk_ops, void *data) 1286 { 1287 struct rpc_task *task; 1288 struct rpc_task_setup task_setup_data = { 1289 .rpc_client = clnt, 1290 .rpc_message = msg, 1291 .callback_ops = tk_ops, 1292 .callback_data = data, 1293 .flags = flags|RPC_TASK_ASYNC, 1294 }; 1295 1296 task = rpc_run_task(&task_setup_data); 1297 if (IS_ERR(task)) 1298 return PTR_ERR(task); 1299 rpc_put_task(task); 1300 return 0; 1301 } 1302 EXPORT_SYMBOL_GPL(rpc_call_async); 1303 1304 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1305 static void call_bc_encode(struct rpc_task *task); 1306 1307 /** 1308 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run 1309 * rpc_execute against it 1310 * @req: RPC request 1311 */ 1312 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req) 1313 { 1314 struct rpc_task *task; 1315 struct rpc_task_setup task_setup_data = { 1316 .callback_ops = &rpc_default_ops, 1317 .flags = RPC_TASK_SOFTCONN | 1318 RPC_TASK_NO_RETRANS_TIMEOUT, 1319 }; 1320 1321 dprintk("RPC: rpc_run_bc_task req= %p\n", req); 1322 /* 1323 * Create an rpc_task to send the data 1324 */ 1325 task = rpc_new_task(&task_setup_data); 1326 if (IS_ERR(task)) { 1327 xprt_free_bc_request(req); 1328 return task; 1329 } 1330 1331 xprt_init_bc_request(req, task); 1332 1333 task->tk_action = call_bc_encode; 1334 atomic_inc(&task->tk_count); 1335 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); 1336 rpc_execute(task); 1337 1338 dprintk("RPC: rpc_run_bc_task: task= %p\n", task); 1339 return task; 1340 } 1341 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1342 1343 /** 1344 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages 1345 * @req: RPC request to prepare 1346 * @pages: vector of struct page pointers 1347 * @base: offset in first page where receive should start, in bytes 1348 * @len: expected size of the upper layer data payload, in bytes 1349 * @hdrsize: expected size of upper layer reply header, in XDR words 1350 * 1351 */ 1352 void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, 1353 unsigned int base, unsigned int len, 1354 unsigned int hdrsize) 1355 { 1356 hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; 1357 1358 xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); 1359 trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf); 1360 } 1361 EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages); 1362 1363 void 1364 rpc_call_start(struct rpc_task *task) 1365 { 1366 task->tk_action = call_start; 1367 } 1368 EXPORT_SYMBOL_GPL(rpc_call_start); 1369 1370 /** 1371 * rpc_peeraddr - extract remote peer address from clnt's xprt 1372 * @clnt: RPC client structure 1373 * @buf: target buffer 1374 * @bufsize: length of target buffer 1375 * 1376 * Returns the number of bytes that are actually in the stored address. 1377 */ 1378 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) 1379 { 1380 size_t bytes; 1381 struct rpc_xprt *xprt; 1382 1383 rcu_read_lock(); 1384 xprt = rcu_dereference(clnt->cl_xprt); 1385 1386 bytes = xprt->addrlen; 1387 if (bytes > bufsize) 1388 bytes = bufsize; 1389 memcpy(buf, &xprt->addr, bytes); 1390 rcu_read_unlock(); 1391 1392 return bytes; 1393 } 1394 EXPORT_SYMBOL_GPL(rpc_peeraddr); 1395 1396 /** 1397 * rpc_peeraddr2str - return remote peer address in printable format 1398 * @clnt: RPC client structure 1399 * @format: address format 1400 * 1401 * NB: the lifetime of the memory referenced by the returned pointer is 1402 * the same as the rpc_xprt itself. As long as the caller uses this 1403 * pointer, it must hold the RCU read lock. 1404 */ 1405 const char *rpc_peeraddr2str(struct rpc_clnt *clnt, 1406 enum rpc_display_format_t format) 1407 { 1408 struct rpc_xprt *xprt; 1409 1410 xprt = rcu_dereference(clnt->cl_xprt); 1411 1412 if (xprt->address_strings[format] != NULL) 1413 return xprt->address_strings[format]; 1414 else 1415 return "unprintable"; 1416 } 1417 EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 1418 1419 static const struct sockaddr_in rpc_inaddr_loopback = { 1420 .sin_family = AF_INET, 1421 .sin_addr.s_addr = htonl(INADDR_ANY), 1422 }; 1423 1424 static const struct sockaddr_in6 rpc_in6addr_loopback = { 1425 .sin6_family = AF_INET6, 1426 .sin6_addr = IN6ADDR_ANY_INIT, 1427 }; 1428 1429 /* 1430 * Try a getsockname() on a connected datagram socket. Using a 1431 * connected datagram socket prevents leaving a socket in TIME_WAIT. 1432 * This conserves the ephemeral port number space. 1433 * 1434 * Returns zero and fills in "buf" if successful; otherwise, a 1435 * negative errno is returned. 1436 */ 1437 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, 1438 struct sockaddr *buf) 1439 { 1440 struct socket *sock; 1441 int err; 1442 1443 err = __sock_create(net, sap->sa_family, 1444 SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1445 if (err < 0) { 1446 dprintk("RPC: can't create UDP socket (%d)\n", err); 1447 goto out; 1448 } 1449 1450 switch (sap->sa_family) { 1451 case AF_INET: 1452 err = kernel_bind(sock, 1453 (struct sockaddr *)&rpc_inaddr_loopback, 1454 sizeof(rpc_inaddr_loopback)); 1455 break; 1456 case AF_INET6: 1457 err = kernel_bind(sock, 1458 (struct sockaddr *)&rpc_in6addr_loopback, 1459 sizeof(rpc_in6addr_loopback)); 1460 break; 1461 default: 1462 err = -EAFNOSUPPORT; 1463 goto out_release; 1464 } 1465 if (err < 0) { 1466 dprintk("RPC: can't bind UDP socket (%d)\n", err); 1467 goto out_release; 1468 } 1469 1470 err = kernel_connect(sock, sap, salen, 0); 1471 if (err < 0) { 1472 dprintk("RPC: can't connect UDP socket (%d)\n", err); 1473 goto out_release; 1474 } 1475 1476 err = kernel_getsockname(sock, buf); 1477 if (err < 0) { 1478 dprintk("RPC: getsockname failed (%d)\n", err); 1479 goto out_release; 1480 } 1481 1482 err = 0; 1483 if (buf->sa_family == AF_INET6) { 1484 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; 1485 sin6->sin6_scope_id = 0; 1486 } 1487 dprintk("RPC: %s succeeded\n", __func__); 1488 1489 out_release: 1490 sock_release(sock); 1491 out: 1492 return err; 1493 } 1494 1495 /* 1496 * Scraping a connected socket failed, so we don't have a useable 1497 * local address. Fallback: generate an address that will prevent 1498 * the server from calling us back. 1499 * 1500 * Returns zero and fills in "buf" if successful; otherwise, a 1501 * negative errno is returned. 1502 */ 1503 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) 1504 { 1505 switch (family) { 1506 case AF_INET: 1507 if (buflen < sizeof(rpc_inaddr_loopback)) 1508 return -EINVAL; 1509 memcpy(buf, &rpc_inaddr_loopback, 1510 sizeof(rpc_inaddr_loopback)); 1511 break; 1512 case AF_INET6: 1513 if (buflen < sizeof(rpc_in6addr_loopback)) 1514 return -EINVAL; 1515 memcpy(buf, &rpc_in6addr_loopback, 1516 sizeof(rpc_in6addr_loopback)); 1517 break; 1518 default: 1519 dprintk("RPC: %s: address family not supported\n", 1520 __func__); 1521 return -EAFNOSUPPORT; 1522 } 1523 dprintk("RPC: %s: succeeded\n", __func__); 1524 return 0; 1525 } 1526 1527 /** 1528 * rpc_localaddr - discover local endpoint address for an RPC client 1529 * @clnt: RPC client structure 1530 * @buf: target buffer 1531 * @buflen: size of target buffer, in bytes 1532 * 1533 * Returns zero and fills in "buf" and "buflen" if successful; 1534 * otherwise, a negative errno is returned. 1535 * 1536 * This works even if the underlying transport is not currently connected, 1537 * or if the upper layer never previously provided a source address. 1538 * 1539 * The result of this function call is transient: multiple calls in 1540 * succession may give different results, depending on how local 1541 * networking configuration changes over time. 1542 */ 1543 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen) 1544 { 1545 struct sockaddr_storage address; 1546 struct sockaddr *sap = (struct sockaddr *)&address; 1547 struct rpc_xprt *xprt; 1548 struct net *net; 1549 size_t salen; 1550 int err; 1551 1552 rcu_read_lock(); 1553 xprt = rcu_dereference(clnt->cl_xprt); 1554 salen = xprt->addrlen; 1555 memcpy(sap, &xprt->addr, salen); 1556 net = get_net(xprt->xprt_net); 1557 rcu_read_unlock(); 1558 1559 rpc_set_port(sap, 0); 1560 err = rpc_sockname(net, sap, salen, buf); 1561 put_net(net); 1562 if (err != 0) 1563 /* Couldn't discover local address, return ANYADDR */ 1564 return rpc_anyaddr(sap->sa_family, buf, buflen); 1565 return 0; 1566 } 1567 EXPORT_SYMBOL_GPL(rpc_localaddr); 1568 1569 void 1570 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 1571 { 1572 struct rpc_xprt *xprt; 1573 1574 rcu_read_lock(); 1575 xprt = rcu_dereference(clnt->cl_xprt); 1576 if (xprt->ops->set_buffer_size) 1577 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 1578 rcu_read_unlock(); 1579 } 1580 EXPORT_SYMBOL_GPL(rpc_setbufsize); 1581 1582 /** 1583 * rpc_net_ns - Get the network namespace for this RPC client 1584 * @clnt: RPC client to query 1585 * 1586 */ 1587 struct net *rpc_net_ns(struct rpc_clnt *clnt) 1588 { 1589 struct net *ret; 1590 1591 rcu_read_lock(); 1592 ret = rcu_dereference(clnt->cl_xprt)->xprt_net; 1593 rcu_read_unlock(); 1594 return ret; 1595 } 1596 EXPORT_SYMBOL_GPL(rpc_net_ns); 1597 1598 /** 1599 * rpc_max_payload - Get maximum payload size for a transport, in bytes 1600 * @clnt: RPC client to query 1601 * 1602 * For stream transports, this is one RPC record fragment (see RFC 1603 * 1831), as we don't support multi-record requests yet. For datagram 1604 * transports, this is the size of an IP packet minus the IP, UDP, and 1605 * RPC header sizes. 1606 */ 1607 size_t rpc_max_payload(struct rpc_clnt *clnt) 1608 { 1609 size_t ret; 1610 1611 rcu_read_lock(); 1612 ret = rcu_dereference(clnt->cl_xprt)->max_payload; 1613 rcu_read_unlock(); 1614 return ret; 1615 } 1616 EXPORT_SYMBOL_GPL(rpc_max_payload); 1617 1618 /** 1619 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes 1620 * @clnt: RPC client to query 1621 */ 1622 size_t rpc_max_bc_payload(struct rpc_clnt *clnt) 1623 { 1624 struct rpc_xprt *xprt; 1625 size_t ret; 1626 1627 rcu_read_lock(); 1628 xprt = rcu_dereference(clnt->cl_xprt); 1629 ret = xprt->ops->bc_maxpayload(xprt); 1630 rcu_read_unlock(); 1631 return ret; 1632 } 1633 EXPORT_SYMBOL_GPL(rpc_max_bc_payload); 1634 1635 unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt) 1636 { 1637 struct rpc_xprt *xprt; 1638 unsigned int ret; 1639 1640 rcu_read_lock(); 1641 xprt = rcu_dereference(clnt->cl_xprt); 1642 ret = xprt->ops->bc_num_slots(xprt); 1643 rcu_read_unlock(); 1644 return ret; 1645 } 1646 EXPORT_SYMBOL_GPL(rpc_num_bc_slots); 1647 1648 /** 1649 * rpc_force_rebind - force transport to check that remote port is unchanged 1650 * @clnt: client to rebind 1651 * 1652 */ 1653 void rpc_force_rebind(struct rpc_clnt *clnt) 1654 { 1655 if (clnt->cl_autobind) { 1656 rcu_read_lock(); 1657 xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); 1658 rcu_read_unlock(); 1659 } 1660 } 1661 EXPORT_SYMBOL_GPL(rpc_force_rebind); 1662 1663 static int 1664 __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *)) 1665 { 1666 task->tk_status = 0; 1667 task->tk_rpc_status = 0; 1668 task->tk_action = action; 1669 return 1; 1670 } 1671 1672 /* 1673 * Restart an (async) RPC call. Usually called from within the 1674 * exit handler. 1675 */ 1676 int 1677 rpc_restart_call(struct rpc_task *task) 1678 { 1679 return __rpc_restart_call(task, call_start); 1680 } 1681 EXPORT_SYMBOL_GPL(rpc_restart_call); 1682 1683 /* 1684 * Restart an (async) RPC call from the call_prepare state. 1685 * Usually called from within the exit handler. 1686 */ 1687 int 1688 rpc_restart_call_prepare(struct rpc_task *task) 1689 { 1690 if (task->tk_ops->rpc_call_prepare != NULL) 1691 return __rpc_restart_call(task, rpc_prepare_task); 1692 return rpc_restart_call(task); 1693 } 1694 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); 1695 1696 const char 1697 *rpc_proc_name(const struct rpc_task *task) 1698 { 1699 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1700 1701 if (proc) { 1702 if (proc->p_name) 1703 return proc->p_name; 1704 else 1705 return "NULL"; 1706 } else 1707 return "no proc"; 1708 } 1709 1710 static void 1711 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) 1712 { 1713 trace_rpc_call_rpcerror(task, tk_status, rpc_status); 1714 rpc_task_set_rpc_status(task, rpc_status); 1715 rpc_exit(task, tk_status); 1716 } 1717 1718 static void 1719 rpc_call_rpcerror(struct rpc_task *task, int status) 1720 { 1721 __rpc_call_rpcerror(task, status, status); 1722 } 1723 1724 /* 1725 * 0. Initial state 1726 * 1727 * Other FSM states can be visited zero or more times, but 1728 * this state is visited exactly once for each RPC. 1729 */ 1730 static void 1731 call_start(struct rpc_task *task) 1732 { 1733 struct rpc_clnt *clnt = task->tk_client; 1734 int idx = task->tk_msg.rpc_proc->p_statidx; 1735 1736 trace_rpc_request(task); 1737 1738 if (task->tk_client->cl_shutdown) { 1739 rpc_call_rpcerror(task, -EIO); 1740 return; 1741 } 1742 1743 /* Increment call count (version might not be valid for ping) */ 1744 if (clnt->cl_program->version[clnt->cl_vers]) 1745 clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; 1746 clnt->cl_stats->rpccnt++; 1747 task->tk_action = call_reserve; 1748 rpc_task_set_transport(task, clnt); 1749 } 1750 1751 /* 1752 * 1. Reserve an RPC call slot 1753 */ 1754 static void 1755 call_reserve(struct rpc_task *task) 1756 { 1757 task->tk_status = 0; 1758 task->tk_action = call_reserveresult; 1759 xprt_reserve(task); 1760 } 1761 1762 static void call_retry_reserve(struct rpc_task *task); 1763 1764 /* 1765 * 1b. Grok the result of xprt_reserve() 1766 */ 1767 static void 1768 call_reserveresult(struct rpc_task *task) 1769 { 1770 int status = task->tk_status; 1771 1772 /* 1773 * After a call to xprt_reserve(), we must have either 1774 * a request slot or else an error status. 1775 */ 1776 task->tk_status = 0; 1777 if (status >= 0) { 1778 if (task->tk_rqstp) { 1779 task->tk_action = call_refresh; 1780 return; 1781 } 1782 1783 rpc_call_rpcerror(task, -EIO); 1784 return; 1785 } 1786 1787 switch (status) { 1788 case -ENOMEM: 1789 rpc_delay(task, HZ >> 2); 1790 fallthrough; 1791 case -EAGAIN: /* woken up; retry */ 1792 task->tk_action = call_retry_reserve; 1793 return; 1794 default: 1795 rpc_call_rpcerror(task, status); 1796 } 1797 } 1798 1799 /* 1800 * 1c. Retry reserving an RPC call slot 1801 */ 1802 static void 1803 call_retry_reserve(struct rpc_task *task) 1804 { 1805 task->tk_status = 0; 1806 task->tk_action = call_reserveresult; 1807 xprt_retry_reserve(task); 1808 } 1809 1810 /* 1811 * 2. Bind and/or refresh the credentials 1812 */ 1813 static void 1814 call_refresh(struct rpc_task *task) 1815 { 1816 task->tk_action = call_refreshresult; 1817 task->tk_status = 0; 1818 task->tk_client->cl_stats->rpcauthrefresh++; 1819 rpcauth_refreshcred(task); 1820 } 1821 1822 /* 1823 * 2a. Process the results of a credential refresh 1824 */ 1825 static void 1826 call_refreshresult(struct rpc_task *task) 1827 { 1828 int status = task->tk_status; 1829 1830 task->tk_status = 0; 1831 task->tk_action = call_refresh; 1832 switch (status) { 1833 case 0: 1834 if (rpcauth_uptodatecred(task)) { 1835 task->tk_action = call_allocate; 1836 return; 1837 } 1838 /* Use rate-limiting and a max number of retries if refresh 1839 * had status 0 but failed to update the cred. 1840 */ 1841 fallthrough; 1842 case -ETIMEDOUT: 1843 rpc_delay(task, 3*HZ); 1844 fallthrough; 1845 case -EAGAIN: 1846 status = -EACCES; 1847 fallthrough; 1848 case -EKEYEXPIRED: 1849 if (!task->tk_cred_retry) 1850 break; 1851 task->tk_cred_retry--; 1852 trace_rpc_retry_refresh_status(task); 1853 return; 1854 case -ENOMEM: 1855 rpc_delay(task, HZ >> 4); 1856 return; 1857 } 1858 trace_rpc_refresh_status(task); 1859 rpc_call_rpcerror(task, status); 1860 } 1861 1862 /* 1863 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. 1864 * (Note: buffer memory is freed in xprt_release). 1865 */ 1866 static void 1867 call_allocate(struct rpc_task *task) 1868 { 1869 const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; 1870 struct rpc_rqst *req = task->tk_rqstp; 1871 struct rpc_xprt *xprt = req->rq_xprt; 1872 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1873 int status; 1874 1875 task->tk_status = 0; 1876 task->tk_action = call_encode; 1877 1878 if (req->rq_buffer) 1879 return; 1880 1881 if (proc->p_proc != 0) { 1882 BUG_ON(proc->p_arglen == 0); 1883 if (proc->p_decode != NULL) 1884 BUG_ON(proc->p_replen == 0); 1885 } 1886 1887 /* 1888 * Calculate the size (in quads) of the RPC call 1889 * and reply headers, and convert both values 1890 * to byte sizes. 1891 */ 1892 req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) + 1893 proc->p_arglen; 1894 req->rq_callsize <<= 2; 1895 /* 1896 * Note: the reply buffer must at minimum allocate enough space 1897 * for the 'struct accepted_reply' from RFC5531. 1898 */ 1899 req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \ 1900 max_t(size_t, proc->p_replen, 2); 1901 req->rq_rcvsize <<= 2; 1902 1903 status = xprt->ops->buf_alloc(task); 1904 trace_rpc_buf_alloc(task, status); 1905 if (status == 0) 1906 return; 1907 if (status != -ENOMEM) { 1908 rpc_call_rpcerror(task, status); 1909 return; 1910 } 1911 1912 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { 1913 task->tk_action = call_allocate; 1914 rpc_delay(task, HZ>>4); 1915 return; 1916 } 1917 1918 rpc_call_rpcerror(task, -ERESTARTSYS); 1919 } 1920 1921 static int 1922 rpc_task_need_encode(struct rpc_task *task) 1923 { 1924 return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 && 1925 (!(task->tk_flags & RPC_TASK_SENT) || 1926 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) || 1927 xprt_request_need_retransmit(task)); 1928 } 1929 1930 static void 1931 rpc_xdr_encode(struct rpc_task *task) 1932 { 1933 struct rpc_rqst *req = task->tk_rqstp; 1934 struct xdr_stream xdr; 1935 1936 xdr_buf_init(&req->rq_snd_buf, 1937 req->rq_buffer, 1938 req->rq_callsize); 1939 xdr_buf_init(&req->rq_rcv_buf, 1940 req->rq_rbuffer, 1941 req->rq_rcvsize); 1942 1943 req->rq_reply_bytes_recvd = 0; 1944 req->rq_snd_buf.head[0].iov_len = 0; 1945 xdr_init_encode(&xdr, &req->rq_snd_buf, 1946 req->rq_snd_buf.head[0].iov_base, req); 1947 if (rpc_encode_header(task, &xdr)) 1948 return; 1949 1950 task->tk_status = rpcauth_wrap_req(task, &xdr); 1951 } 1952 1953 /* 1954 * 3. Encode arguments of an RPC call 1955 */ 1956 static void 1957 call_encode(struct rpc_task *task) 1958 { 1959 if (!rpc_task_need_encode(task)) 1960 goto out; 1961 1962 /* Dequeue task from the receive queue while we're encoding */ 1963 xprt_request_dequeue_xprt(task); 1964 /* Encode here so that rpcsec_gss can use correct sequence number. */ 1965 rpc_xdr_encode(task); 1966 /* Add task to reply queue before transmission to avoid races */ 1967 if (task->tk_status == 0 && rpc_reply_expected(task)) 1968 task->tk_status = xprt_request_enqueue_receive(task); 1969 /* Did the encode result in an error condition? */ 1970 if (task->tk_status != 0) { 1971 /* Was the error nonfatal? */ 1972 switch (task->tk_status) { 1973 case -EAGAIN: 1974 case -ENOMEM: 1975 rpc_delay(task, HZ >> 4); 1976 break; 1977 case -EKEYEXPIRED: 1978 if (!task->tk_cred_retry) { 1979 rpc_call_rpcerror(task, task->tk_status); 1980 } else { 1981 task->tk_action = call_refresh; 1982 task->tk_cred_retry--; 1983 trace_rpc_retry_refresh_status(task); 1984 } 1985 break; 1986 default: 1987 rpc_call_rpcerror(task, task->tk_status); 1988 } 1989 return; 1990 } 1991 1992 xprt_request_enqueue_transmit(task); 1993 out: 1994 task->tk_action = call_transmit; 1995 /* Check that the connection is OK */ 1996 if (!xprt_bound(task->tk_xprt)) 1997 task->tk_action = call_bind; 1998 else if (!xprt_connected(task->tk_xprt)) 1999 task->tk_action = call_connect; 2000 } 2001 2002 /* 2003 * Helpers to check if the task was already transmitted, and 2004 * to take action when that is the case. 2005 */ 2006 static bool 2007 rpc_task_transmitted(struct rpc_task *task) 2008 { 2009 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); 2010 } 2011 2012 static void 2013 rpc_task_handle_transmitted(struct rpc_task *task) 2014 { 2015 xprt_end_transmit(task); 2016 task->tk_action = call_transmit_status; 2017 } 2018 2019 /* 2020 * 4. Get the server port number if not yet set 2021 */ 2022 static void 2023 call_bind(struct rpc_task *task) 2024 { 2025 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2026 2027 if (rpc_task_transmitted(task)) { 2028 rpc_task_handle_transmitted(task); 2029 return; 2030 } 2031 2032 if (xprt_bound(xprt)) { 2033 task->tk_action = call_connect; 2034 return; 2035 } 2036 2037 task->tk_action = call_bind_status; 2038 if (!xprt_prepare_transmit(task)) 2039 return; 2040 2041 xprt->ops->rpcbind(task); 2042 } 2043 2044 /* 2045 * 4a. Sort out bind result 2046 */ 2047 static void 2048 call_bind_status(struct rpc_task *task) 2049 { 2050 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2051 int status = -EIO; 2052 2053 if (rpc_task_transmitted(task)) { 2054 rpc_task_handle_transmitted(task); 2055 return; 2056 } 2057 2058 if (task->tk_status >= 0) 2059 goto out_next; 2060 if (xprt_bound(xprt)) { 2061 task->tk_status = 0; 2062 goto out_next; 2063 } 2064 2065 switch (task->tk_status) { 2066 case -ENOMEM: 2067 rpc_delay(task, HZ >> 2); 2068 goto retry_timeout; 2069 case -EACCES: 2070 trace_rpcb_prog_unavail_err(task); 2071 /* fail immediately if this is an RPC ping */ 2072 if (task->tk_msg.rpc_proc->p_proc == 0) { 2073 status = -EOPNOTSUPP; 2074 break; 2075 } 2076 rpc_delay(task, 3*HZ); 2077 goto retry_timeout; 2078 case -ENOBUFS: 2079 rpc_delay(task, HZ >> 2); 2080 goto retry_timeout; 2081 case -EAGAIN: 2082 goto retry_timeout; 2083 case -ETIMEDOUT: 2084 trace_rpcb_timeout_err(task); 2085 goto retry_timeout; 2086 case -EPFNOSUPPORT: 2087 /* server doesn't support any rpcbind version we know of */ 2088 trace_rpcb_bind_version_err(task); 2089 break; 2090 case -EPROTONOSUPPORT: 2091 trace_rpcb_bind_version_err(task); 2092 goto retry_timeout; 2093 case -ECONNREFUSED: /* connection problems */ 2094 case -ECONNRESET: 2095 case -ECONNABORTED: 2096 case -ENOTCONN: 2097 case -EHOSTDOWN: 2098 case -ENETDOWN: 2099 case -EHOSTUNREACH: 2100 case -ENETUNREACH: 2101 case -EPIPE: 2102 trace_rpcb_unreachable_err(task); 2103 if (!RPC_IS_SOFTCONN(task)) { 2104 rpc_delay(task, 5*HZ); 2105 goto retry_timeout; 2106 } 2107 status = task->tk_status; 2108 break; 2109 default: 2110 trace_rpcb_unrecognized_err(task); 2111 } 2112 2113 rpc_call_rpcerror(task, status); 2114 return; 2115 out_next: 2116 task->tk_action = call_connect; 2117 return; 2118 retry_timeout: 2119 task->tk_status = 0; 2120 task->tk_action = call_bind; 2121 rpc_check_timeout(task); 2122 } 2123 2124 /* 2125 * 4b. Connect to the RPC server 2126 */ 2127 static void 2128 call_connect(struct rpc_task *task) 2129 { 2130 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2131 2132 if (rpc_task_transmitted(task)) { 2133 rpc_task_handle_transmitted(task); 2134 return; 2135 } 2136 2137 if (xprt_connected(xprt)) { 2138 task->tk_action = call_transmit; 2139 return; 2140 } 2141 2142 task->tk_action = call_connect_status; 2143 if (task->tk_status < 0) 2144 return; 2145 if (task->tk_flags & RPC_TASK_NOCONNECT) { 2146 rpc_call_rpcerror(task, -ENOTCONN); 2147 return; 2148 } 2149 if (!xprt_prepare_transmit(task)) 2150 return; 2151 xprt_connect(task); 2152 } 2153 2154 /* 2155 * 4c. Sort out connect result 2156 */ 2157 static void 2158 call_connect_status(struct rpc_task *task) 2159 { 2160 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 2161 struct rpc_clnt *clnt = task->tk_client; 2162 int status = task->tk_status; 2163 2164 if (rpc_task_transmitted(task)) { 2165 rpc_task_handle_transmitted(task); 2166 return; 2167 } 2168 2169 trace_rpc_connect_status(task); 2170 2171 if (task->tk_status == 0) { 2172 clnt->cl_stats->netreconn++; 2173 goto out_next; 2174 } 2175 if (xprt_connected(xprt)) { 2176 task->tk_status = 0; 2177 goto out_next; 2178 } 2179 2180 task->tk_status = 0; 2181 switch (status) { 2182 case -ECONNREFUSED: 2183 case -ECONNRESET: 2184 /* A positive refusal suggests a rebind is needed. */ 2185 if (RPC_IS_SOFTCONN(task)) 2186 break; 2187 if (clnt->cl_autobind) { 2188 rpc_force_rebind(clnt); 2189 goto out_retry; 2190 } 2191 fallthrough; 2192 case -ECONNABORTED: 2193 case -ENETDOWN: 2194 case -ENETUNREACH: 2195 case -EHOSTUNREACH: 2196 case -EPIPE: 2197 case -EPROTO: 2198 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2199 task->tk_rqstp->rq_connect_cookie); 2200 if (RPC_IS_SOFTCONN(task)) 2201 break; 2202 /* retry with existing socket, after a delay */ 2203 rpc_delay(task, 3*HZ); 2204 fallthrough; 2205 case -EADDRINUSE: 2206 case -ENOTCONN: 2207 case -EAGAIN: 2208 case -ETIMEDOUT: 2209 if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && 2210 (task->tk_flags & RPC_TASK_MOVEABLE) && 2211 test_bit(XPRT_REMOVE, &xprt->state)) { 2212 struct rpc_xprt *saved = task->tk_xprt; 2213 struct rpc_xprt_switch *xps; 2214 2215 rcu_read_lock(); 2216 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 2217 rcu_read_unlock(); 2218 if (xps->xps_nxprts > 1) { 2219 long value; 2220 2221 xprt_release(task); 2222 value = atomic_long_dec_return(&xprt->queuelen); 2223 if (value == 0) 2224 rpc_xprt_switch_remove_xprt(xps, saved, 2225 true); 2226 xprt_put(saved); 2227 task->tk_xprt = NULL; 2228 task->tk_action = call_start; 2229 } 2230 xprt_switch_put(xps); 2231 if (!task->tk_xprt) 2232 goto out; 2233 } 2234 goto out_retry; 2235 case -ENOBUFS: 2236 rpc_delay(task, HZ >> 2); 2237 goto out_retry; 2238 } 2239 rpc_call_rpcerror(task, status); 2240 return; 2241 out_next: 2242 task->tk_action = call_transmit; 2243 return; 2244 out_retry: 2245 /* Check for timeouts before looping back to call_bind */ 2246 task->tk_action = call_bind; 2247 out: 2248 rpc_check_timeout(task); 2249 } 2250 2251 /* 2252 * 5. Transmit the RPC request, and wait for reply 2253 */ 2254 static void 2255 call_transmit(struct rpc_task *task) 2256 { 2257 if (rpc_task_transmitted(task)) { 2258 rpc_task_handle_transmitted(task); 2259 return; 2260 } 2261 2262 task->tk_action = call_transmit_status; 2263 if (!xprt_prepare_transmit(task)) 2264 return; 2265 task->tk_status = 0; 2266 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2267 if (!xprt_connected(task->tk_xprt)) { 2268 task->tk_status = -ENOTCONN; 2269 return; 2270 } 2271 xprt_transmit(task); 2272 } 2273 xprt_end_transmit(task); 2274 } 2275 2276 /* 2277 * 5a. Handle cleanup after a transmission 2278 */ 2279 static void 2280 call_transmit_status(struct rpc_task *task) 2281 { 2282 task->tk_action = call_status; 2283 2284 /* 2285 * Common case: success. Force the compiler to put this 2286 * test first. 2287 */ 2288 if (rpc_task_transmitted(task)) { 2289 task->tk_status = 0; 2290 xprt_request_wait_receive(task); 2291 return; 2292 } 2293 2294 switch (task->tk_status) { 2295 default: 2296 break; 2297 case -EBADMSG: 2298 task->tk_status = 0; 2299 task->tk_action = call_encode; 2300 break; 2301 /* 2302 * Special cases: if we've been waiting on the 2303 * socket's write_space() callback, or if the 2304 * socket just returned a connection error, 2305 * then hold onto the transport lock. 2306 */ 2307 case -ENOMEM: 2308 case -ENOBUFS: 2309 rpc_delay(task, HZ>>2); 2310 fallthrough; 2311 case -EBADSLT: 2312 case -EAGAIN: 2313 task->tk_action = call_transmit; 2314 task->tk_status = 0; 2315 break; 2316 case -ECONNREFUSED: 2317 case -EHOSTDOWN: 2318 case -ENETDOWN: 2319 case -EHOSTUNREACH: 2320 case -ENETUNREACH: 2321 case -EPERM: 2322 if (RPC_IS_SOFTCONN(task)) { 2323 if (!task->tk_msg.rpc_proc->p_proc) 2324 trace_xprt_ping(task->tk_xprt, 2325 task->tk_status); 2326 rpc_call_rpcerror(task, task->tk_status); 2327 return; 2328 } 2329 fallthrough; 2330 case -ECONNRESET: 2331 case -ECONNABORTED: 2332 case -EADDRINUSE: 2333 case -ENOTCONN: 2334 case -EPIPE: 2335 task->tk_action = call_bind; 2336 task->tk_status = 0; 2337 break; 2338 } 2339 rpc_check_timeout(task); 2340 } 2341 2342 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 2343 static void call_bc_transmit(struct rpc_task *task); 2344 static void call_bc_transmit_status(struct rpc_task *task); 2345 2346 static void 2347 call_bc_encode(struct rpc_task *task) 2348 { 2349 xprt_request_enqueue_transmit(task); 2350 task->tk_action = call_bc_transmit; 2351 } 2352 2353 /* 2354 * 5b. Send the backchannel RPC reply. On error, drop the reply. In 2355 * addition, disconnect on connectivity errors. 2356 */ 2357 static void 2358 call_bc_transmit(struct rpc_task *task) 2359 { 2360 task->tk_action = call_bc_transmit_status; 2361 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { 2362 if (!xprt_prepare_transmit(task)) 2363 return; 2364 task->tk_status = 0; 2365 xprt_transmit(task); 2366 } 2367 xprt_end_transmit(task); 2368 } 2369 2370 static void 2371 call_bc_transmit_status(struct rpc_task *task) 2372 { 2373 struct rpc_rqst *req = task->tk_rqstp; 2374 2375 if (rpc_task_transmitted(task)) 2376 task->tk_status = 0; 2377 2378 switch (task->tk_status) { 2379 case 0: 2380 /* Success */ 2381 case -ENETDOWN: 2382 case -EHOSTDOWN: 2383 case -EHOSTUNREACH: 2384 case -ENETUNREACH: 2385 case -ECONNRESET: 2386 case -ECONNREFUSED: 2387 case -EADDRINUSE: 2388 case -ENOTCONN: 2389 case -EPIPE: 2390 break; 2391 case -ENOMEM: 2392 case -ENOBUFS: 2393 rpc_delay(task, HZ>>2); 2394 fallthrough; 2395 case -EBADSLT: 2396 case -EAGAIN: 2397 task->tk_status = 0; 2398 task->tk_action = call_bc_transmit; 2399 return; 2400 case -ETIMEDOUT: 2401 /* 2402 * Problem reaching the server. Disconnect and let the 2403 * forechannel reestablish the connection. The server will 2404 * have to retransmit the backchannel request and we'll 2405 * reprocess it. Since these ops are idempotent, there's no 2406 * need to cache our reply at this time. 2407 */ 2408 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2409 "error: %d\n", task->tk_status); 2410 xprt_conditional_disconnect(req->rq_xprt, 2411 req->rq_connect_cookie); 2412 break; 2413 default: 2414 /* 2415 * We were unable to reply and will have to drop the 2416 * request. The server should reconnect and retransmit. 2417 */ 2418 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 2419 "error: %d\n", task->tk_status); 2420 break; 2421 } 2422 task->tk_action = rpc_exit_task; 2423 } 2424 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 2425 2426 /* 2427 * 6. Sort out the RPC call status 2428 */ 2429 static void 2430 call_status(struct rpc_task *task) 2431 { 2432 struct rpc_clnt *clnt = task->tk_client; 2433 int status; 2434 2435 if (!task->tk_msg.rpc_proc->p_proc) 2436 trace_xprt_ping(task->tk_xprt, task->tk_status); 2437 2438 status = task->tk_status; 2439 if (status >= 0) { 2440 task->tk_action = call_decode; 2441 return; 2442 } 2443 2444 trace_rpc_call_status(task); 2445 task->tk_status = 0; 2446 switch(status) { 2447 case -EHOSTDOWN: 2448 case -ENETDOWN: 2449 case -EHOSTUNREACH: 2450 case -ENETUNREACH: 2451 case -EPERM: 2452 if (RPC_IS_SOFTCONN(task)) 2453 goto out_exit; 2454 /* 2455 * Delay any retries for 3 seconds, then handle as if it 2456 * were a timeout. 2457 */ 2458 rpc_delay(task, 3*HZ); 2459 fallthrough; 2460 case -ETIMEDOUT: 2461 break; 2462 case -ECONNREFUSED: 2463 case -ECONNRESET: 2464 case -ECONNABORTED: 2465 case -ENOTCONN: 2466 rpc_force_rebind(clnt); 2467 break; 2468 case -EADDRINUSE: 2469 rpc_delay(task, 3*HZ); 2470 fallthrough; 2471 case -EPIPE: 2472 case -EAGAIN: 2473 break; 2474 case -ENFILE: 2475 case -ENOBUFS: 2476 case -ENOMEM: 2477 rpc_delay(task, HZ>>2); 2478 break; 2479 case -EIO: 2480 /* shutdown or soft timeout */ 2481 goto out_exit; 2482 default: 2483 if (clnt->cl_chatty) 2484 printk("%s: RPC call returned error %d\n", 2485 clnt->cl_program->name, -status); 2486 goto out_exit; 2487 } 2488 task->tk_action = call_encode; 2489 rpc_check_timeout(task); 2490 return; 2491 out_exit: 2492 rpc_call_rpcerror(task, status); 2493 } 2494 2495 static bool 2496 rpc_check_connected(const struct rpc_rqst *req) 2497 { 2498 /* No allocated request or transport? return true */ 2499 if (!req || !req->rq_xprt) 2500 return true; 2501 return xprt_connected(req->rq_xprt); 2502 } 2503 2504 static void 2505 rpc_check_timeout(struct rpc_task *task) 2506 { 2507 struct rpc_clnt *clnt = task->tk_client; 2508 2509 if (RPC_SIGNALLED(task)) 2510 return; 2511 2512 if (xprt_adjust_timeout(task->tk_rqstp) == 0) 2513 return; 2514 2515 trace_rpc_timeout_status(task); 2516 task->tk_timeouts++; 2517 2518 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { 2519 rpc_call_rpcerror(task, -ETIMEDOUT); 2520 return; 2521 } 2522 2523 if (RPC_IS_SOFT(task)) { 2524 /* 2525 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has 2526 * been sent, it should time out only if the transport 2527 * connection gets terminally broken. 2528 */ 2529 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && 2530 rpc_check_connected(task->tk_rqstp)) 2531 return; 2532 2533 if (clnt->cl_chatty) { 2534 pr_notice_ratelimited( 2535 "%s: server %s not responding, timed out\n", 2536 clnt->cl_program->name, 2537 task->tk_xprt->servername); 2538 } 2539 if (task->tk_flags & RPC_TASK_TIMEOUT) 2540 rpc_call_rpcerror(task, -ETIMEDOUT); 2541 else 2542 __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); 2543 return; 2544 } 2545 2546 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 2547 task->tk_flags |= RPC_CALL_MAJORSEEN; 2548 if (clnt->cl_chatty) { 2549 pr_notice_ratelimited( 2550 "%s: server %s not responding, still trying\n", 2551 clnt->cl_program->name, 2552 task->tk_xprt->servername); 2553 } 2554 } 2555 rpc_force_rebind(clnt); 2556 /* 2557 * Did our request time out due to an RPCSEC_GSS out-of-sequence 2558 * event? RFC2203 requires the server to drop all such requests. 2559 */ 2560 rpcauth_invalcred(task); 2561 } 2562 2563 /* 2564 * 7. Decode the RPC reply 2565 */ 2566 static void 2567 call_decode(struct rpc_task *task) 2568 { 2569 struct rpc_clnt *clnt = task->tk_client; 2570 struct rpc_rqst *req = task->tk_rqstp; 2571 struct xdr_stream xdr; 2572 int err; 2573 2574 if (!task->tk_msg.rpc_proc->p_decode) { 2575 task->tk_action = rpc_exit_task; 2576 return; 2577 } 2578 2579 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 2580 if (clnt->cl_chatty) { 2581 pr_notice_ratelimited("%s: server %s OK\n", 2582 clnt->cl_program->name, 2583 task->tk_xprt->servername); 2584 } 2585 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 2586 } 2587 2588 /* 2589 * Did we ever call xprt_complete_rqst()? If not, we should assume 2590 * the message is incomplete. 2591 */ 2592 err = -EAGAIN; 2593 if (!req->rq_reply_bytes_recvd) 2594 goto out; 2595 2596 /* Ensure that we see all writes made by xprt_complete_rqst() 2597 * before it changed req->rq_reply_bytes_recvd. 2598 */ 2599 smp_rmb(); 2600 2601 req->rq_rcv_buf.len = req->rq_private_buf.len; 2602 trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); 2603 2604 /* Check that the softirq receive buffer is valid */ 2605 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 2606 sizeof(req->rq_rcv_buf)) != 0); 2607 2608 xdr_init_decode(&xdr, &req->rq_rcv_buf, 2609 req->rq_rcv_buf.head[0].iov_base, req); 2610 err = rpc_decode_header(task, &xdr); 2611 out: 2612 switch (err) { 2613 case 0: 2614 task->tk_action = rpc_exit_task; 2615 task->tk_status = rpcauth_unwrap_resp(task, &xdr); 2616 xdr_finish_decode(&xdr); 2617 return; 2618 case -EAGAIN: 2619 task->tk_status = 0; 2620 if (task->tk_client->cl_discrtry) 2621 xprt_conditional_disconnect(req->rq_xprt, 2622 req->rq_connect_cookie); 2623 task->tk_action = call_encode; 2624 rpc_check_timeout(task); 2625 break; 2626 case -EKEYREJECTED: 2627 task->tk_action = call_reserve; 2628 rpc_check_timeout(task); 2629 rpcauth_invalcred(task); 2630 /* Ensure we obtain a new XID if we retry! */ 2631 xprt_release(task); 2632 } 2633 } 2634 2635 static int 2636 rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) 2637 { 2638 struct rpc_clnt *clnt = task->tk_client; 2639 struct rpc_rqst *req = task->tk_rqstp; 2640 __be32 *p; 2641 int error; 2642 2643 error = -EMSGSIZE; 2644 p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); 2645 if (!p) 2646 goto out_fail; 2647 *p++ = req->rq_xid; 2648 *p++ = rpc_call; 2649 *p++ = cpu_to_be32(RPC_VERSION); 2650 *p++ = cpu_to_be32(clnt->cl_prog); 2651 *p++ = cpu_to_be32(clnt->cl_vers); 2652 *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); 2653 2654 error = rpcauth_marshcred(task, xdr); 2655 if (error < 0) 2656 goto out_fail; 2657 return 0; 2658 out_fail: 2659 trace_rpc_bad_callhdr(task); 2660 rpc_call_rpcerror(task, error); 2661 return error; 2662 } 2663 2664 static noinline int 2665 rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) 2666 { 2667 struct rpc_clnt *clnt = task->tk_client; 2668 int error; 2669 __be32 *p; 2670 2671 /* RFC-1014 says that the representation of XDR data must be a 2672 * multiple of four bytes 2673 * - if it isn't pointer subtraction in the NFS client may give 2674 * undefined results 2675 */ 2676 if (task->tk_rqstp->rq_rcv_buf.len & 3) 2677 goto out_unparsable; 2678 2679 p = xdr_inline_decode(xdr, 3 * sizeof(*p)); 2680 if (!p) 2681 goto out_unparsable; 2682 p++; /* skip XID */ 2683 if (*p++ != rpc_reply) 2684 goto out_unparsable; 2685 if (*p++ != rpc_msg_accepted) 2686 goto out_msg_denied; 2687 2688 error = rpcauth_checkverf(task, xdr); 2689 if (error) 2690 goto out_verifier; 2691 2692 p = xdr_inline_decode(xdr, sizeof(*p)); 2693 if (!p) 2694 goto out_unparsable; 2695 switch (*p) { 2696 case rpc_success: 2697 return 0; 2698 case rpc_prog_unavail: 2699 trace_rpc__prog_unavail(task); 2700 error = -EPFNOSUPPORT; 2701 goto out_err; 2702 case rpc_prog_mismatch: 2703 trace_rpc__prog_mismatch(task); 2704 error = -EPROTONOSUPPORT; 2705 goto out_err; 2706 case rpc_proc_unavail: 2707 trace_rpc__proc_unavail(task); 2708 error = -EOPNOTSUPP; 2709 goto out_err; 2710 case rpc_garbage_args: 2711 case rpc_system_err: 2712 trace_rpc__garbage_args(task); 2713 error = -EIO; 2714 break; 2715 default: 2716 goto out_unparsable; 2717 } 2718 2719 out_garbage: 2720 clnt->cl_stats->rpcgarbage++; 2721 if (task->tk_garb_retry) { 2722 task->tk_garb_retry--; 2723 task->tk_action = call_encode; 2724 return -EAGAIN; 2725 } 2726 out_err: 2727 rpc_call_rpcerror(task, error); 2728 return error; 2729 2730 out_unparsable: 2731 trace_rpc__unparsable(task); 2732 error = -EIO; 2733 goto out_garbage; 2734 2735 out_verifier: 2736 trace_rpc_bad_verifier(task); 2737 switch (error) { 2738 case -EPROTONOSUPPORT: 2739 goto out_err; 2740 case -EACCES: 2741 /* Re-encode with a fresh cred */ 2742 fallthrough; 2743 default: 2744 goto out_garbage; 2745 } 2746 2747 out_msg_denied: 2748 error = -EACCES; 2749 p = xdr_inline_decode(xdr, sizeof(*p)); 2750 if (!p) 2751 goto out_unparsable; 2752 switch (*p++) { 2753 case rpc_auth_error: 2754 break; 2755 case rpc_mismatch: 2756 trace_rpc__mismatch(task); 2757 error = -EPROTONOSUPPORT; 2758 goto out_err; 2759 default: 2760 goto out_unparsable; 2761 } 2762 2763 p = xdr_inline_decode(xdr, sizeof(*p)); 2764 if (!p) 2765 goto out_unparsable; 2766 switch (*p++) { 2767 case rpc_autherr_rejectedcred: 2768 case rpc_autherr_rejectedverf: 2769 case rpcsec_gsserr_credproblem: 2770 case rpcsec_gsserr_ctxproblem: 2771 rpcauth_invalcred(task); 2772 if (!task->tk_cred_retry) 2773 break; 2774 task->tk_cred_retry--; 2775 trace_rpc__stale_creds(task); 2776 return -EKEYREJECTED; 2777 case rpc_autherr_badcred: 2778 case rpc_autherr_badverf: 2779 /* possibly garbled cred/verf? */ 2780 if (!task->tk_garb_retry) 2781 break; 2782 task->tk_garb_retry--; 2783 trace_rpc__bad_creds(task); 2784 task->tk_action = call_encode; 2785 return -EAGAIN; 2786 case rpc_autherr_tooweak: 2787 trace_rpc__auth_tooweak(task); 2788 pr_warn("RPC: server %s requires stronger authentication.\n", 2789 task->tk_xprt->servername); 2790 break; 2791 default: 2792 goto out_unparsable; 2793 } 2794 goto out_err; 2795 } 2796 2797 static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2798 const void *obj) 2799 { 2800 } 2801 2802 static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 2803 void *obj) 2804 { 2805 return 0; 2806 } 2807 2808 static const struct rpc_procinfo rpcproc_null = { 2809 .p_encode = rpcproc_encode_null, 2810 .p_decode = rpcproc_decode_null, 2811 }; 2812 2813 static const struct rpc_procinfo rpcproc_null_noreply = { 2814 .p_encode = rpcproc_encode_null, 2815 }; 2816 2817 static void 2818 rpc_null_call_prepare(struct rpc_task *task, void *data) 2819 { 2820 task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT; 2821 rpc_call_start(task); 2822 } 2823 2824 static const struct rpc_call_ops rpc_null_ops = { 2825 .rpc_call_prepare = rpc_null_call_prepare, 2826 .rpc_call_done = rpc_default_callback, 2827 }; 2828 2829 static 2830 struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, 2831 struct rpc_xprt *xprt, struct rpc_cred *cred, int flags, 2832 const struct rpc_call_ops *ops, void *data) 2833 { 2834 struct rpc_message msg = { 2835 .rpc_proc = &rpcproc_null, 2836 }; 2837 struct rpc_task_setup task_setup_data = { 2838 .rpc_client = clnt, 2839 .rpc_xprt = xprt, 2840 .rpc_message = &msg, 2841 .rpc_op_cred = cred, 2842 .callback_ops = ops ?: &rpc_null_ops, 2843 .callback_data = data, 2844 .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | 2845 RPC_TASK_NULLCREDS, 2846 }; 2847 2848 return rpc_run_task(&task_setup_data); 2849 } 2850 2851 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) 2852 { 2853 return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL); 2854 } 2855 EXPORT_SYMBOL_GPL(rpc_call_null); 2856 2857 static int rpc_ping(struct rpc_clnt *clnt) 2858 { 2859 struct rpc_task *task; 2860 int status; 2861 2862 if (clnt->cl_auth->au_ops->ping) 2863 return clnt->cl_auth->au_ops->ping(clnt); 2864 2865 task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL); 2866 if (IS_ERR(task)) 2867 return PTR_ERR(task); 2868 status = task->tk_status; 2869 rpc_put_task(task); 2870 return status; 2871 } 2872 2873 static int rpc_ping_noreply(struct rpc_clnt *clnt) 2874 { 2875 struct rpc_message msg = { 2876 .rpc_proc = &rpcproc_null_noreply, 2877 }; 2878 struct rpc_task_setup task_setup_data = { 2879 .rpc_client = clnt, 2880 .rpc_message = &msg, 2881 .callback_ops = &rpc_null_ops, 2882 .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, 2883 }; 2884 struct rpc_task *task; 2885 int status; 2886 2887 task = rpc_run_task(&task_setup_data); 2888 if (IS_ERR(task)) 2889 return PTR_ERR(task); 2890 status = task->tk_status; 2891 rpc_put_task(task); 2892 return status; 2893 } 2894 2895 struct rpc_cb_add_xprt_calldata { 2896 struct rpc_xprt_switch *xps; 2897 struct rpc_xprt *xprt; 2898 }; 2899 2900 static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) 2901 { 2902 struct rpc_cb_add_xprt_calldata *data = calldata; 2903 2904 if (task->tk_status == 0) 2905 rpc_xprt_switch_add_xprt(data->xps, data->xprt); 2906 } 2907 2908 static void rpc_cb_add_xprt_release(void *calldata) 2909 { 2910 struct rpc_cb_add_xprt_calldata *data = calldata; 2911 2912 xprt_put(data->xprt); 2913 xprt_switch_put(data->xps); 2914 kfree(data); 2915 } 2916 2917 static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = { 2918 .rpc_call_prepare = rpc_null_call_prepare, 2919 .rpc_call_done = rpc_cb_add_xprt_done, 2920 .rpc_release = rpc_cb_add_xprt_release, 2921 }; 2922 2923 /** 2924 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt 2925 * @clnt: pointer to struct rpc_clnt 2926 * @xps: pointer to struct rpc_xprt_switch, 2927 * @xprt: pointer struct rpc_xprt 2928 * @in_max_connect: pointer to the max_connect value for the passed in xprt transport 2929 */ 2930 int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, 2931 struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, 2932 void *in_max_connect) 2933 { 2934 struct rpc_cb_add_xprt_calldata *data; 2935 struct rpc_task *task; 2936 int max_connect = clnt->cl_max_connect; 2937 2938 if (in_max_connect) 2939 max_connect = *(int *)in_max_connect; 2940 if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { 2941 rcu_read_lock(); 2942 pr_warn("SUNRPC: reached max allowed number (%d) did not add " 2943 "transport to server: %s\n", max_connect, 2944 rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); 2945 rcu_read_unlock(); 2946 return -EINVAL; 2947 } 2948 2949 data = kmalloc(sizeof(*data), GFP_KERNEL); 2950 if (!data) 2951 return -ENOMEM; 2952 data->xps = xprt_switch_get(xps); 2953 data->xprt = xprt_get(xprt); 2954 if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) { 2955 rpc_cb_add_xprt_release(data); 2956 goto success; 2957 } 2958 2959 task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, 2960 &rpc_cb_add_xprt_call_ops, data); 2961 if (IS_ERR(task)) 2962 return PTR_ERR(task); 2963 2964 data->xps->xps_nunique_destaddr_xprts++; 2965 rpc_put_task(task); 2966 success: 2967 return 1; 2968 } 2969 EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); 2970 2971 static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, 2972 struct rpc_xprt *xprt, 2973 struct rpc_add_xprt_test *data) 2974 { 2975 struct rpc_task *task; 2976 int status = -EADDRINUSE; 2977 2978 /* Test the connection */ 2979 task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL); 2980 if (IS_ERR(task)) 2981 return PTR_ERR(task); 2982 2983 status = task->tk_status; 2984 rpc_put_task(task); 2985 2986 if (status < 0) 2987 return status; 2988 2989 /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */ 2990 data->add_xprt_test(clnt, xprt, data->data); 2991 2992 return 0; 2993 } 2994 2995 /** 2996 * rpc_clnt_setup_test_and_add_xprt() 2997 * 2998 * This is an rpc_clnt_add_xprt setup() function which returns 1 so: 2999 * 1) caller of the test function must dereference the rpc_xprt_switch 3000 * and the rpc_xprt. 3001 * 2) test function must call rpc_xprt_switch_add_xprt, usually in 3002 * the rpc_call_done routine. 3003 * 3004 * Upon success (return of 1), the test function adds the new 3005 * transport to the rpc_clnt xprt switch 3006 * 3007 * @clnt: struct rpc_clnt to get the new transport 3008 * @xps: the rpc_xprt_switch to hold the new transport 3009 * @xprt: the rpc_xprt to test 3010 * @data: a struct rpc_add_xprt_test pointer that holds the test function 3011 * and test function call data 3012 */ 3013 int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, 3014 struct rpc_xprt_switch *xps, 3015 struct rpc_xprt *xprt, 3016 void *data) 3017 { 3018 int status = -EADDRINUSE; 3019 3020 xprt = xprt_get(xprt); 3021 xprt_switch_get(xps); 3022 3023 if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) 3024 goto out_err; 3025 3026 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3027 if (status < 0) 3028 goto out_err; 3029 3030 status = 1; 3031 out_err: 3032 xprt_put(xprt); 3033 xprt_switch_put(xps); 3034 if (status < 0) 3035 pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not " 3036 "added\n", status, 3037 xprt->address_strings[RPC_DISPLAY_ADDR]); 3038 /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ 3039 return status; 3040 } 3041 EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); 3042 3043 /** 3044 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt 3045 * @clnt: pointer to struct rpc_clnt 3046 * @xprtargs: pointer to struct xprt_create 3047 * @setup: callback to test and/or set up the connection 3048 * @data: pointer to setup function data 3049 * 3050 * Creates a new transport using the parameters set in args and 3051 * adds it to clnt. 3052 * If ping is set, then test that connectivity succeeds before 3053 * adding the new transport. 3054 * 3055 */ 3056 int rpc_clnt_add_xprt(struct rpc_clnt *clnt, 3057 struct xprt_create *xprtargs, 3058 int (*setup)(struct rpc_clnt *, 3059 struct rpc_xprt_switch *, 3060 struct rpc_xprt *, 3061 void *), 3062 void *data) 3063 { 3064 struct rpc_xprt_switch *xps; 3065 struct rpc_xprt *xprt; 3066 unsigned long connect_timeout; 3067 unsigned long reconnect_timeout; 3068 unsigned char resvport, reuseport; 3069 int ret = 0, ident; 3070 3071 rcu_read_lock(); 3072 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3073 xprt = xprt_iter_xprt(&clnt->cl_xpi); 3074 if (xps == NULL || xprt == NULL) { 3075 rcu_read_unlock(); 3076 xprt_switch_put(xps); 3077 return -EAGAIN; 3078 } 3079 resvport = xprt->resvport; 3080 reuseport = xprt->reuseport; 3081 connect_timeout = xprt->connect_timeout; 3082 reconnect_timeout = xprt->max_reconnect_timeout; 3083 ident = xprt->xprt_class->ident; 3084 rcu_read_unlock(); 3085 3086 if (!xprtargs->ident) 3087 xprtargs->ident = ident; 3088 xprtargs->xprtsec = clnt->cl_xprtsec; 3089 xprt = xprt_create_transport(xprtargs); 3090 if (IS_ERR(xprt)) { 3091 ret = PTR_ERR(xprt); 3092 goto out_put_switch; 3093 } 3094 xprt->resvport = resvport; 3095 xprt->reuseport = reuseport; 3096 3097 if (xprtargs->connect_timeout) 3098 connect_timeout = xprtargs->connect_timeout; 3099 if (xprtargs->reconnect_timeout) 3100 reconnect_timeout = xprtargs->reconnect_timeout; 3101 if (xprt->ops->set_connect_timeout != NULL) 3102 xprt->ops->set_connect_timeout(xprt, 3103 connect_timeout, 3104 reconnect_timeout); 3105 3106 rpc_xprt_switch_set_roundrobin(xps); 3107 if (setup) { 3108 ret = setup(clnt, xps, xprt, data); 3109 if (ret != 0) 3110 goto out_put_xprt; 3111 } 3112 rpc_xprt_switch_add_xprt(xps, xprt); 3113 out_put_xprt: 3114 xprt_put(xprt); 3115 out_put_switch: 3116 xprt_switch_put(xps); 3117 return ret; 3118 } 3119 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 3120 3121 static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, 3122 struct rpc_xprt *xprt, 3123 struct rpc_add_xprt_test *data) 3124 { 3125 struct rpc_xprt_switch *xps; 3126 struct rpc_xprt *main_xprt; 3127 int status = 0; 3128 3129 xprt_get(xprt); 3130 3131 rcu_read_lock(); 3132 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3133 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3134 status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3135 (struct sockaddr *)&main_xprt->addr); 3136 rcu_read_unlock(); 3137 xprt_put(main_xprt); 3138 if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) 3139 goto out; 3140 3141 status = rpc_clnt_add_xprt_helper(clnt, xprt, data); 3142 out: 3143 xprt_put(xprt); 3144 xprt_switch_put(xps); 3145 return status; 3146 } 3147 3148 /* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking 3149 * @clnt rpc_clnt structure 3150 * 3151 * For each offlined transport found in the rpc_clnt structure call 3152 * the function rpc_xprt_probe_trunked() which will determine if this 3153 * transport still belongs to the trunking group. 3154 */ 3155 void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, 3156 struct rpc_add_xprt_test *data) 3157 { 3158 struct rpc_xprt_iter xpi; 3159 int ret; 3160 3161 ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi); 3162 if (ret) 3163 return; 3164 for (;;) { 3165 struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); 3166 3167 if (!xprt) 3168 break; 3169 ret = rpc_xprt_probe_trunked(clnt, xprt, data); 3170 xprt_put(xprt); 3171 if (ret < 0) 3172 break; 3173 xprt_iter_rewind(&xpi); 3174 } 3175 xprt_iter_destroy(&xpi); 3176 } 3177 EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); 3178 3179 static int rpc_xprt_offline(struct rpc_clnt *clnt, 3180 struct rpc_xprt *xprt, 3181 void *data) 3182 { 3183 struct rpc_xprt *main_xprt; 3184 struct rpc_xprt_switch *xps; 3185 int err = 0; 3186 3187 xprt_get(xprt); 3188 3189 rcu_read_lock(); 3190 main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); 3191 xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3192 err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, 3193 (struct sockaddr *)&main_xprt->addr); 3194 rcu_read_unlock(); 3195 xprt_put(main_xprt); 3196 if (err) 3197 goto out; 3198 3199 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { 3200 err = -EINTR; 3201 goto out; 3202 } 3203 xprt_set_offline_locked(xprt, xps); 3204 3205 xprt_release_write(xprt, NULL); 3206 out: 3207 xprt_put(xprt); 3208 xprt_switch_put(xps); 3209 return err; 3210 } 3211 3212 /* rpc_clnt_manage_trunked_xprts -- offline trunked transports 3213 * @clnt rpc_clnt structure 3214 * 3215 * For each active transport found in the rpc_clnt structure call 3216 * the function rpc_xprt_offline() which will identify trunked transports 3217 * and will mark them offline. 3218 */ 3219 void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) 3220 { 3221 rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); 3222 } 3223 EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); 3224 3225 struct connect_timeout_data { 3226 unsigned long connect_timeout; 3227 unsigned long reconnect_timeout; 3228 }; 3229 3230 static int 3231 rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, 3232 struct rpc_xprt *xprt, 3233 void *data) 3234 { 3235 struct connect_timeout_data *timeo = data; 3236 3237 if (xprt->ops->set_connect_timeout) 3238 xprt->ops->set_connect_timeout(xprt, 3239 timeo->connect_timeout, 3240 timeo->reconnect_timeout); 3241 return 0; 3242 } 3243 3244 void 3245 rpc_set_connect_timeout(struct rpc_clnt *clnt, 3246 unsigned long connect_timeout, 3247 unsigned long reconnect_timeout) 3248 { 3249 struct connect_timeout_data timeout = { 3250 .connect_timeout = connect_timeout, 3251 .reconnect_timeout = reconnect_timeout, 3252 }; 3253 rpc_clnt_iterate_for_each_xprt(clnt, 3254 rpc_xprt_set_connect_timeout, 3255 &timeout); 3256 } 3257 EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); 3258 3259 void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) 3260 { 3261 rcu_read_lock(); 3262 xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 3263 rcu_read_unlock(); 3264 } 3265 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); 3266 3267 void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3268 { 3269 struct rpc_xprt_switch *xps; 3270 3271 rcu_read_lock(); 3272 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3273 rcu_read_unlock(); 3274 xprt_set_online_locked(xprt, xps); 3275 } 3276 3277 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3278 { 3279 if (rpc_clnt_xprt_switch_has_addr(clnt, 3280 (const struct sockaddr *)&xprt->addr)) { 3281 return rpc_clnt_xprt_set_online(clnt, xprt); 3282 } 3283 rcu_read_lock(); 3284 rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 3285 xprt); 3286 rcu_read_unlock(); 3287 } 3288 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 3289 3290 void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 3291 { 3292 struct rpc_xprt_switch *xps; 3293 3294 rcu_read_lock(); 3295 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3296 rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 3297 xprt, 0); 3298 xps->xps_nunique_destaddr_xprts--; 3299 rcu_read_unlock(); 3300 } 3301 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); 3302 3303 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 3304 const struct sockaddr *sap) 3305 { 3306 struct rpc_xprt_switch *xps; 3307 bool ret; 3308 3309 rcu_read_lock(); 3310 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 3311 ret = rpc_xprt_switch_has_addr(xps, sap); 3312 rcu_read_unlock(); 3313 return ret; 3314 } 3315 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); 3316 3317 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3318 static void rpc_show_header(void) 3319 { 3320 printk(KERN_INFO "-pid- flgs status -client- --rqstp- " 3321 "-timeout ---ops--\n"); 3322 } 3323 3324 static void rpc_show_task(const struct rpc_clnt *clnt, 3325 const struct rpc_task *task) 3326 { 3327 const char *rpc_waitq = "none"; 3328 3329 if (RPC_IS_QUEUED(task)) 3330 rpc_waitq = rpc_qname(task->tk_waitqueue); 3331 3332 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", 3333 task->tk_pid, task->tk_flags, task->tk_status, 3334 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, 3335 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), 3336 task->tk_action, rpc_waitq); 3337 } 3338 3339 void rpc_show_tasks(struct net *net) 3340 { 3341 struct rpc_clnt *clnt; 3342 struct rpc_task *task; 3343 int header = 0; 3344 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 3345 3346 spin_lock(&sn->rpc_client_lock); 3347 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 3348 spin_lock(&clnt->cl_lock); 3349 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 3350 if (!header) { 3351 rpc_show_header(); 3352 header++; 3353 } 3354 rpc_show_task(clnt, task); 3355 } 3356 spin_unlock(&clnt->cl_lock); 3357 } 3358 spin_unlock(&sn->rpc_client_lock); 3359 } 3360 #endif 3361 3362 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 3363 static int 3364 rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, 3365 struct rpc_xprt *xprt, 3366 void *dummy) 3367 { 3368 return xprt_enable_swap(xprt); 3369 } 3370 3371 int 3372 rpc_clnt_swap_activate(struct rpc_clnt *clnt) 3373 { 3374 while (clnt != clnt->cl_parent) 3375 clnt = clnt->cl_parent; 3376 if (atomic_inc_return(&clnt->cl_swapper) == 1) 3377 return rpc_clnt_iterate_for_each_xprt(clnt, 3378 rpc_clnt_swap_activate_callback, NULL); 3379 return 0; 3380 } 3381 EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); 3382 3383 static int 3384 rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, 3385 struct rpc_xprt *xprt, 3386 void *dummy) 3387 { 3388 xprt_disable_swap(xprt); 3389 return 0; 3390 } 3391 3392 void 3393 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) 3394 { 3395 while (clnt != clnt->cl_parent) 3396 clnt = clnt->cl_parent; 3397 if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) 3398 rpc_clnt_iterate_for_each_xprt(clnt, 3399 rpc_clnt_swap_deactivate_callback, NULL); 3400 } 3401 EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); 3402 #endif /* CONFIG_SUNRPC_SWAP */ 3403