/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void	call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static __be32	*rpc_encode_header(struct rpc_task *task);
static __be32	*rpc_verify_header(struct rpc_task *task);
static int	rpc_ping(struct rpc_clnt *clnt, int flags);

static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
	}
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
	struct rpc_program *program = args->program;
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	size_t len;

	/* sanity check the name before trying to print it */
	err = -EINVAL;
	len = strlen(args->servername);
	if (len > RPC_MAXNETNAMELEN)
		goto out_no_rpciod;
	len++;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;

	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, args->servername, len);

	clnt->cl_xprt = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = args->prognumber ? : program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		clnt->cl_timeout = &clnt->cl_timeout_default;
	}

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
	clnt->cl_principal = NULL;
	if (args->client_name) {
		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
		if (!clnt->cl_principal)
			goto out_no_principal;
	}

	kref_init(&clnt->cl_kref);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(args->authflavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				args->authflavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(init_utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
	rpc_register_client(clnt);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	kfree(clnt->cl_principal);
out_no_principal:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}
/*
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
	};
	char servername[48];

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (args->servername == NULL) {
		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_INET: {
			struct sockaddr_in *sin =
					(struct sockaddr_in *)args->address;
			snprintf(servername, sizeof(servername), "%pI4",
					&sin->sin_addr.s_addr);
			break;
		}
		case AF_INET6: {
			struct sockaddr_in6 *sin =
					(struct sockaddr_in6 *)args->address;
			snprintf(servername, sizeof(servername), "%pI6",
					&sin->sin6_addr);
			break;
		}
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		args->servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	clnt = rpc_new_client(args, xprt);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt, RPC_TASK_SOFT);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
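/*
 * A minimal usage sketch (not part of this file): a kernel-side consumer
 * would typically fill in struct rpc_create_args and call rpc_create(),
 * then dispose of the client with rpc_shutdown_client().  The transport
 * ident, auth flavor, program and server values below are illustrative
 * assumptions only, not taken from this file:
 *
 *	struct rpc_create_args args = {
 *		.protocol   = XPRT_TRANSPORT_TCP,	// assumed transport ident
 *		.address    = (struct sockaddr *)&server_addr,	// hypothetical
 *		.addrsize   = sizeof(server_addr),
 *		.servername = "example-server",		// hypothetical name
 *		.program    = &my_rpc_program,		// hypothetical rpc_program
 *		.version    = 1,
 *		.authflavor = RPC_AUTH_UNIX,
 *		.flags      = RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	...
 *	rpc_shutdown_client(clnt);
 */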
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	if (clnt->cl_principal) {
		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
		if (new->cl_principal == NULL)
			goto out_no_principal;
	}
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	xprt_get(clnt->cl_xprt);
	kref_get(&clnt->cl_kref);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	kfree(new->cl_principal);
out_no_principal:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void
rpc_free_client(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	kfree(clnt->cl_principal);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}

/*
 * Release the RPC client's auth, then free the client itself
 */
static void
rpc_free_auth(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	if (clnt->cl_auth == NULL) {
		rpc_free_client(kref);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	kref_init(kref);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	kref_put(kref, rpc_free_client);
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	kref_put(&clnt->cl_kref, rpc_free_auth);
}
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      u32 vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task, *ret;

	task = rpc_new_task(task_setup_data);
	if (task == NULL) {
		rpc_release_calldata(task_setup_data->callback_ops,
				task_setup_data->callback_data);
		ret = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (task->tk_status != 0) {
		ret = ERR_PTR(task->tk_status);
		rpc_put_task(task);
		goto out;
	}
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	ret = task;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
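/*
 * A minimal sketch of the synchronous call path, as used elsewhere in this
 * file (see rpc_ping() below): build an rpc_message around a procedure from
 * the client's cl_procinfo table and hand it to rpc_call_sync().  The
 * procedure index and argument/result pointers are illustrative assumptions:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &clnt->cl_procinfo[MY_PROC],  // hypothetical index
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 *
 * rpc_call_async() takes the same message but returns immediately; the
 * caller's rpc_call_ops->rpc_call_done callback runs when the reply arrives.
 */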
#if defined(CONFIG_NFS_V4_1)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request to send
 * @tk_ops: RPC call ops
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				 const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (!task) {
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	task->tk_action = call_bc_transmit;
	atomic_inc(&task->tk_count);
	BUG_ON(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_NFS_V4_1 */

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	bytes = sizeof(xprt->addr);
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &clnt->cl_xprt->addr, bytes);
	return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
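/*
 * Example (a sketch, assuming the RPC_DISPLAY_ADDR selector from
 * enum rpc_display_format_t): callers typically use rpc_peeraddr2str() for
 * log messages rather than copying out the raw sockaddr, e.g.
 *
 *	dprintk("talking to server %s\n",
 *		rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *
 * The returned string is owned by the transport and must not be freed by
 * the caller.
 */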
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
void
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;
	task->tk_action = rpc_prepare_task;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

#ifdef RPC_DEBUG
static const char *rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}
#endif
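/*
 * Overview of the state machine implemented below (a summary of the
 * numbered states, not additional behaviour): a task normally moves through
 *
 *	call_start -> call_reserve -> call_allocate -> call_bind ->
 *	call_connect -> call_transmit -> call_status -> call_decode
 *
 * with call_refresh/call_refreshresult entered when the credentials need
 * refreshing, and call_timeout re-driving call_bind on major timeouts.
 */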
/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__func__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	if (req->rq_buffer)
		return;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
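/*
 * Restating the arithmetic above: sizes are first computed in XDR quads
 * (32-bit words) and then converted to bytes by the "<<= 2" shifts, so
 *
 *	rq_callsize = (RPC_CALLHDRSIZE + 2 * slack + p_arglen) * 4 bytes
 *	rq_rcvsize  = (RPC_REPHDRSIZE  +     slack + p_replen) * 4 bytes
 *
 * and a single buf_alloc() call provides both regions back to back;
 * rpc_xdr_encode() below splits them into rq_snd_buf and rq_rcv_buf.
 */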
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}

static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->flags = 0;
	buf->len = 0;
	buf->buflen = len;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	encode;
	__be32		*p;

	dprint_status(task);

	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	p = rpc_encode_header(task);
	if (p == NULL) {
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}

	encode = task->tk_msg.rpc_proc->p_encode;
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	if (status >= 0 || status == -EAGAIN) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	default:
		rpc_exit(task, -EIO);
	}
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (rpc_reply_expected(task))
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;
	switch (task->tk_status) {
	case -EAGAIN:
		break;
	default:
		xprt_end_transmit(task);
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		rpc_task_force_reencode(task);
	}
}

#if defined(CONFIG_NFS_V4_1)
/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	BUG_ON(task->tk_status != 0);
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status == -EAGAIN) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	}

	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		return;
	}

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
	case 0:
		/* Success */
		break;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(task->tk_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		BUG_ON(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
	case -EPIPE:
	case -ENOTCONN:
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	p = rpc_verify_header(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
	}
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
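/*
 * Layout produced by rpc_encode_header() above (one XDR quad per row),
 * before rpcauth_marshcred() appends the credential and verifier:
 *
 *	+----------------------------+
 *	| XID                        |
 *	| msg type (RPC_CALL)        |
 *	| RPC version (RPC_VERSION)  |
 *	| program number             |
 *	| program version            |
 *	| procedure number           |
 *	| credential, verifier ...   |
 *	+----------------------------+
 *
 * rpc_verify_header() below parses the corresponding reply header.
 */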
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	if ((len -= 3) < 0)
		goto out_overflow;

	p += 1; /* skip XID */
	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
			task->tk_pid, __func__, n);
		goto out_garbage;
	}

	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
				"mismatch!\n",
				task->tk_pid, __func__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
				"unknown error: %x\n",
				task->tk_pid, __func__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			xprt_release(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __func__);
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__func__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__func__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);

#ifdef RPC_DEBUG
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";
	char *p, action[KSYM_SYMBOL_LEN];

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	/* map tk_action pointer to a function name; then trim off
	 * the "+0x0 [sunrpc]" */
	sprint_symbol(action, (unsigned long)task->tk_action);
	p = strchr(action, '+');
	if (p)
		*p = '\0';

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
		action, rpc_waitq);
}

void rpc_show_tasks(void)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;

	spin_lock(&rpc_client_lock);
	list_for_each_entry(clnt, &all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&rpc_client_lock);
}
#endif