/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void	call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static __be32	*rpc_encode_header(struct rpc_task *task);
static __be32	*rpc_verify_header(struct rpc_task *task);
static int	rpc_ping(struct rpc_clnt *clnt, int flags);
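
/*
 * Editor's overview of the call_* state machine declared above (typical
 * path for a first request on a transport; states that are not needed,
 * e.g. bind/connect on an already bound and connected transport, are
 * skipped):
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	-> call_bind -> call_bind_status -> call_connect
 *	-> call_connect_status -> call_transmit (encoding is done here via
 *	rpc_xdr_encode) -> call_transmit_status -> call_status -> call_decode
 *
 * call_refresh/call_refreshresult and call_timeout are side paths entered
 * when credentials must be (re)established or a retransmit timer expires.
 */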

static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	struct nameidata nd;
	struct path path;
	char name[15];
	struct qstr q = {
		.name = name,
	};
	int error;

	clnt->cl_path.mnt = ERR_PTR(-ENOENT);
	clnt->cl_path.dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	path.mnt = rpc_get_mount();
	if (IS_ERR(path.mnt))
		return PTR_ERR(path.mnt);
	error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
	if (error)
		goto err;

	for (;;) {
		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		q.hash = full_name_hash(q.name, q.len);
		path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
		if (!IS_ERR(path.dentry))
			break;
		error = PTR_ERR(path.dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry"
					" %s/%s, error %d\n",
					dir_name, name, error);
			goto err_path_put;
		}
	}
	path_put(&nd.path);
	clnt->cl_path = path;
	return 0;
err_path_put:
	path_put(&nd.path);
err:
	rpc_put_mount();
	return error;
}

static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
	struct rpc_program *program = args->program;
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	size_t len;

	/* sanity check the name before trying to print it */
	err = -EINVAL;
	len = strlen(args->servername);
	if (len > RPC_MAXNETNAMELEN)
		goto out_no_rpciod;
	len++;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;

	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, args->servername, len);

	clnt->cl_xprt = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = args->prognumber ? : program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		clnt->cl_timeout = &clnt->cl_timeout_default;
	}

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
	clnt->cl_principal = NULL;
	if (args->client_name) {
		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
		if (!clnt->cl_principal)
			goto out_no_principal;
	}

	kref_init(&clnt->cl_kref);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(args->authflavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				args->authflavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(init_utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
	rpc_register_client(clnt);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_path.dentry)) {
		rpc_remove_client_dir(clnt->cl_path.dentry);
		rpc_put_mount();
	}
out_no_path:
	kfree(clnt->cl_principal);
out_no_principal:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}

/*
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
	};
	char servername[48];

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (args->servername == NULL) {
		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_INET: {
			struct sockaddr_in *sin =
					(struct sockaddr_in *)args->address;
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		}
		case AF_INET6: {
			struct sockaddr_in6 *sin =
					(struct sockaddr_in6 *)args->address;
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin->sin6_addr);
			break;
		}
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		args->servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	clnt = rpc_new_client(args, xprt);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt, RPC_TASK_SOFT);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	if (clnt->cl_principal) {
		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
		if (new->cl_principal == NULL)
			goto out_no_principal;
	}
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	xprt_get(clnt->cl_xprt);
	kref_get(&clnt->cl_kref);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	kfree(new->cl_principal);
out_no_principal:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
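
/*
 * Illustrative (hypothetical) use of rpc_create() by an RPC program such
 * as a filesystem client.  "my_program", "my_version" and the address
 * below are placeholders and not defined in this file; server_addr is a
 * previously initialized struct sockaddr_in:
 *
 *	struct rpc_create_args args = {
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&server_addr,
 *		.addrsize	= sizeof(server_addr),
 *		.servername	= "server.example.com",
 *		.program	= &my_program,
 *		.version	= my_version,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	...
 *	rpc_shutdown_client(clnt);
 */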

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void
rpc_free_client(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (!IS_ERR(clnt->cl_path.dentry)) {
		rpc_remove_client_dir(clnt->cl_path.dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	kfree(clnt->cl_principal);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}

/*
 * Release the RPC client's auth handle, then free the client itself
 */
static void
rpc_free_auth(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	if (clnt->cl_auth == NULL) {
		rpc_free_client(kref);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	kref_init(kref);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	kref_put(kref, rpc_free_client);
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	kref_put(&clnt->cl_kref, rpc_free_auth);
}

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      u32 vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task, *ret;

	task = rpc_new_task(task_setup_data);
	if (task == NULL) {
		rpc_release_calldata(task_setup_data->callback_ops,
				task_setup_data->callback_data);
		ret = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (task->tk_status != 0) {
		ret = ERR_PTR(task->tk_status);
		rpc_put_task(task);
		goto out;
	}
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	ret = task;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
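
/*
 * Sketch of how a caller typically drives a call through the interfaces
 * above (hypothetical procedure table entry "MY_PROC" and argument/result
 * structures; none of these names are defined in this file):
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &my_program_procedures[MY_PROC],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *
 *	status = rpc_call_sync(clnt, &msg, 0);
 *
 * An asynchronous caller passes its own rpc_call_ops to rpc_call_async()
 * (or rpc_run_task()) instead and picks up the result in ->rpc_call_done().
 */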

#if defined(CONFIG_NFS_V4_1)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request to send
 * @tk_ops: RPC call ops
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
					const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (!task) {
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	task->tk_action = call_bc_transmit;
	atomic_inc(&task->tk_count);
	BUG_ON(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_NFS_V4_1 */

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	bytes = sizeof(xprt->addr);
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &clnt->cl_xprt->addr, bytes);
	return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
void
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;
	task->tk_action = rpc_prepare_task;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

#ifdef RPC_DEBUG
static const char *rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}
#endif

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__func__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
		break;
	}
	rpc_exit(task, status);
}
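
/*
 * Note on the buffer sizing done in call_allocate() below: a single
 * allocation of rq_callsize + rq_rcvsize bytes backs both the send and
 * receive XDR buffers.  The sizes are computed in 32-bit XDR words
 * ("quads") and then shifted left by two to convert to bytes.  For
 * example (numbers purely illustrative), with an auth slack of 2 words
 * and p_arglen of 10 words:
 *
 *	rq_callsize = (RPC_CALLHDRSIZE + 2 * 2 + 10) << 2 bytes
 *
 * rpc_xdr_encode() later splits the allocation into rq_snd_buf and
 * rq_rcv_buf at the rq_callsize boundary.
 */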

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	if (req->rq_buffer)
		return;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
	task->tk_rqstp->rq_bytes_sent = 0;
}

static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->flags = 0;
	buf->len = 0;
	buf->buflen = len;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	encode;
	__be32		*p;

	dprint_status(task);

	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	p = rpc_encode_header(task);
	if (p == NULL) {
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}

	encode = task->tk_msg.rpc_proc->p_encode;
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	if (status >= 0 || status == -EAGAIN) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	default:
		rpc_exit(task, -EIO);
	}
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (rpc_reply_expected(task))
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;
	switch (task->tk_status) {
	case -EAGAIN:
		break;
	default:
		xprt_end_transmit(task);
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		rpc_task_force_reencode(task);
	}
}

#if defined(CONFIG_NFS_V4_1)
/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	BUG_ON(task->tk_status != 0);
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status == -EAGAIN) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	}

	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		return;
	}

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
	case 0:
		/* Success */
		break;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(task->tk_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		BUG_ON(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
	case -EPIPE:
	case -ENOTCONN:
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
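
/*
 * Editor's note on call_timeout() above: xprt_adjust_timeout() returning
 * zero indicates a "minor" timeout (the per-request retransmit timer
 * simply advanced), and the call is re-driven through call_bind without
 * further action.  A "major" timeout fails soft tasks with -EIO; for hard
 * tasks it forces a rebind and invalidates the credential before
 * retrying.  The request slot is never released here, so retransmits
 * reuse the original XID.
 */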

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	p = rpc_verify_header(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
	}
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

static __be32 *
rpc_encode_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
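
/*
 * The call header marshalled by rpc_encode_header() above follows the
 * ONC RPC call message layout (RFC 1831): xid, message type (CALL),
 * RPC version 2, program number, program version and procedure number,
 * followed by the credential and verifier material emitted by
 * rpcauth_marshcred().  rpc_verify_header() below parses the matching
 * REPLY message.
 */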

static __be32 *
rpc_verify_header(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	if ((len -= 3) < 0)
		goto out_overflow;

	p += 1; /* skip XID */
	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
			task->tk_pid, __func__, n);
		goto out_garbage;
	}

	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
				"mismatch!\n",
				task->tk_pid, __func__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
				"unknown error: %x\n",
				task->tk_pid, __func__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			xprt_release(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __func__);
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__func__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__func__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);

#ifdef RPC_DEBUG
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";
	char *p, action[KSYM_SYMBOL_LEN];

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	/* map tk_action pointer to a function name; then trim off
	 * the "+0x0 [sunrpc]" */
	sprint_symbol(action, (unsigned long)task->tk_action);
	p = strchr(action, '+');
	if (p)
		*p = '\0';

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
		action, rpc_waitq);
}

void rpc_show_tasks(void)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;

	spin_lock(&rpc_client_lock);
	list_for_each_entry(clnt, &all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&rpc_client_lock);
}
#endif