/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);


static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
	}
}

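/*
 * Note on the naming scheme above: each client gets its own directory
 * under the rpc_pipefs mount, formed as "<dir_name>/clnt%x" with a
 * monotonically increasing hexadecimal id (e.g. a dir_name of "nfs"
 * yields entries such as nfs/clnt0, nfs/clnt1, ...).  An -EEXIST from
 * rpc_mkdir simply means that id is already in use, so we retry with
 * the next one; any other error is fatal and drops the reference taken
 * by rpc_get_mount().
 */
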
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers,
		  rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_pathname);
		dput(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
out_no_xprt:
	return ERR_PTR(err);
}

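/*
 * Note that rpc_new_client() takes ownership of the transport: once the
 * initial argument checks have passed, every error path above ends in
 * xprt_destroy(xprt), so a caller must not touch the xprt again after
 * this function returns an ERR_PTR().
 */
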
/**
 * rpc_create_client - Create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	if (!IS_ERR(new->cl_dentry)) {
		dget(new->cl_dentry);
		rpc_get_mount();
	}
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	new->cl_metrics = rpc_alloc_iostats(clnt);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

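/*
 * A clone shares the parent's transport and auth handle, taking an
 * extra reference on the parent, the pipefs dentry/mount and the auth,
 * while getting its own iostats.  Autobind, oneshot and dead are
 * cleared so that releasing the clone never implicitly destroys it.
 */
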
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	if (!IS_ERR(clnt->cl_dentry)) {
		dput(clnt->cl_dentry);
		rpc_put_mount();
	}
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

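/*
 * Reference counting summary: cl_users counts active users of the
 * client (rpc_shutdown_client() waits on destroy_wait until it drops
 * to zero), while cl_count counts references to the structure itself
 * and is what rpc_destroy_client() decrements before freeing anything.
 */
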
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}

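/*
 * Illustrative sketch only (not part of the interface definition): a
 * typical synchronous caller fills in an rpc_message and hands it to
 * rpc_call_sync().  Here example_procinfo, args, res and clnt are
 * hypothetical placeholders supplied by the caller's program:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procinfo,
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	err = rpc_call_sync(clnt, &msg, 0);
 *
 * Leaving msg.rpc_cred NULL makes rpc_call_setup() bind a credential
 * via rpcauth_bindcred(); see rpc_ping() at the end of this file for a
 * minimal in-tree caller.
 */
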
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	status = -EIO;
	if (clnt->cl_dead)
		goto out_release;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out_release;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
	return status;
out_release:
	if (tk_ops->rpc_release != NULL)
		tk_ops->rpc_release(data);
	return status;
}


void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

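/*
 * The state machine below proceeds, in the common case, through
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind -> call_connect -> call_transmit (which encodes
 *	  via call_encode) -> call_transmit_status -> call_status
 *	  -> call_decode -> rpc_exit_task
 *
 * call_bind_status and call_connect_status are only entered when a
 * portmap lookup or a transport connect was actually needed, and
 * call_refresh/call_refreshresult and call_timeout handle credential
 * refresh and retransmission.  Each handler sets task->tk_action to
 * the next state before returning to the scheduler.
 */
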
/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}

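/*
 * Note that call_allocate asked for twice (p_bufsiz + RPC_SLACK_SPACE):
 * call_encode splits req->rq_buffer in half, using the lower half as
 * the send buffer head and the upper half as the receive buffer head,
 * so rq_bufsize >> 1 above is the usable size in each direction.
 */
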
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}

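/*
 * A connection failure above first forces a rebind (rpc_force_rebind
 * clears cl_port on autobinding clients), so the subsequent trip
 * through call_bind re-queries the portmapper in case the remote
 * service has moved to a different port.
 */
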
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a.	Handle transmission errors.
 */
static void
call_transmit_status(struct rpc_task *task)
{
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
	call_status(task);
}

/*
 * 6b.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

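/*
 * Timeout semantics: a "soft" task (RPC_IS_SOFT) gives up with -EIO
 * after a major timeout, whereas a hard task logs "not responding,
 * still trying" once (RPC_CALL_MAJORSEEN), forces a rebind and keeps
 * retransmitting from call_bind with the same XID.
 */
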
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_received.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

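/*
 * A refresh that fails with -EACCES is fatal to the call; any other
 * failure loops back to call_refresh, delayed by three seconds unless
 * the failure was itself a timeout.
 */
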
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

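/*
 * call_verify() below walks the reply header that corresponds to the
 * call header built above.  It returns a pointer just past the accept
 * status (i.e. the start of the procedure results) on success,
 * ERR_PTR(-EAGAIN) when the call should be retransmitted, and another
 * ERR_PTR() value after it has already terminated the task via
 * rpc_exit().
 */
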
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}