/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);


static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}
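/*
 * For illustration (the directory name is an assumption, not taken
 * from this file): with a pipe_dir_name of "nfs", successive clients
 * get rpc_pipefs directories named "nfs/clnt0", "nfs/clnt1", ...
 * (clntid is formatted in hex), and the loop above simply retries
 * with the next id whenever a name already exists.
 */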
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
out_no_xprt:
	return ERR_PTR(err);
}

/**
 * rpc_create_client - Create an RPC client
 * @xprt: pointer to the transport to use for this client
 * @servname: name of the server
 * @info: the RPC program
 * @version: the RPC program version
 * @authflavor: the RPC authentication flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
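/*
 * A minimal usage sketch (the transport helper, server address and
 * program table below are illustrative assumptions, not part of this
 * file):
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, NULL);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *	clnt = rpc_create_client(xprt, "example-server", &nfs_program,
 *				 NFS_VERSION, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *
 * Note that on failure the transport has already been destroyed by
 * rpc_new_client(), so the caller must not free it again.
 */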
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
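/*
 * Lifetime sketch, as implemented above: cl_count pins the structure
 * itself (clones take one reference on their parent), while cl_users
 * counts the tasks currently using the client.  rpc_release_client()
 * drops a user and wakes destroy_wait; rpc_shutdown_client() kills
 * outstanding tasks until cl_users reaches zero and then drops the
 * final cl_count reference via rpc_destroy_client().
 */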
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * Perform a synchronous RPC call, blocking until the reply has been
 * received or an error occurs.
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}
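/*
 * A minimal usage sketch for rpc_call_sync() (the procedure table and
 * argument/result types are illustrative assumptions):
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs_procedures[NFSPROC_GETATTR],
 *		.rpc_argp = &fhandle,
 *		.rpc_resp = &fattr,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 *
 * A return value of zero means the server accepted and answered the
 * call; a negative value is a local errno or a translated RPC-level
 * error from call_verify() below.
 */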
/*
 * Perform an asynchronous RPC call; completion is reported through
 * the tk_ops callbacks.
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}


void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
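/*
 * Overview of the call FSM implemented below: a successful call
 * typically walks the states
 *
 *	call_start -> call_reserve -> call_reserveresult ->
 *	call_allocate -> call_bind -> call_connect -> call_transmit ->
 *	call_status -> call_decode
 *
 * with call_encode invoked from call_transmit, and with call_refresh,
 * call_timeout and the *_status states handling the error paths by
 * re-entering earlier states.
 */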
/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
			task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	/* Double the estimate: the buffer is split between the send
	 * and receive halves in call_encode() below. */
	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}
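/*
 * Resulting buffer layout, as a sketch: call_allocate obtained
 * 2 * bufsiz bytes, and call_encode splits them evenly between the
 * send and receive XDR buffers:
 *
 *	rq_buffer: [ sndbuf->head[0]     | rcvbuf->head[0]     ]
 *	            <- rq_bufsize / 2 ->  <- rq_bufsize / 2 ->
 */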
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
				task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}
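/*
 * Note on the transmit path below: encoding is deferred until just
 * before the data is handed to the transport so that security
 * flavours which embed a sequence number in the request (rpcsec_gss)
 * see a value that is current at transmission time; see
 * rpc_task_need_encode() above.
 */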
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
			task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a.	Handle transmission errors.
 */
static void
call_transmit_status(struct rpc_task *task)
{
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
	call_status(task);
}

/*
 * 6b.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
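/*
 * Timeout policy sketch, as implemented above: a task marked
 * RPC_TASK_SOFT fails the call with -EIO on a major timeout, while a
 * "hard" task logs "server not responding", invalidates the port via
 * rpc_force_rebind() and retries indefinitely, reusing the same XID
 * because the request slot is never released.
 */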
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/* A sane reply is at least 12 bytes: xid, direction, and
	 * accept/reject status. */
	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}
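/*
 * Sketch of the call header marshalled by call_header() below
 * (RFC 1831 call body), one 32-bit word per fixed field after any
 * transport header:
 *
 *	xid | CALL | RPC version (2) | prog | vers | proc
 *
 * followed by the variable-length, flavour-dependent credential and
 * verifier written by rpcauth_marshcred().
 */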
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
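/*
 * Sketch of the reply header checked by call_verify() below: after
 * the xid come
 *
 *	REPLY | MSG_ACCEPTED | verifier | accept status
 *
 * for accepted replies, or MSG_DENIED followed by the rejection and
 * auth error codes handled in the switches below.
 */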
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

/*
 * Ping the server with a NULL procedure call to check that it is up
 * and speaks this program/version.
 */
int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}