/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
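 *
 * Purely as an illustration of the batching rule above, a batched sequence
 * might look roughly like the hedged sketch below.  The handle "clnt", the
 * LOGPROC_APPEND procedure number, and the msg1/msg2 arguments are
 * hypothetical, not part of this library:
 *
 *	struct timeval zero = { 0, 0 };		(zero timeout => batch)
 *	struct timeval wait = { 25, 0 };
 *
 *	(batched calls: NULL result xdr routine plus a zero timeout,
 *	 so each call returns as soon as it has been queued)
 *	(void)clnt_call(clnt, LOGPROC_APPEND, (xdrproc_t)xdr_wrapstring,
 *	    &msg1, (xdrproc_t)NULL, NULL, zero);
 *	(void)clnt_call(clnt, LOGPROC_APPEND, (xdrproc_t)xdr_wrapstring,
 *	    &msg2, (xdrproc_t)NULL, NULL, zero);
 *
 *	(a final non-batched call, here the conventional NULLPROC ping,
 *	 flushes the pipeline and waits for a reply)
 *	if (clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait) != RPC_SUCCESS)
 *		clnt_perror(clnt, "flush of batched calls failed");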
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static struct clnt_ops *clnt_vc_ops(void);
static bool_t time_not_ok(struct timeval *);
static int read_vc(void *, void *, int);
static int write_vc(void *, void *, int);
static int __msgwrite(int, void *, size_t);
static int __msgread(int, void *, size_t);

struct ct_data {
        int             ct_fd;          /* connection's fd */
        bool_t          ct_closeit;     /* close it on destroy */
        struct timeval  ct_wait;        /* wait interval in milliseconds */
        bool_t          ct_waitset;     /* wait set by clnt_control? */
        struct netbuf   ct_addr;        /* remote addr */
        struct rpc_err  ct_error;
        union {
                char    ct_mcallc[MCALL_MSG_SIZE];      /* marshalled callmsg */
                u_int32_t ct_mcalli;
        } ct_u;
        u_int           ct_mpos;        /* pos after marshal */
        XDR             ct_xdrs;        /* XDR stream */
};

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  Vc_fd_lock[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
static int      *vc_fd_locks;
static cond_t   *vc_cv;
#define release_fd_lock(fd, mask) {     \
        mutex_lock(&clnt_fd_lock);      \
        vc_fd_locks[fd] = 0;            \
        mutex_unlock(&clnt_fd_lock);    \
        thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);        \
        cond_signal(&vc_cv[fd]);        \
}

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Callers may wish to
 *     set it to something more useful.
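 *
 * A hedged usage sketch follows (not taken from this file; the "svcaddr"
 * netbuf, the connected socket "fd", and the MYPROG/MYVERS numbers are
 * illustrative assumptions):
 *
 *	struct netbuf *svcaddr;		(server address, e.g. from rpcb_getaddr())
 *	int fd;				(connected, or connectable, TCP socket)
 *	CLIENT *clnt;
 *
 *	clnt = clnt_vc_create(fd, svcaddr, MYPROG, MYVERS, 0, 0);
 *	if (clnt == NULL)
 *		clnt_pcreateerror("clnt_vc_create");
 *	else {
 *		(void)clnt_control(clnt, CLSET_FD_CLOSE, NULL);
 *		clnt->cl_auth = authunix_create_default();
 *	}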
 *
 * fd should be an open socket
 */
CLIENT *
clnt_vc_create(fd, raddr, prog, vers, sendsz, recvsz)
        int fd;                         /* open file descriptor */
        const struct netbuf *raddr;     /* server's address */
        const rpcprog_t prog;           /* program number */
        const rpcvers_t vers;           /* version number */
        u_int sendsz;                   /* buffer send size */
        u_int recvsz;                   /* buffer receive size */
{
        CLIENT *cl;                     /* client handle */
        struct ct_data *ct = NULL;      /* private data */
        struct timeval now;
        struct rpc_msg call_msg;
        static u_int32_t disrupt;
        sigset_t mask;
        sigset_t newmask;
        struct sockaddr_storage ss;
        socklen_t slen;
        struct __rpc_sockinfo si;

        if (disrupt == 0)
                disrupt = (u_int32_t)(long)raddr;

        cl = (CLIENT *)mem_alloc(sizeof (*cl));
        ct = (struct ct_data *)mem_alloc(sizeof (*ct));
        if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
                (void) syslog(LOG_ERR, clnt_vc_errstr,
                    clnt_vc_str, __no_mem_str);
                rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                rpc_createerr.cf_error.re_errno = errno;
                goto err;
        }
        ct->ct_addr.buf = NULL;
        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        if (vc_fd_locks == (int *) NULL) {
                int cv_allocsz, fd_allocsz;
                int dtbsize = __rpc_dtbsize();

                fd_allocsz = dtbsize * sizeof (int);
                vc_fd_locks = (int *) mem_alloc(fd_allocsz);
                if (vc_fd_locks == (int *) NULL) {
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else
                        memset(vc_fd_locks, '\0', fd_allocsz);

                assert(vc_cv == (cond_t *) NULL);
                cv_allocsz = dtbsize * sizeof (cond_t);
                vc_cv = (cond_t *) mem_alloc(cv_allocsz);
                if (vc_cv == (cond_t *) NULL) {
                        mem_free(vc_fd_locks, fd_allocsz);
                        vc_fd_locks = (int *) NULL;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else {
                        int i;

                        for (i = 0; i < dtbsize; i++)
                                cond_init(&vc_cv[i], 0, (void *) 0);
                }
        } else
                assert(vc_cv != (cond_t *) NULL);

        /*
         * XXX - fvdl connecting while holding a mutex?
         */
        slen = sizeof ss;
        if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
                if (errno != ENOTCONN) {
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
                if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
        }
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        if (!__rpc_fd2sockinfo(fd, &si))
                goto err;

        ct->ct_closeit = FALSE;

        /*
         * Set up private data struct
         */
        ct->ct_fd = fd;
        ct->ct_wait.tv_usec = 0;
        ct->ct_waitset = FALSE;
        ct->ct_addr.buf = malloc(raddr->maxlen);
        if (ct->ct_addr.buf == NULL)
                goto err;
        memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
        ct->ct_addr.len = raddr->len;
        ct->ct_addr.maxlen = raddr->maxlen;

        /*
         * Initialize call message
         */
        (void)gettimeofday(&now, NULL);
        call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
        call_msg.rm_direction = CALL;
        call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
        call_msg.rm_call.cb_prog = (u_int32_t)prog;
        call_msg.rm_call.cb_vers = (u_int32_t)vers;

        /*
         * pre-serialize the static part of the call msg and stash it away
         */
        xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
            XDR_ENCODE);
        if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
                if (ct->ct_closeit) {
                        (void)_close(fd);
                }
                goto err;
        }
        ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
        XDR_DESTROY(&(ct->ct_xdrs));
        assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

        /*
         * Create a client handle which uses xdrrec for serialization
         * and authnone for authentication.
         */
        cl->cl_ops = clnt_vc_ops();
        cl->cl_private = ct;
        cl->cl_auth = authnone_create();
        sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
        recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
        xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
            cl->cl_private, read_vc, write_vc);
        return (cl);

err:
        if (ct) {
                if (ct->ct_addr.len)
                        mem_free(ct->ct_addr.buf, ct->ct_addr.len);
                mem_free(ct, sizeof (struct ct_data));
        }
        if (cl)
                mem_free(cl, sizeof (CLIENT));
        return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(cl, proc, xdr_args, args_ptr, xdr_results, results_ptr, timeout)
        CLIENT *cl;
        rpcproc_t proc;
        xdrproc_t xdr_args;
        void *args_ptr;
        xdrproc_t xdr_results;
        void *results_ptr;
        struct timeval timeout;
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        XDR *xdrs = &(ct->ct_xdrs);
        struct rpc_msg reply_msg;
        u_int32_t x_id;
        u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;      /* yuk */
        bool_t shipnow;
        int refreshes = 2;
        sigset_t mask, newmask;
        int rpc_lock_value;
        bool_t reply_stat;

        assert(cl != NULL);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);
        if (!ct->ct_waitset) {
                /* If time is not within limits, we ignore it. */
                if (time_not_ok(&timeout) == FALSE)
                        ct->ct_wait = timeout;
        }

        shipnow =
            (xdr_results == NULL && timeout.tv_sec == 0
            && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
        xdrs->x_op = XDR_ENCODE;
        ct->ct_error.re_status = RPC_SUCCESS;
        x_id = ntohl(--(*msg_x_id));

        if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
                if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
                    (! XDR_PUTINT32(xdrs, &proc)) ||
                    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
                    (! (*xdr_args)(xdrs, args_ptr))) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                ct->ct_error.re_status = RPC_CANTENCODEARGS;
                        (void)xdrrec_endofrecord(xdrs, TRUE);
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
        } else {
                *(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
                if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
                    ct->ct_mpos + sizeof(uint32_t),
                    xdrs, xdr_args, args_ptr)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                ct->ct_error.re_status = RPC_CANTENCODEARGS;
                        (void)xdrrec_endofrecord(xdrs, TRUE);
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
        }
        if (! xdrrec_endofrecord(xdrs, shipnow)) {
                release_fd_lock(ct->ct_fd, mask);
                return (ct->ct_error.re_status = RPC_CANTSEND);
        }
        if (! shipnow) {
                release_fd_lock(ct->ct_fd, mask);
                return (RPC_SUCCESS);
        }
        /*
         * Hack to provide rpc-based message passing
         */
        if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
                release_fd_lock(ct->ct_fd, mask);
                return(ct->ct_error.re_status = RPC_TIMEDOUT);
        }


        /*
         * Keep receiving until we get a valid transaction id
         */
        xdrs->x_op = XDR_DECODE;
        while (TRUE) {
                reply_msg.acpted_rply.ar_verf = _null_auth;
                reply_msg.acpted_rply.ar_results.where = NULL;
                reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
                if (! xdrrec_skiprecord(xdrs)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                /* now decode and validate the response header */
                if (! xdr_replymsg(xdrs, &reply_msg)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                continue;
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                if (reply_msg.rm_xid == x_id)
                        break;
        }

        /*
         * process header
         */
        _seterr_reply(&reply_msg, &(ct->ct_error));
        if (ct->ct_error.re_status == RPC_SUCCESS) {
                if (! AUTH_VALIDATE(cl->cl_auth,
                    &reply_msg.acpted_rply.ar_verf)) {
                        ct->ct_error.re_status = RPC_AUTHERROR;
                        ct->ct_error.re_why = AUTH_INVALIDRESP;
                } else {
                        if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
                                reply_stat = (*xdr_results)(xdrs, results_ptr);
                        } else {
                                reply_stat = __rpc_gss_unwrap(cl->cl_auth,
                                    xdrs, xdr_results, results_ptr);
                        }
                        if (! reply_stat) {
                                if (ct->ct_error.re_status == RPC_SUCCESS)
                                        ct->ct_error.re_status =
                                                RPC_CANTDECODERES;
                        }
                }
                /* free verifier ... */
                if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
                        xdrs->x_op = XDR_FREE;
                        (void)xdr_opaque_auth(xdrs,
                            &(reply_msg.acpted_rply.ar_verf));
                }
        }       /* end successful completion */
        else {
                /* maybe our credentials need to be refreshed ... */
                if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
                        goto call_again;
        }       /* end of unsuccessful completion */
        release_fd_lock(ct->ct_fd, mask);
        return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(cl, errp)
        CLIENT *cl;
        struct rpc_err *errp;
{
        struct ct_data *ct;

        assert(cl != NULL);
        assert(errp != NULL);

        ct = (struct ct_data *) cl->cl_private;
        *errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(cl, xdr_res, res_ptr)
        CLIENT *cl;
        xdrproc_t xdr_res;
        void *res_ptr;
{
        struct ct_data *ct;
        XDR *xdrs;
        bool_t dummy;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;
        xdrs = &(ct->ct_xdrs);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        xdrs->x_op = XDR_FREE;
        dummy = (*xdr_res)(xdrs, res_ptr);
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct->ct_fd]);

        return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(cl)
        CLIENT *cl;
{
}

static bool_t
clnt_vc_control(cl, request, info)
        CLIENT *cl;
        u_int request;
        void *info;
{
        struct ct_data *ct;
        void *infop = info;
        sigset_t mask;
        sigset_t newmask;
        int rpc_lock_value;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);

        switch (request) {
        case CLSET_FD_CLOSE:
                ct->ct_closeit = TRUE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        case CLSET_FD_NCLOSE:
                ct->ct_closeit = FALSE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        default:
                break;
        }

        /* for other requests which use info */
        if (info == NULL) {
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        switch (request) {
        case CLSET_TIMEOUT:
                if (time_not_ok((struct timeval *)info)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (FALSE);
                }
                ct->ct_wait = *(struct timeval *)infop;
                ct->ct_waitset = TRUE;
                break;
        case CLGET_TIMEOUT:
                *(struct timeval *)infop = ct->ct_wait;
                break;
        case CLGET_SERVER_ADDR:
                (void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
                break;
        case CLGET_FD:
                *(int *)info = ct->ct_fd;
                break;
        case CLGET_SVC_ADDR:
                /* The caller should not free this memory area */
                *(struct netbuf *)info = ct->ct_addr;
                break;
        case CLSET_SVC_ADDR:            /* set to new address */
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        case CLGET_XID:
                /*
                 * use the knowledge that xid is the
                 * first element in the call structure.
                 * This will get the xid of the PREVIOUS call
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
                break;
        case CLSET_XID:
                /* This will set the xid of the NEXT call */
                *(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
                    htonl(*((u_int32_t *)info) + 1);
                /* increment by 1 as clnt_vc_call() decrements once */
                break;
        case CLGET_VERS:
                /*
                 * This RELIES on the information that, in the call body,
                 * the version number field is the fifth field from the
                 * beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_VERS:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        case CLGET_PROG:
                /*
                 * This RELIES on the information that, in the call body,
                 * the program number field is the fourth field from the
                 * beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_PROG:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        default:
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        release_fd_lock(ct->ct_fd, mask);
        return (TRUE);
}


static void
clnt_vc_destroy(cl)
        CLIENT *cl;
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        int ct_fd = ct->ct_fd;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *) cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct_fd])
                cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
        if (ct->ct_closeit && ct->ct_fd != -1) {
                (void)_close(ct->ct_fd);
        }
        XDR_DESTROY(&(ct->ct_xdrs));
        if (ct->ct_addr.buf)
                free(ct->ct_addr.buf);
        mem_free(ct, sizeof(struct ct_data));
        if (cl->cl_netid && cl->cl_netid[0])
                mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
        if (cl->cl_tp && cl->cl_tp[0])
                mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
        mem_free(cl, sizeof(CLIENT));
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
 */
static int
read_vc(ctp, buf, len)
        void *ctp;
        void *buf;
        int len;
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        struct pollfd fd;
        int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
            (ct->ct_wait.tv_usec / 1000));

        if (len == 0)
                return (0);
        fd.fd = ct->ct_fd;
        fd.events = POLLIN;
        for (;;) {
                switch (_poll(&fd, 1, milliseconds)) {
                case 0:
                        ct->ct_error.re_status = RPC_TIMEDOUT;
                        return (-1);

                case -1:
                        if (errno == EINTR)
                                continue;
                        ct->ct_error.re_status = RPC_CANTRECV;
                        ct->ct_error.re_errno = errno;
                        return (-1);
                }
                break;
        }

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                len = __msgread(ct->ct_fd, buf, (size_t)len);
        } else {
                len = _read(ct->ct_fd, buf, (size_t)len);
        }

        switch (len) {
        case 0:
                /* premature eof */
                ct->ct_error.re_errno = ECONNRESET;
                ct->ct_error.re_status = RPC_CANTRECV;
                len = -1;       /* it's really an error */
                break;

        case -1:
                ct->ct_error.re_errno = errno;
                ct->ct_error.re_status = RPC_CANTRECV;
                break;
        }
        return (len);
}

static int
write_vc(ctp, buf, len)
        void *ctp;
        void *buf;
        int len;
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        int i, cnt;

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = __msgwrite(ct->ct_fd, buf,
                            (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        } else {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        }
        return (len);
}

static struct clnt_ops *
clnt_vc_ops()
{
        static struct clnt_ops ops;
        sigset_t mask, newmask;

        /* VARIABLES PROTECTED BY ops_lock: ops */

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&ops_lock);
        if (ops.cl_call == NULL) {
                ops.cl_call = clnt_vc_call;
                ops.cl_abort = clnt_vc_abort;
                ops.cl_geterr = clnt_vc_geterr;
                ops.cl_freeres = clnt_vc_freeres;
                ops.cl_destroy = clnt_vc_destroy;
                ops.cl_control = clnt_vc_control;
        }
        mutex_unlock(&ops_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(t)
        struct timeval *t;
{
        return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
            t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(sock, buf, cnt)
        int sock;
        void *buf;
        size_t cnt;
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return(_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(sock, buf, cnt)
        int sock;
        void *buf;
        size_t cnt;
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        cm.cmsg.cmsg_type = SCM_CREDS;
        cm.cmsg.cmsg_level = SOL_SOCKET;
        cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return(_sendmsg(sock, &msg, 0));
}