/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
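
/*
 * Illustrative sketch (not part of this file): batching as described above,
 * assuming a connected CLIENT handle "clnt" and an application argument xdr
 * routine xdr_myargs with procedure number BATCHPROC (both hypothetical
 * placeholders).  Each batched call passes a NULL result xdr routine and a
 * zero timeout; a final ordinary call flushes the send buffer and
 * resynchronizes with the server:
 *
 *	static const struct timeval zero_tv = { 0, 0 };
 *	struct timeval tv = { 25, 0 };
 *	struct myargs a;
 *
 *	(void)clnt_call(clnt, BATCHPROC, (xdrproc_t)xdr_myargs, (char *)&a,
 *	    (xdrproc_t)NULL, NULL, zero_tv);	(queued, not necessarily sent)
 *	...
 *	(void)clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, tv);	(flushes all batched calls)
 */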

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static struct clnt_ops *clnt_vc_ops(void);
static bool_t time_not_ok(struct timeval *);
static int read_vc(void *, void *, int);
static int write_vc(void *, void *, int);
static int __msgwrite(int, void *, size_t);
static int __msgread(int, void *, size_t);

struct ct_data {
        int             ct_fd;          /* connection's fd */
        bool_t          ct_closeit;     /* close it on destroy */
        struct timeval  ct_wait;        /* wait interval in milliseconds */
        bool_t          ct_waitset;     /* wait set by clnt_control? */
        struct netbuf   ct_addr;        /* remote addr */
        struct rpc_err  ct_error;
        union {
                char    ct_mcallc[MCALL_MSG_SIZE];      /* marshalled callmsg */
                u_int32_t ct_mcalli;
        } ct_u;
        u_int           ct_mpos;        /* pos after marshal */
        XDR             ct_xdrs;        /* XDR stream */
};

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  Vc_fd_lock[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
static int      *vc_fd_locks;
static cond_t   *vc_cv;
#define release_fd_lock(fd, mask) {             \
        mutex_lock(&clnt_fd_lock);              \
        vc_fd_locks[fd] = 0;                    \
        mutex_unlock(&clnt_fd_lock);            \
        thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);        \
        cond_signal(&vc_cv[fd]);                \
}
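
/*
 * For reference, the acquire side of the protocol described above (used
 * verbatim in clnt_vc_call(), clnt_vc_control() and clnt_vc_destroy() below)
 * looks roughly like this; "ct" is the handle's private ct_data:
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (vc_fd_locks[ct->ct_fd])
 *		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
 *	vc_fd_locks[ct->ct_fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 *	... perform the call ...
 *	release_fd_lock(ct->ct_fd, mask);
 */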

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char clnt_read_vc_str[] = "read_vc";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes; 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  The caller may wish
 * to set this to something more useful.  An illustrative usage sketch
 * appears at the end of this file.
 *
 * fd should be an open socket.
 */
CLIENT *
clnt_vc_create(fd, raddr, prog, vers, sendsz, recvsz)
        int fd;                         /* open file descriptor */
        const struct netbuf *raddr;     /* server's address */
        const rpcprog_t prog;           /* program number */
        const rpcvers_t vers;           /* version number */
        u_int sendsz;                   /* buffer send size */
        u_int recvsz;                   /* buffer recv size */
{
        CLIENT *cl;                     /* client handle */
        struct ct_data *ct = NULL;      /* client handle */
        struct timeval now;
        struct rpc_msg call_msg;
        static u_int32_t disrupt;
        sigset_t mask;
        sigset_t newmask;
        struct sockaddr_storage ss;
        socklen_t slen;
        struct __rpc_sockinfo si;

        if (disrupt == 0)
                disrupt = (u_int32_t)(long)raddr;

        cl = (CLIENT *)mem_alloc(sizeof (*cl));
        ct = (struct ct_data *)mem_alloc(sizeof (*ct));
        if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
                (void) syslog(LOG_ERR, clnt_vc_errstr,
                    clnt_vc_str, __no_mem_str);
                rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                rpc_createerr.cf_error.re_errno = errno;
                goto err;
        }
        ct->ct_addr.buf = NULL;
        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        if (vc_fd_locks == (int *) NULL) {
                int cv_allocsz, fd_allocsz;
                int dtbsize = __rpc_dtbsize();

                fd_allocsz = dtbsize * sizeof (int);
                vc_fd_locks = (int *) mem_alloc(fd_allocsz);
                if (vc_fd_locks == (int *) NULL) {
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else
                        memset(vc_fd_locks, '\0', fd_allocsz);

                assert(vc_cv == (cond_t *) NULL);
                cv_allocsz = dtbsize * sizeof (cond_t);
                vc_cv = (cond_t *) mem_alloc(cv_allocsz);
                if (vc_cv == (cond_t *) NULL) {
                        mem_free(vc_fd_locks, fd_allocsz);
                        vc_fd_locks = (int *) NULL;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else {
                        int i;

                        for (i = 0; i < dtbsize; i++)
                                cond_init(&vc_cv[i], 0, (void *) 0);
                }
        } else
                assert(vc_cv != (cond_t *) NULL);

        /*
         * XXX - fvdl connecting while holding a mutex?
         */
        slen = sizeof ss;
        if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
                if (errno != ENOTCONN) {
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
                if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
        }
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        if (!__rpc_fd2sockinfo(fd, &si))
                goto err;

        ct->ct_closeit = FALSE;

        /*
         * Set up private data struct
         */
        ct->ct_fd = fd;
        ct->ct_wait.tv_usec = 0;
        ct->ct_waitset = FALSE;
        ct->ct_addr.buf = malloc(raddr->maxlen);
        if (ct->ct_addr.buf == NULL)
                goto err;
        memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
        ct->ct_addr.len = raddr->len;
        ct->ct_addr.maxlen = raddr->maxlen;

        /*
         * Initialize call message
         */
        (void)gettimeofday(&now, NULL);
        call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
        call_msg.rm_direction = CALL;
        call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
        call_msg.rm_call.cb_prog = (u_int32_t)prog;
        call_msg.rm_call.cb_vers = (u_int32_t)vers;

        /*
         * pre-serialize the static part of the call msg and stash it away
         */
        xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
            XDR_ENCODE);
        if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
                if (ct->ct_closeit) {
                        (void)_close(fd);
                }
                goto err;
        }
        ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
        XDR_DESTROY(&(ct->ct_xdrs));
        assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

        /*
         * Create a client handle which uses xdrrec for serialization
         * and authnone for authentication.
         */
        cl->cl_ops = clnt_vc_ops();
        cl->cl_private = ct;
        cl->cl_auth = authnone_create();
        sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
        recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
        xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
            cl->cl_private, read_vc, write_vc);
        return (cl);

err:
        if (ct) {
                if (ct->ct_addr.len)
                        mem_free(ct->ct_addr.buf, ct->ct_addr.len);
                mem_free(ct, sizeof (struct ct_data));
        }
        if (cl)
                mem_free(cl, sizeof (CLIENT));
        return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(cl, proc, xdr_args, args_ptr, xdr_results, results_ptr, timeout)
        CLIENT *cl;
        rpcproc_t proc;
        xdrproc_t xdr_args;
        void *args_ptr;
        xdrproc_t xdr_results;
        void *results_ptr;
        struct timeval timeout;
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        XDR *xdrs = &(ct->ct_xdrs);
        struct rpc_msg reply_msg;
        u_int32_t x_id;
        u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;      /* yuk */
        bool_t shipnow;
        int refreshes = 2;
        sigset_t mask, newmask;
        int rpc_lock_value;
        bool_t reply_stat;

        assert(cl != NULL);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);
        if (!ct->ct_waitset) {
                /* If time is not within limits, we ignore it. */
                if (time_not_ok(&timeout) == FALSE)
                        ct->ct_wait = timeout;
        }

        shipnow =
            (xdr_results == NULL && timeout.tv_sec == 0
            && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
        xdrs->x_op = XDR_ENCODE;
        ct->ct_error.re_status = RPC_SUCCESS;
        x_id = ntohl(--(*msg_x_id));

        if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
                if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
                    (! XDR_PUTINT32(xdrs, &proc)) ||
                    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
                    (! (*xdr_args)(xdrs, args_ptr))) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                ct->ct_error.re_status = RPC_CANTENCODEARGS;
                        (void)xdrrec_endofrecord(xdrs, TRUE);
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
        } else {
                *(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
                if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
                        ct->ct_mpos + sizeof(uint32_t),
                        xdrs, xdr_args, args_ptr)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                ct->ct_error.re_status = RPC_CANTENCODEARGS;
                        (void)xdrrec_endofrecord(xdrs, TRUE);
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
        }
        if (! xdrrec_endofrecord(xdrs, shipnow)) {
                release_fd_lock(ct->ct_fd, mask);
                return (ct->ct_error.re_status = RPC_CANTSEND);
        }
        if (! shipnow) {
                release_fd_lock(ct->ct_fd, mask);
                return (RPC_SUCCESS);
        }
        /*
         * Hack to provide rpc-based message passing
         */
        if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
                release_fd_lock(ct->ct_fd, mask);
                return(ct->ct_error.re_status = RPC_TIMEDOUT);
        }


        /*
         * Keep receiving until we get a valid transaction id
         */
        xdrs->x_op = XDR_DECODE;
        while (TRUE) {
                reply_msg.acpted_rply.ar_verf = _null_auth;
                reply_msg.acpted_rply.ar_results.where = NULL;
                reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
                if (! xdrrec_skiprecord(xdrs)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                /* now decode and validate the response header */
                if (! xdr_replymsg(xdrs, &reply_msg)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                continue;
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                if (reply_msg.rm_xid == x_id)
                        break;
        }

        /*
         * process header
         */
        _seterr_reply(&reply_msg, &(ct->ct_error));
        if (ct->ct_error.re_status == RPC_SUCCESS) {
                if (! AUTH_VALIDATE(cl->cl_auth,
                    &reply_msg.acpted_rply.ar_verf)) {
                        ct->ct_error.re_status = RPC_AUTHERROR;
                        ct->ct_error.re_why = AUTH_INVALIDRESP;
                } else {
                        if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
                                reply_stat = (*xdr_results)(xdrs, results_ptr);
                        } else {
                                reply_stat = __rpc_gss_unwrap(cl->cl_auth,
                                    xdrs, xdr_results, results_ptr);
                        }
                        if (! reply_stat) {
                                if (ct->ct_error.re_status == RPC_SUCCESS)
                                        ct->ct_error.re_status =
                                                RPC_CANTDECODERES;
                        }
                }
                /* free verifier ... */
                if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
                        xdrs->x_op = XDR_FREE;
                        (void)xdr_opaque_auth(xdrs,
                            &(reply_msg.acpted_rply.ar_verf));
                }
        }       /* end successful completion */
        else {
                /* maybe our credentials need to be refreshed ... */
                if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
                        goto call_again;
        }       /* end of unsuccessful completion */
        release_fd_lock(ct->ct_fd, mask);
        return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(cl, errp)
        CLIENT *cl;
        struct rpc_err *errp;
{
        struct ct_data *ct;

        assert(cl != NULL);
        assert(errp != NULL);

        ct = (struct ct_data *) cl->cl_private;
        *errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(cl, xdr_res, res_ptr)
        CLIENT *cl;
        xdrproc_t xdr_res;
        void *res_ptr;
{
        struct ct_data *ct;
        XDR *xdrs;
        bool_t dummy;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;
        xdrs = &(ct->ct_xdrs);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        xdrs->x_op = XDR_FREE;
        dummy = (*xdr_res)(xdrs, res_ptr);
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct->ct_fd]);

        return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(cl)
        CLIENT *cl;
{
}

static bool_t
clnt_vc_control(cl, request, info)
        CLIENT *cl;
        u_int request;
        void *info;
{
        struct ct_data *ct;
        void *infop = info;
        sigset_t mask;
        sigset_t newmask;
        int rpc_lock_value;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);

        switch (request) {
        case CLSET_FD_CLOSE:
                ct->ct_closeit = TRUE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        case CLSET_FD_NCLOSE:
                ct->ct_closeit = FALSE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        default:
                break;
        }

        /* for other requests which use info */
        if (info == NULL) {
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        switch (request) {
        case CLSET_TIMEOUT:
                if (time_not_ok((struct timeval *)info)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (FALSE);
                }
                ct->ct_wait = *(struct timeval *)infop;
                ct->ct_waitset = TRUE;
                break;
        case CLGET_TIMEOUT:
                *(struct timeval *)infop = ct->ct_wait;
                break;
        case CLGET_SERVER_ADDR:
                (void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
                break;
        case CLGET_FD:
                *(int *)info = ct->ct_fd;
                break;
        case CLGET_SVC_ADDR:
                /* The caller should not free this memory area */
                *(struct netbuf *)info = ct->ct_addr;
                break;
        case CLSET_SVC_ADDR:            /* set to new address */
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        case CLGET_XID:
                /*
                 * use the knowledge that xid is the
                 * first element in the call structure
                 * This will get the xid of the PREVIOUS call
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
                break;
        case CLSET_XID:
                /* This will set the xid of the NEXT call */
                *(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
                    htonl(*((u_int32_t *)info) + 1);
                /* increment by 1 as clnt_vc_call() decrements once */
                break;
        case CLGET_VERS:
                /*
                 * This RELIES on the information that, in the call body,
                 * the version number field is the fifth field from the
                 * beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_VERS:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        case CLGET_PROG:
                /*
                 * This RELIES on the information that, in the call body,
                 * the program number field is the fourth field from the
                 * beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_PROG:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        default:
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        release_fd_lock(ct->ct_fd, mask);
        return (TRUE);
}


static void
clnt_vc_destroy(cl)
        CLIENT *cl;
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        int ct_fd = ct->ct_fd;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *) cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct_fd])
                cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
        if (ct->ct_closeit && ct->ct_fd != -1) {
                (void)_close(ct->ct_fd);
        }
        XDR_DESTROY(&(ct->ct_xdrs));
        if (ct->ct_addr.buf)
                free(ct->ct_addr.buf);
        mem_free(ct, sizeof(struct ct_data));
        if (cl->cl_netid && cl->cl_netid[0])
                mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
        if (cl->cl_tp && cl->cl_tp[0])
                mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
        mem_free(cl, sizeof(CLIENT));
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
 */
static int
read_vc(ctp, buf, len)
        void *ctp;
        void *buf;
        int len;
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        struct pollfd fd;
        int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
            (ct->ct_wait.tv_usec / 1000));

        if (len == 0)
                return (0);
        fd.fd = ct->ct_fd;
        fd.events = POLLIN;
        for (;;) {
                switch (_poll(&fd, 1, milliseconds)) {
                case 0:
                        ct->ct_error.re_status = RPC_TIMEDOUT;
                        return (-1);

                case -1:
                        if (errno == EINTR)
                                continue;
                        ct->ct_error.re_status = RPC_CANTRECV;
                        ct->ct_error.re_errno = errno;
                        return (-1);
                }
                break;
        }

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                len = __msgread(ct->ct_fd, buf, (size_t)len);
        } else {
                len = _read(ct->ct_fd, buf, (size_t)len);
        }

        switch (len) {
        case 0:
                /* premature eof */
                ct->ct_error.re_errno = ECONNRESET;
                ct->ct_error.re_status = RPC_CANTRECV;
                len = -1;       /* it's really an error */
                break;

        case -1:
                ct->ct_error.re_errno = errno;
                ct->ct_error.re_status = RPC_CANTRECV;
                break;
        }
        return (len);
}

static int
write_vc(ctp, buf, len)
        void *ctp;
        void *buf;
        int len;
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        int i, cnt;

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = __msgwrite(ct->ct_fd, buf,
                            (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        } else {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        }
        return (len);
}

static struct clnt_ops *
clnt_vc_ops()
{
        static struct clnt_ops ops;
        sigset_t mask, newmask;

        /* VARIABLES PROTECTED BY ops_lock: ops */

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&ops_lock);
        if (ops.cl_call == NULL) {
                ops.cl_call = clnt_vc_call;
                ops.cl_abort = clnt_vc_abort;
                ops.cl_geterr = clnt_vc_geterr;
                ops.cl_freeres = clnt_vc_freeres;
                ops.cl_destroy = clnt_vc_destroy;
                ops.cl_control = clnt_vc_control;
        }
        mutex_unlock(&ops_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(t)
        struct timeval *t;
{
        return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
            t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(sock, buf, cnt)
        int sock;
        void *buf;
        size_t cnt;
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return(_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(sock, buf, cnt)
        int sock;
        void *buf;
        size_t cnt;
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        cm.cmsg.cmsg_type = SCM_CREDS;
        cm.cmsg.cmsg_level = SOL_SOCKET;
        cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return(_sendmsg(sock, &msg, 0));
}
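
/*
 * Illustrative usage sketch (not part of the library): how a caller might
 * obtain a handle over an already-connected TCP socket "sock" and issue a
 * call through the routines above.  The program/version numbers (MYPROG,
 * MYVERS), the procedure number (MYPROC) and the xdr routines (xdr_myargs,
 * xdr_myres) are hypothetical placeholders, not names defined by this file.
 *
 *	struct netbuf svc_addr;		(filled in with the server's address)
 *	struct timeval tv = { 25, 0 };
 *	CLIENT *clnt;
 *	struct myargs a;
 *	struct myres r;
 *
 *	clnt = clnt_vc_create(sock, &svc_addr, MYPROG, MYVERS, 0, 0);
 *	if (clnt == NULL)
 *		errx(1, "%s", clnt_spcreateerror("clnt_vc_create"));
 *	(void)clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv);
 *	if (clnt_call(clnt, MYPROC, (xdrproc_t)xdr_myargs, (char *)&a,
 *	    (xdrproc_t)xdr_myres, (char *)&r, tv) != RPC_SUCCESS)
 *		errx(1, "%s", clnt_sperror(clnt, "clnt_call"));
 *	clnt_freeres(clnt, (xdrproc_t)xdr_myres, (char *)&r);
 *	clnt_destroy(clnt);
 */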