/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
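
/*
 * Purely illustrative sketch of the batching convention described above;
 * it is not part of this file, and the procedure, XDR routine and record
 * names (LOGPROC, xdr_log_rec, rec1, rec2) are hypothetical.  A NULL
 * result xdr routine together with a zero timeout queues the call; a
 * later normal call flushes everything queued ahead of it:
 *
 *	struct timeval zero = { 0, 0 }, tv = { 25, 0 };
 *
 *	(queued only, not sent:)
 *	clnt_call(cl, LOGPROC, (xdrproc_t)xdr_log_rec, (char *)&rec1,
 *	    (xdrproc_t)NULL, NULL, zero);
 *	clnt_call(cl, LOGPROC, (xdrproc_t)xdr_log_rec, (char *)&rec2,
 *	    (xdrproc_t)NULL, NULL, zero);
 *
 *	(a normal, non-batched call flushes the batch and waits:)
 *	clnt_call(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, tv);
 */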

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
	struct cmsghdr cmsg;
	struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static struct clnt_ops *clnt_vc_ops(void);
static bool_t time_not_ok(struct timeval *);
static int read_vc(void *, void *, int);
static int write_vc(void *, void *, int);
static int __msgwrite(int, void *, size_t);
static int __msgread(int, void *, size_t);

struct ct_data {
	int		ct_fd;		/* connection's fd */
	bool_t		ct_closeit;	/* close it on destroy */
	struct timeval	ct_wait;	/* wait interval in milliseconds */
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;	/* remote addr */
	struct rpc_err	ct_error;
	union {
		char	ct_mcallc[MCALL_MSG_SIZE];	/* marshalled callmsg */
		u_int32_t ct_mcalli;
	} ct_u;
	u_int		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;	/* XDR stream */
};

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  vc_fd_locks[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
static int	*vc_fd_locks;
static cond_t	*vc_cv;
#define release_fd_lock(fd, mask) {	\
	mutex_lock(&clnt_fd_lock);	\
	vc_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&vc_cv[fd]);	\
}

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes; 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Callers may wish to
 * set this to something more useful.
 *
 * fd should be an open socket.
 *
 * fd	  - open file descriptor
 * raddr  - server's address
 * prog	  - program number
 * vers	  - version number
 * sendsz - buffer send size
 * recvsz - buffer recv size
 */
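
/*
 * A hedged usage sketch, not part of the library sources: sock, svc_addr,
 * MYPROG and MYVERS are placeholders for a connected TCP socket, the
 * server's netbuf, and the caller's program/version numbers.  A caller
 * might build a handle roughly like this:
 *
 *	CLIENT *cl;
 *
 *	cl = clnt_vc_create(sock, &svc_addr, MYPROG, MYVERS, 0, 0);
 *	if (cl == NULL)
 *		clnt_pcreateerror("clnt_vc_create");
 *	else
 *		cl->cl_auth = authunix_create_default();
 *
 * Passing 0 for sendsz/recvsz selects the transport's default buffer
 * sizes, and cl_auth starts out as authnone until the caller replaces it.
 */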
CLIENT *
clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
    const rpcvers_t vers, u_int sendsz, u_int recvsz)
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* client handle's private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static u_int32_t disrupt;
	sigset_t mask;
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	if (disrupt == 0)
		disrupt = (u_int32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
		(void) syslog(LOG_ERR, clnt_vc_errstr,
		    clnt_vc_str, __no_mem_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto err;
	}
	ct->ct_addr.buf = NULL;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == (int *) NULL) {
		int cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (vc_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		assert(vc_cv == (cond_t *) NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (vc_cv == (cond_t *) NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		assert(vc_cv != (cond_t *) NULL);

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto err;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc(raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto err;
	memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	(void)gettimeofday(&now, NULL);
	call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)_close(fd);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));
	assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	cl->cl_ops = clnt_vc_ops();
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    cl->cl_private, read_vc, write_vc);
	return (cl);

err:
	if (ct) {
		if (ct->ct_addr.len)
			mem_free(ct->ct_addr.buf, ct->ct_addr.len);
		mem_free(ct, sizeof (struct ct_data));
	}
	if (cl)
		mem_free(cl, sizeof (CLIENT));
	return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
    xdrproc_t xdr_results, void *results_ptr, struct timeval timeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;	/* yuk */
	bool_t shipnow;
	int refreshes = 2;
	sigset_t mask, newmask;
	int rpc_lock_value;
	bool_t reply_stat;

	assert(cl != NULL);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
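		/*
		 * ct_wait is only updated from the per-call timeout here,
		 * i.e. while the timeout has not been pinned with
		 * clnt_control().  Once a caller has done, for example,
		 *
		 *	struct timeval tv = { 30, 0 };
		 *	clnt_control(cl, CLSET_TIMEOUT, (char *)&tv);
		 *
		 * ct_waitset is TRUE and the value passed to clnt_call()
		 * no longer changes the receive timeout used by read_vc().
		 */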
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
		    (! XDR_PUTINT32(xdrs, &proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! (*xdr_args)(xdrs, args_ptr))) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	} else {
		*(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
		if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
		    ct->ct_mpos + sizeof(uint32_t),
		    xdrs, xdr_args, args_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_TIMEDOUT);
	}

	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	while (TRUE) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(cl->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else {
			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
				reply_stat = (*xdr_results)(xdrs, results_ptr);
			} else {
				reply_stat = __rpc_gss_unwrap(cl->cl_auth,
				    xdrs, xdr_results, results_ptr);
			}
			if (! reply_stat) {
				if (ct->ct_error.re_status == RPC_SUCCESS)
					ct->ct_error.re_status =
					    RPC_CANTDECODERES;
			}
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}	/* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
	}	/* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct;

	assert(cl != NULL);
	assert(errp != NULL);

	ct = (struct ct_data *) cl->cl_private;
	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct ct_data *ct;
	XDR *xdrs;
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	assert(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;
	xdrs = &(ct->ct_xdrs);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct->ct_fd]);

	return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static __inline void
htonlp(void *dst, const void *src, uint32_t incr)
{
	/* We are aligned, so we think */
	*(uint32_t *)dst = htonl(*(const uint32_t *)src + incr);
}

static __inline void
ntohlp(void *dst, const void *src)
{
	/* We are aligned, so we think */
	*(uint32_t *)dst = htonl(*(const uint32_t *)src);
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct;
	void *infop = info;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	assert(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		ntohlp(info, &ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* increment by 1 as clnt_vc_call() decrements once */
		htonlp(&ct->ct_u.ct_mcalli, info, 1);
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_VERS:
		htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_PROG:
		htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}

static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	int ct_fd = ct->ct_fd;
	sigset_t mask;
	sigset_t newmask;

	assert(cl != NULL);

	ct = (struct ct_data *) cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct_fd])
		cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
	if (ct->ct_closeit && ct->ct_fd != -1) {
		(void)_close(ct->ct_fd);
	}
	XDR_DESTROY(&(ct->ct_xdrs));
	free(ct->ct_addr.buf);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
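 *
 * For AF_LOCAL (unix-domain) connections these helpers detour through
 * __msgread()/__msgwrite() below, so each write carries an SCM_CREDS
 * control message (struct cmsgcred) that lets the peer identify the
 * caller; all other connections use plain _read()/_write().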
 */
static int
read_vc(void *ctp, void *buf, int len)
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	struct pollfd fd;
	int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
	    (ct->ct_wait.tv_usec / 1000));

	if (len == 0)
		return (0);
	fd.fd = ct->ct_fd;
	fd.events = POLLIN;
	for (;;) {
		switch (_poll(&fd, 1, milliseconds)) {
		case 0:
			ct->ct_error.re_status = RPC_TIMEDOUT;
			return (-1);

		case -1:
			if (errno == EINTR)
				continue;
			ct->ct_error.re_status = RPC_CANTRECV;
			ct->ct_error.re_errno = errno;
			return (-1);
		}
		break;
	}

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		len = __msgread(ct->ct_fd, buf, (size_t)len);
	} else {
		len = _read(ct->ct_fd, buf, (size_t)len);
	}

	switch (len) {
	case 0:
		/* premature eof */
		ct->ct_error.re_errno = ECONNRESET;
		ct->ct_error.re_status = RPC_CANTRECV;
		len = -1;	/* it's really an error */
		break;

	case -1:
		ct->ct_error.re_errno = errno;
		ct->ct_error.re_status = RPC_CANTRECV;
		break;
	}
	return (len);
}

static int
write_vc(void *ctp, void *buf, int len)
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	int i, cnt;

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = __msgwrite(ct->ct_fd, buf,
			    (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	} else {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	}
	return (len);
}

static struct clnt_ops *
clnt_vc_ops(void)
{
	static struct clnt_ops ops;
	sigset_t mask, newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_vc_call;
		ops.cl_abort = clnt_vc_abort;
		ops.cl_geterr = clnt_vc_geterr;
		ops.cl_freeres = clnt_vc_freeres;
		ops.cl_destroy = clnt_vc_destroy;
		ops.cl_control = clnt_vc_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
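 * Returns TRUE when the timeval is unusable and FALSE when it may be
 * used, matching the sense of the function name.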
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
	    t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(int sock, void *buf, size_t cnt)
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return (_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(int sock, void *buf, size_t cnt)
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	cm.cmsg.cmsg_type = SCM_CREDS;
	cm.cmsg.cmsg_level = SOL_SOCKET;
	cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return (_sendmsg(sock, &msg, 0));
}