/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
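/*
 * Batching, illustrated (a sketch only, not part of this file): a caller
 * batches by passing a NULL result xdrproc and a zero timeout, so
 * clnt_vc_call() below buffers the request without flushing, then flushes
 * the pipeline with an ordinary, non-batched call.  BATCHPROC, xdr_batcharg
 * and arg are hypothetical names used purely for illustration.
 *
 *	struct timeval zero = { 0, 0 };
 *	struct timeval wait = { 25, 0 };
 *
 *	(void)clnt_call(clnt, BATCHPROC, (xdrproc_t)xdr_batcharg,
 *	    (char *)&arg, (xdrproc_t)NULL, NULL, zero);
 *	...
 *	(void)clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait);
 */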
#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
	struct cmsghdr cmsg;
	struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static struct clnt_ops *clnt_vc_ops(void);
static bool_t time_not_ok(struct timeval *);
static int read_vc(void *, void *, int);
static int write_vc(void *, void *, int);
static int __msgwrite(int, void *, size_t);
static int __msgread(int, void *, size_t);

struct ct_data {
	int		ct_fd;		/* connection's fd */
	bool_t		ct_closeit;	/* close it on destroy */
	struct timeval	ct_wait;	/* wait interval in milliseconds */
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;	/* remote addr */
	struct rpc_err	ct_error;
	union {
		char	ct_mcallc[MCALL_MSG_SIZE];	/* marshalled callmsg */
		u_int32_t ct_mcalli;
	} ct_u;
	u_int		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;	/* XDR stream */
};

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  vc_fd_locks[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
static int	*vc_fd_locks;
static cond_t	*vc_cv;
#define release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);		\
	vc_fd_locks[fd] = 0;			\
	mutex_unlock(&clnt_fd_lock);		\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&vc_cv[fd]);		\
}

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes; 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Callers may wish to
 * set this to something more useful.
 *
 * fd should be an open socket.
 *
 * fd     - open file descriptor
 * raddr  - server's address
 * prog   - program number
 * vers   - version number
 * sendsz - buffer send size
 * recvsz - buffer recv size
 */
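/*
 * Example (illustrative sketch only, not part of this file): a typical
 * caller connects a stream socket to the server itself, describes the
 * server's address in a netbuf, and lets the zero buffer sizes select the
 * defaults.  fd, server_sockaddr, EXAMPLE_PROG and EXAMPLE_VERS are
 * hypothetical names used purely for illustration.
 *
 *	struct netbuf svc_addr;
 *	struct timeval tmo = { 25, 0 };
 *	CLIENT *cl;
 *
 *	svc_addr.buf = &server_sockaddr;
 *	svc_addr.len = svc_addr.maxlen = sizeof(server_sockaddr);
 *	cl = clnt_vc_create(fd, &svc_addr, EXAMPLE_PROG, EXAMPLE_VERS, 0, 0);
 *	if (cl == NULL)
 *		errx(1, "%s", clnt_spcreateerror("clnt_vc_create"));
 *	(void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tmo);
 *	(void)clnt_control(cl, CLSET_FD_CLOSE, NULL);	 close fd on destroy
 */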
CLIENT *
clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
    const rpcvers_t vers, u_int sendsz, u_int recvsz)
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static u_int32_t disrupt;
	sigset_t mask;
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	if (disrupt == 0)
		disrupt = (u_int32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
		(void) syslog(LOG_ERR, clnt_vc_errstr,
		    clnt_vc_str, __no_mem_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto err;
	}
	ct->ct_addr.buf = NULL;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == (int *) NULL) {
		int cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (vc_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		assert(vc_cv == (cond_t *) NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (vc_cv == (cond_t *) NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		assert(vc_cv != (cond_t *) NULL);

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto err;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc(raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto err;
	memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	(void)gettimeofday(&now, NULL);
	call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)_close(fd);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));
	assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	cl->cl_ops = clnt_vc_ops();
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    cl->cl_private, read_vc, write_vc);
	return (cl);

err:
	if (ct) {
		if (ct->ct_addr.len)
			mem_free(ct->ct_addr.buf, ct->ct_addr.len);
		mem_free(ct, sizeof (struct ct_data));
	}
	if (cl)
		mem_free(cl, sizeof (CLIENT));
	return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
    xdrproc_t xdr_results, void *results_ptr, struct timeval timeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;	/* yuk */
	bool_t shipnow;
	int refreshes = 2;
	sigset_t mask, newmask;
	int rpc_lock_value;
	bool_t reply_stat;

	assert(cl != NULL);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
		    (! XDR_PUTINT32(xdrs, &proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! (*xdr_args)(xdrs, args_ptr))) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	} else {
		*(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
		if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
			ct->ct_mpos + sizeof(uint32_t),
			xdrs, xdr_args, args_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	while (TRUE) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(cl->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else {
			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
				reply_stat = (*xdr_results)(xdrs, results_ptr);
			} else {
				reply_stat = __rpc_gss_unwrap(cl->cl_auth,
				    xdrs, xdr_results, results_ptr);
			}
			if (! reply_stat) {
				if (ct->ct_error.re_status == RPC_SUCCESS)
					ct->ct_error.re_status =
						RPC_CANTDECODERES;
			}
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}	/* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
	}	/* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct;

	assert(cl != NULL);
	assert(errp != NULL);

	ct = (struct ct_data *) cl->cl_private;
	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct ct_data *ct;
	XDR *xdrs;
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	assert(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;
	xdrs = &(ct->ct_xdrs);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct->ct_fd]);

	return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static __inline void
htonlp(void *dst, const void *src, uint32_t incr)
{
	/* We are aligned, so we think */
	*(uint32_t *)dst = htonl(*(const uint32_t *)src + incr);
}

static __inline void
ntohlp(void *dst, const void *src)
{
	/* We are aligned, so we think */
	*(uint32_t *)dst = htonl(*(const uint32_t *)src);
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct;
	void *infop = info;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	assert(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		ntohlp(info, &ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* increment by 1 as clnt_vc_call() decrements once */
		htonlp(&ct->ct_u.ct_mcalli, info, 1);
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_VERS:
		htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_PROG:
		htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}


static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	int ct_fd = ct->ct_fd;
	sigset_t mask;
	sigset_t newmask;

	assert(cl != NULL);

	ct = (struct ct_data *) cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct_fd])
		cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
	if (ct->ct_closeit && ct->ct_fd != -1) {
		(void)_close(ct->ct_fd);
	}
	XDR_DESTROY(&(ct->ct_xdrs));
	free(ct->ct_addr.buf);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
 */
static int
read_vc(void *ctp, void *buf, int len)
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	struct pollfd fd;
	int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
	    (ct->ct_wait.tv_usec / 1000));

	if (len == 0)
		return (0);
	fd.fd = ct->ct_fd;
	fd.events = POLLIN;
	for (;;) {
		switch (_poll(&fd, 1, milliseconds)) {
		case 0:
			ct->ct_error.re_status = RPC_TIMEDOUT;
			return (-1);

		case -1:
			if (errno == EINTR)
				continue;
			ct->ct_error.re_status = RPC_CANTRECV;
			ct->ct_error.re_errno = errno;
			return (-1);
		}
		break;
	}

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		len = __msgread(ct->ct_fd, buf, (size_t)len);
	} else {
		len = _read(ct->ct_fd, buf, (size_t)len);
	}

	switch (len) {
	case 0:
		/* premature eof */
		ct->ct_error.re_errno = ECONNRESET;
		ct->ct_error.re_status = RPC_CANTRECV;
		len = -1;	/* it's really an error */
		break;

	case -1:
		ct->ct_error.re_errno = errno;
		ct->ct_error.re_status = RPC_CANTRECV;
		break;
	}
	return (len);
}

static int
write_vc(void *ctp, void *buf, int len)
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	int i, cnt;

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = __msgwrite(ct->ct_fd, buf,
			    (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	} else {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	}
	return (len);
}

static struct clnt_ops *
clnt_vc_ops(void)
{
	static struct clnt_ops ops;
	sigset_t mask, newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_vc_call;
		ops.cl_abort = clnt_vc_abort;
		ops.cl_geterr = clnt_vc_geterr;
		ops.cl_freeres = clnt_vc_freeres;
		ops.cl_destroy = clnt_vc_destroy;
		ops.cl_control = clnt_vc_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
	    t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(int sock, void *buf, size_t cnt)
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return (_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(int sock, void *buf, size_t cnt)
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	cm.cmsg.cmsg_type = SCM_CREDS;
	cm.cmsg.cmsg_level = SOL_SOCKET;
	cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return (_sendmsg(sock, &msg, 0));
}