/*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI" */

#if !defined(lint) && defined(SCCSIDS)
static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Implements a connectionless client side RPC.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"


#define	RPC_MAX_BACKOFF		30	/* seconds */


static struct clnt_ops *clnt_dg_ops(void);
static bool_t time_not_ok(struct timeval *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
	    xdrproc_t, void *, struct timeval);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_dg_abort(CLIENT *);
static bool_t clnt_dg_control(CLIENT *, u_int, void *);
static void clnt_dg_destroy(CLIENT *);


/*
 *	This machinery implements per-fd locks for MT-safety.  It is not
 *	sufficient to do per-CLIENT handle locks for MT-safety because a
 *	user may create more than one CLIENT handle with the same fd behind
 *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
 *	CLIENT handle created for that fd.
 *	The current implementation holds locks across the entire RPC and reply,
 *	including retransmissions.  Yes, this is silly, and as soon as this
 *	code is proven to work, this should be the first thing fixed.  One step
 *	at a time.
 */
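/*
 * For reference, the acquire side of this per-fd protocol has no macro;
 * it is open-coded in clnt_dg_call() and clnt_dg_control() below.  A
 * minimal sketch of that sequence (the names match the variables in this
 * file; the fragment is illustrative only):
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (dg_fd_locks[fd])
 *		cond_wait(&dg_cv[fd], &clnt_fd_lock);
 *	dg_fd_locks[fd] = __isthreaded ? 1 : 0;		-- mark fd busy
 *	mutex_unlock(&clnt_fd_lock);
 *	... do the RPC work ...
 *	release_fd_lock(fd, mask);	-- clears the flag, signals dg_cv[fd]
 */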
static int	*dg_fd_locks;
extern mutex_t clnt_fd_lock;
static cond_t	*dg_cv;
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);	\
	dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
	cond_signal(&dg_cv[fd]);	\
}

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
	int			cu_fd;		/* connection's fd */
	bool_t			cu_closeit;	/* opened by library */
	struct sockaddr_storage	cu_raddr;	/* remote address */
	int			cu_rlen;
	struct timeval		cu_wait;	/* retransmit interval */
	struct timeval		cu_total;	/* total time for the call */
	struct rpc_err		cu_error;
	XDR			cu_outxdrs;
	u_int			cu_xdrpos;
	u_int			cu_sendsz;	/* send size */
	char			*cu_outbuf;
	u_int			cu_recvsz;	/* recv size */
	int			cu_async;
	int			cu_connect;	/* Use connect(). */
	int			cu_connected;	/* Have done connect(). */
	struct kevent		cu_kin;
	int			cu_kq;
	char			cu_inbuf[1];
};

/*
 * Connectionless client creation returns with client handle parameters.
 * Default options are set, which the user can change using clnt_control().
 * fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 *	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received.  Normally they are the same, but they can be
 * changed to improve program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
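/*
 * Illustrative use (a hypothetical caller, not part of this file): given
 * an already open and bound datagram socket "fd" and the server's address
 * "sa"/"salen", a handle would typically be obtained like this:
 *
 *	struct netbuf svc;
 *	CLIENT *clnt;
 *
 *	svc.maxlen = svc.len = salen;
 *	svc.buf = sa;
 *	clnt = clnt_dg_create(fd, &svc, prognum, versnum, 0, 0);
 *	if (clnt == NULL)
 *		clnt_pcreateerror("clnt_dg_create");
 *
 * Passing 0 for sendsz/recvsz selects the transport defaults, as noted
 * above.
 */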
CLIENT *
clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
	int fd;				/* open file descriptor */
	const struct netbuf *svcaddr;	/* server's address */
	rpcprog_t program;		/* program number */
	rpcvers_t version;		/* version number */
	u_int sendsz;			/* buffer send size */
	u_int recvsz;			/* buffer recv size */
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		int cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	cu->cu_connect = FALSE;
	cu->cu_connected = FALSE;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

	/* XXX fvdl - do we still want this? */
#if 0
	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE.  It is the user's
	 * responsibility to close the fd; alternatively, the user may use
	 * clnt_control() to let clnt_destroy() do it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->cu_kq = -1;
	EV_SET(&cu->cu_kin, cu->cu_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}

static enum clnt_stat
clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
	CLIENT	*cl;			/* client handle */
	rpcproc_t	proc;		/* procedure number */
	xdrproc_t	xargs;		/* xdr routine for args */
	void		*argsp;		/* pointer to args */
	xdrproc_t	xresults;	/* xdr routine for results */
	void		*resultsp;	/* pointer to results */
	struct timeval	utimeout;	/* seconds to wait before giving up */
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen = 0;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval next_sendtime, starttime, time_waited, tv;
	struct timespec ts;
	struct kevent kv;
	struct sockaddr *sa;
	sigset_t mask;
	sigset_t newmask;
	socklen_t inlen, salen;
	ssize_t recvlen = 0;
	int kin_len, n, rpc_lock_value;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

	if (cu->cu_connect && !cu->cu_connected) {
		if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
		    cu->cu_rlen) < 0) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTSEND;
			goto out;
		}
		cu->cu_connected = 1;
	}
	if (cu->cu_connected) {
		sa = NULL;
		salen = 0;
	} else {
		sa = (struct sockaddr *)&cu->cu_raddr;
		salen = cu->cu_rlen;
	}
	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = next_sendtime = cu->cu_wait;
	gettimeofday(&starttime, NULL);

	/* Clean up in case the last call ended in a longjmp(3) call. */
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	if ((cu->cu_kq = kqueue()) < 0) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}
	kin_len = 1;

call_again:
	xdrs = &(cu->cu_outxdrs);
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, cu->cu_xdrpos);
	/*
	 * the transaction id is the first thing in the out buffer
	 * XXX Yes, and it's in network byte order, so we should
	 * be careful when we increment it, shouldn't we.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

	if ((! XDR_PUTINT32(xdrs, &proc)) ||
	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
	    (! (*xargs)(xdrs, argsp))) {
		cu->cu_error.re_status = RPC_CANTENCODEARGS;
		goto out;
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		cu->cu_error.re_status = RPC_TIMEDOUT;
		goto out;
	}

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = resultsp;
	reply_msg.acpted_rply.ar_results.proc = xresults;

	for (;;) {
		/* Decide how long to wait. */
		if (timercmp(&next_sendtime, &timeout, <))
			timersub(&next_sendtime, &time_waited, &tv);
		else
			timersub(&timeout, &time_waited, &tv);
		if (tv.tv_sec < 0 || tv.tv_usec < 0)
			tv.tv_sec = tv.tv_usec = 0;
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		n = _kevent(cu->cu_kq, &cu->cu_kin, kin_len, &kv, 1, &ts);
		/* We don't need to register the event again. */
		kin_len = 0;

		if (n == 1) {
			if (kv.flags & EV_ERROR) {
				cu->cu_error.re_errno = kv.data;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			/* We have some data now */
			do {
				recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
				    cu->cu_recvsz, 0, NULL, NULL);
			} while (recvlen < 0 && errno == EINTR);
			if (recvlen < 0 && errno != EWOULDBLOCK) {
				cu->cu_error.re_errno = errno;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			if (recvlen >= sizeof(u_int32_t) &&
			    (cu->cu_async == TRUE ||
			    *((u_int32_t *)(void *)(cu->cu_inbuf)) ==
			    *((u_int32_t *)(void *)(cu->cu_outbuf)))) {
				/* We now assume we have the proper reply. */
				break;
			}
		}
		if (n == -1 && errno != EINTR) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTRECV;
			goto out;
		}
		gettimeofday(&tv, NULL);
		timersub(&tv, &starttime, &time_waited);

		/* Check for timeout. */
		if (timercmp(&time_waited, &timeout, >)) {
			cu->cu_error.re_status = RPC_TIMEDOUT;
			goto out;
		}

		/* Retransmit if necessary. */
		if (timercmp(&time_waited, &next_sendtime, >)) {
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
				timeradd(&retransmit_time, &retransmit_time,
				    &retransmit_time);
			timeradd(&next_sendtime, &retransmit_time,
			    &next_sendtime);
			goto send_again;
		}
	}
	inlen = (socklen_t)recvlen;

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
			    &reply_msg.acpted_rply.ar_verf)) {
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				(void) xdr_opaque_auth(xdrs,
				    &(reply_msg.acpted_rply.ar_verf));
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;

	}
out:
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	cu->cu_kq = -1;
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}

static void
clnt_dg_geterr(cl, errp)
	CLIENT *cl;
	struct rpc_err *errp;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;
	xdrproc_t xdr_res;
	void *res_ptr;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(h)
	CLIENT *h;
{
}

static bool_t
clnt_dg_control(cl, request, info)
	CLIENT *cl;
	u_int request;
	void *info;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give him the fd address */
		/* Now obsolete. Only for backward compatibility */
		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure *.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outbuf =
		    htonl(*(u_int32_t *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		cu->cu_connect = *(int *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}

static void
clnt_dg_destroy(cl)
	CLIENT *cl;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops()
{
	static struct clnt_ops ops;
	extern mutex_t	ops_lock;
	sigset_t mask;
	sigset_t newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is allowed.
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
		t->tv_usec < -1 || t->tv_usec > 1000000);
}
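/*
 * Illustrative use of clnt_dg_control() (hypothetical caller code): the
 * per-call total timeout, the retransmit interval, and the use of a
 * connected socket can all be tuned through the generic clnt_control()
 * interface once a handle exists:
 *
 *	struct timeval total = { 25, 0 };	-- for CLSET_TIMEOUT
 *	struct timeval retry = { 5, 0 };	-- for CLSET_RETRY_TIMEOUT
 *	int use_connect = 1;			-- for CLSET_CONNECT
 *
 *	clnt_control(clnt, CLSET_TIMEOUT, (char *)&total);
 *	clnt_control(clnt, CLSET_RETRY_TIMEOUT, (char *)&retry);
 *	clnt_control(clnt, CLSET_CONNECT, (char *)&use_connect);
 *
 * Both timeouts are validated by time_not_ok() above; once CLSET_TIMEOUT
 * has been applied, clnt_dg_call() ignores the timeout argument it is
 * passed and uses cu_total instead.
 */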