xref: /freebsd/lib/libc/rpc/clnt_dg.c (revision ce4946daa5ce852d28008dac492029500ab2ee95)
1 /*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/
2 /*	$FreeBSD$ */
3 
4 /*
5  * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
6  * unrestricted use provided that this legend is included on all tape
7  * media and as a part of the software program in whole or part.  Users
8  * may copy or modify Sun RPC without charge, but are not authorized
9  * to license or distribute it to anyone else except as part of a product or
10  * program developed by the user.
11  *
12  * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
13  * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
14  * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
15  *
16  * Sun RPC is provided with no support and without any obligation on the
17  * part of Sun Microsystems, Inc. to assist in its use, correction,
18  * modification or enhancement.
19  *
20  * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
21  * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
22  * OR ANY PART THEREOF.
23  *
24  * In no event will Sun Microsystems, Inc. be liable for any lost revenue
25  * or profits or other special, indirect and consequential damages, even if
26  * Sun has been advised of the possibility of such damages.
27  *
28  * Sun Microsystems, Inc.
29  * 2550 Garcia Avenue
30  * Mountain View, California  94043
31  */
32 /*
33  * Copyright (c) 1986-1991 by Sun Microsystems Inc.
34  */
35 
36 /* #ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI" */
37 
38 #if 0
39 #if !defined(lint) && defined(SCCSIDS)
40 static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
41 #endif
42 #endif
43 
44 /*
45  * Implements a connectionless client side RPC.
46  */
47 
48 #include "namespace.h"
49 #include "reentrant.h"
50 #include <sys/poll.h>
51 #include <sys/types.h>
52 #include <sys/time.h>
53 #include <sys/socket.h>
54 #include <sys/ioctl.h>
55 #include <rpc/rpc.h>
56 #include <errno.h>
57 #include <stdlib.h>
58 #include <string.h>
59 #include <signal.h>
60 #include <unistd.h>
61 #include <err.h>
62 #include "un-namespace.h"
63 #include "rpc_com.h"
64 
65 
66 #define	RPC_MAX_BACKOFF		30 /* seconds */
67 
68 
69 static struct clnt_ops *clnt_dg_ops __P((void));
70 static bool_t time_not_ok __P((struct timeval *));
71 static enum clnt_stat clnt_dg_call __P((CLIENT *, rpcproc_t, xdrproc_t, caddr_t,
72 					xdrproc_t, caddr_t, struct timeval));
73 static void clnt_dg_geterr __P((CLIENT *, struct rpc_err *));
74 static bool_t clnt_dg_freeres __P((CLIENT *, xdrproc_t, caddr_t));
75 static void clnt_dg_abort __P((CLIENT *));
76 static bool_t clnt_dg_control __P((CLIENT *, u_int, char *));
77 static void clnt_dg_destroy __P((CLIENT *));
78 static int __rpc_timeval_to_msec __P((struct timeval *));
79 
80 
81 
82 
83 /*
84  *	This machinery implements per-fd locks for MT-safety.  It is not
85  *	sufficient to do per-CLIENT handle locks for MT-safety because a
86  *	user may create more than one CLIENT handle with the same fd behind
87  *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
88  *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
89  *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
90  *	CLIENT handle created for that fd.
91  *	The current implementation holds locks across the entire RPC and reply,
92  *	including retransmissions.  Yes, this is silly, and as soon as this
93  *	code is proven to work, this should be the first thing fixed.  One step
94  *	at a time.
95  */
96 static int	*dg_fd_locks;
97 extern mutex_t clnt_fd_lock;
98 static cond_t	*dg_cv;
99 #define	release_fd_lock(fd, mask) {		\
100 	mutex_lock(&clnt_fd_lock);	\
101 	dg_fd_locks[fd] = 0;		\
102 	mutex_unlock(&clnt_fd_lock);	\
103 	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
104 	cond_signal(&dg_cv[fd]);	\
105 }
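
/*
 * The matching acquire side (used by clnt_dg_call() and clnt_dg_control()
 * below) waits until no other call is active on the fd before marking it
 * busy:
 *
 *	mutex_lock(&clnt_fd_lock);
 *	while (dg_fd_locks[fd])
 *		cond_wait(&dg_cv[fd], &clnt_fd_lock);
 *	dg_fd_locks[fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 */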
106 
107 static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
108 
109 /* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */
110 
111 /*
112  * Private data kept per client handle
113  */
114 struct cu_data {
115 	int			cu_fd;		/* connection's fd */
116 	bool_t			cu_closeit;	/* opened by library */
117 	struct sockaddr_storage	cu_raddr;	/* remote address */
118 	int			cu_rlen;	/* length of remote address */
119 	struct timeval		cu_wait;	/* retransmit interval */
120 	struct timeval		cu_total;	/* total time for the call */
121 	struct rpc_err		cu_error;
122 	XDR			cu_outxdrs;
123 	u_int			cu_xdrpos;
124 	u_int			cu_sendsz;	/* send size */
125 	char			*cu_outbuf;
126 	u_int			cu_recvsz;	/* recv size */
127 	struct pollfd		pfdp;
128 	int			cu_async;
129 	char			cu_inbuf[1];
130 };
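
/*
 * The receive and send buffers live at the end of the cu_data allocation:
 * cu_inbuf is the first of cu_recvsz receive bytes, and cu_outbuf points
 * cu_recvsz bytes further in, at the start of the cu_sendsz send bytes
 * (see the mem_alloc() of sizeof (*cu) + sendsz + recvsz below).
 */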
131 
132 /*
133  * Connectionless client creation returns with client handle parameters.
134  * Default options are set, which the user can change using clnt_control().
135  * fd should be open and bound.
136  * NB: The rpch->cl_auth is initialized to null authentication.
137  * 	Caller may wish to set this to something more useful.
138  *
139  * sendsz and recvsz are the maximum allowable packet sizes that can be
140  * sent and received. Normally they are the same, but they can be
141  * changed to improve the program efficiency and buffer allocation.
142  * If they are 0, use the transport default.
143  *
144  * If svcaddr is NULL, returns NULL.
145  */
146 CLIENT *
147 clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
148 	int fd;				/* open file descriptor */
149 	const struct netbuf *svcaddr;	/* server's address */
150 	rpcprog_t program;		/* program number */
151 	rpcvers_t version;		/* version number */
152 	u_int sendsz;			/* buffer send size */
153 	u_int recvsz;			/* buffer recv size */
154 {
155 	CLIENT *cl = NULL;		/* client handle */
156 	struct cu_data *cu = NULL;	/* private data */
157 	struct timeval now;
158 	struct rpc_msg call_msg;
159 	sigset_t mask;
160 	sigset_t newmask;
161 	struct __rpc_sockinfo si;
162 	int one = 1;
163 
164 	sigfillset(&newmask);
165 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
166 	mutex_lock(&clnt_fd_lock);
167 	if (dg_fd_locks == (int *) NULL) {
168 		int cv_allocsz;
169 		size_t fd_allocsz;
170 		int dtbsize = __rpc_dtbsize();
171 
172 		fd_allocsz = dtbsize * sizeof (int);
173 		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
174 		if (dg_fd_locks == (int *) NULL) {
175 			mutex_unlock(&clnt_fd_lock);
176 			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
177 			goto err1;
178 		} else
179 			memset(dg_fd_locks, '\0', fd_allocsz);
180 
181 		cv_allocsz = dtbsize * sizeof (cond_t);
182 		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
183 		if (dg_cv == (cond_t *) NULL) {
184 			mem_free(dg_fd_locks, fd_allocsz);
185 			dg_fd_locks = (int *) NULL;
186 			mutex_unlock(&clnt_fd_lock);
187 			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
188 			goto err1;
189 		} else {
190 			int i;
191 
192 			for (i = 0; i < dtbsize; i++)
193 				cond_init(&dg_cv[i], 0, (void *) 0);
194 		}
195 	}
196 
197 	mutex_unlock(&clnt_fd_lock);
198 	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
199 
200 	if (svcaddr == NULL) {
201 		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
202 		return (NULL);
203 	}
204 
205 	if (!__rpc_fd2sockinfo(fd, &si)) {
206 		rpc_createerr.cf_stat = RPC_TLIERROR;
207 		rpc_createerr.cf_error.re_errno = 0;
208 		return (NULL);
209 	}
210 	/*
211 	 * Find the receive and the send size
212 	 */
213 	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
214 	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
215 	if ((sendsz == 0) || (recvsz == 0)) {
216 		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
217 		rpc_createerr.cf_error.re_errno = 0;
218 		return (NULL);
219 	}
220 
221 	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
222 		goto err1;
223 	/*
224 	 * Should be multiple of 4 for XDR.
225 	 */
226 	sendsz = ((sendsz + 3) / 4) * 4;
227 	recvsz = ((recvsz + 3) / 4) * 4;
228 	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
229 	if (cu == NULL)
230 		goto err1;
231 	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
232 	cu->cu_rlen = svcaddr->len;
233 	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
234 	/* Other values can also be set through clnt_control() */
235 	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
236 	cu->cu_wait.tv_usec = 0;
237 	cu->cu_total.tv_sec = -1;
238 	cu->cu_total.tv_usec = -1;
239 	cu->cu_sendsz = sendsz;
240 	cu->cu_recvsz = recvsz;
241 	cu->cu_async = FALSE;
242 	(void) gettimeofday(&now, NULL);
243 	call_msg.rm_xid = __RPC_GETXID(&now);
244 	call_msg.rm_call.cb_prog = program;
245 	call_msg.rm_call.cb_vers = version;
246 	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
247 	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
248 		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
249 		rpc_createerr.cf_error.re_errno = 0;
250 		goto err2;
251 	}
252 	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));
253 
254 	/* XXX fvdl - do we still want this? */
255 #if 0
256 	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
257 #endif
258 	_ioctl(fd, FIONBIO, (char *)(void *)&one);
259 
260 	/*
261 	 * By default, closeit is always FALSE.  It is the user's responsibility
262 	 * to close the fd, or the user may use clnt_control()
263 	 * to let clnt_destroy() do it for him/her.
264 	 */
265 	cu->cu_closeit = FALSE;
266 	cu->cu_fd = fd;
267 	cl->cl_ops = clnt_dg_ops();
268 	cl->cl_private = (caddr_t)(void *)cu;
269 	cl->cl_auth = authnone_create();
270 	cl->cl_tp = NULL;
271 	cl->cl_netid = NULL;
272 	cu->pfdp.fd = cu->cu_fd;
273 	cu->pfdp.events = POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND;
274 	return (cl);
275 err1:
276 	warnx(mem_err_clnt_dg);
277 	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
278 	rpc_createerr.cf_error.re_errno = errno;
279 err2:
280 	if (cl) {
281 		mem_free(cl, sizeof (CLIENT));
282 		if (cu)
283 			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
284 	}
285 	return (NULL);
286 }
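
/*
 * Illustrative sketch only (not compiled): one way a caller might use
 * clnt_dg_create() directly.  It assumes the caller has already opened
 * and bound a datagram socket and has the server's address in a
 * struct netbuf (for example from rpcb_getaddr()); "fd", "prognum" and
 * "versnum" are placeholders.
 */
#if 0
	struct netbuf svcaddr;		/* filled in, e.g., by rpcb_getaddr() */
	CLIENT *clnt;

	clnt = clnt_dg_create(fd, &svcaddr, prognum, versnum, 0, 0);
	if (clnt == NULL)
		clnt_pcreateerror("clnt_dg_create");
	else {
		/* let clnt_destroy() close the fd for us */
		(void) clnt_control(clnt, CLSET_FD_CLOSE, NULL);
		/* ... clnt_call() as needed ... */
		clnt_destroy(clnt);
	}
#endif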
287 
288 static enum clnt_stat
289 clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
290 	CLIENT	*cl;			/* client handle */
291 	rpcproc_t	proc;		/* procedure number */
292 	xdrproc_t	xargs;		/* xdr routine for args */
293 	caddr_t		argsp;		/* pointer to args */
294 	xdrproc_t	xresults;	/* xdr routine for results */
295 	caddr_t		resultsp;	/* pointer to results */
296 	struct timeval	utimeout;	/* seconds to wait before giving up */
297 {
298 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
299 	XDR *xdrs;
300 	size_t outlen;
301 	struct rpc_msg reply_msg;
302 	XDR reply_xdrs;
303 	struct timeval time_waited;
304 	bool_t ok;
305 	int nrefreshes = 2;		/* number of times to refresh cred */
306 	struct timeval timeout;
307 	struct timeval retransmit_time;
308 	struct timeval startime, curtime;
309 	int firsttimeout = 1;
310 	int dtbsize = __rpc_dtbsize();
311 	sigset_t mask;
312 	sigset_t newmask;
313 	socklen_t fromlen, inlen;
314 	ssize_t recvlen = 0;
315 	int rpc_lock_value;
316 	u_int32_t xid;
317 
318 	sigfillset(&newmask);
319 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
320 	mutex_lock(&clnt_fd_lock);
321 	while (dg_fd_locks[cu->cu_fd])
322 		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
323 	if (__isthreaded)
324 		rpc_lock_value = 1;
325 	else
326 		rpc_lock_value = 0;
327 	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
328 	mutex_unlock(&clnt_fd_lock);
329 	if (cu->cu_total.tv_usec == -1) {
330 		timeout = utimeout;	/* use supplied timeout */
331 	} else {
332 		timeout = cu->cu_total;	/* use default timeout */
333 	}
334 
335 	time_waited.tv_sec = 0;
336 	time_waited.tv_usec = 0;
337 	retransmit_time = cu->cu_wait;
338 
339 call_again:
340 	xdrs = &(cu->cu_outxdrs);
341 	if (cu->cu_async == TRUE && xargs == NULL)
342 		goto get_reply;
343 	xdrs->x_op = XDR_ENCODE;
344 	XDR_SETPOS(xdrs, cu->cu_xdrpos);
345 	/*
346 	 * the transaction id is the first thing in the out buffer.
347 	 * XXX It is in network byte order, so we have to
348 	 * be careful when we increment it.
349 	 */
350 	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
351 	xid++;
352 	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);
353 
354 	if ((! XDR_PUTINT32(xdrs, &proc)) ||
355 	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
356 	    (! (*xargs)(xdrs, argsp))) {
357 		release_fd_lock(cu->cu_fd, mask);
358 		return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
359 	}
360 	outlen = (size_t)XDR_GETPOS(xdrs);
361 
362 send_again:
363 	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0,
364 	    (struct sockaddr *)(void *)&cu->cu_raddr, (socklen_t)cu->cu_rlen)
365 	    != outlen) {
366 		cu->cu_error.re_errno = errno;
367 		release_fd_lock(cu->cu_fd, mask);
368 		return (cu->cu_error.re_status = RPC_CANTSEND);
369 	}
370 
371 	/*
372 	 * Hack to provide rpc-based message passing
373 	 */
374 	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
375 		release_fd_lock(cu->cu_fd, mask);
376 		return (cu->cu_error.re_status = RPC_TIMEDOUT);
377 	}
378 
379 get_reply:
380 
381 	/*
382 	 * sub-optimal code appears here because we have
383 	 * some clock time to spare while the packets are in flight.
384 	 * (We assume that this is actually only executed once.)
385 	 */
386 	reply_msg.acpted_rply.ar_verf = _null_auth;
387 	reply_msg.acpted_rply.ar_results.where = resultsp;
388 	reply_msg.acpted_rply.ar_results.proc = xresults;
389 
390 
391 	for (;;) {
392 		switch (_poll(&cu->pfdp, 1,
393 		    __rpc_timeval_to_msec(&retransmit_time))) {
394 		case 0:
395 			time_waited.tv_sec += retransmit_time.tv_sec;
396 			time_waited.tv_usec += retransmit_time.tv_usec;
397 			while (time_waited.tv_usec >= 1000000) {
398 				time_waited.tv_sec++;
399 				time_waited.tv_usec -= 1000000;
400 			}
401 			/* update retransmit_time */
402 			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF) {
403 				retransmit_time.tv_usec *= 2;
404 				retransmit_time.tv_sec *= 2;
405 				while (retransmit_time.tv_usec >= 1000000) {
406 					retransmit_time.tv_sec++;
407 					retransmit_time.tv_usec -= 1000000;
408 				}
409 			}
410 
411 			if ((time_waited.tv_sec < timeout.tv_sec) ||
412 			    ((time_waited.tv_sec == timeout.tv_sec) &&
413 				(time_waited.tv_usec < timeout.tv_usec)))
414 				goto send_again;
415 			release_fd_lock(cu->cu_fd, mask);
416 			return (cu->cu_error.re_status = RPC_TIMEDOUT);
417 
418 		case -1:
419 			if (errno == EBADF) {
420 				cu->cu_error.re_errno = errno;
421 				release_fd_lock(cu->cu_fd, mask);
422 				return (cu->cu_error.re_status = RPC_CANTRECV);
423 			}
424 			if (errno != EINTR) {
425 				errno = 0; /* reset it */
426 				continue;
427 			}
428 			/* interrupted by another signal, update time_waited */
429 			if (firsttimeout) {
430 				/*
431 				 * Could have done gettimeofday before clnt_call
432 				 * but that would mean one more system call per
433 				 * clnt_call, so do it after the first timeout.
434 				 */
435 				if (gettimeofday(&startime,
436 					(struct timezone *) NULL) == -1) {
437 					errno = 0;
438 					continue;
439 				}
440 				firsttimeout = 0;
441 				errno = 0;
442 				continue;
443 			}
444 			if (gettimeofday(&curtime,
445 				(struct timezone *) NULL) == -1) {
446 				errno = 0;
447 				continue;
448 			}
449 			time_waited.tv_sec += curtime.tv_sec - startime.tv_sec;
450 			time_waited.tv_usec += curtime.tv_usec -
451 							startime.tv_usec;
452 			while (time_waited.tv_usec < 0) {
453 				time_waited.tv_sec--;
454 				time_waited.tv_usec += 1000000;
455 			}
456 			while (time_waited.tv_usec >= 1000000) {
457 				time_waited.tv_sec++;
458 				time_waited.tv_usec -= 1000000;
459 			}
460 			startime.tv_sec = curtime.tv_sec;
461 			startime.tv_usec = curtime.tv_usec;
462 			if ((time_waited.tv_sec > timeout.tv_sec) ||
463 				((time_waited.tv_sec == timeout.tv_sec) &&
464 				(time_waited.tv_usec > timeout.tv_usec))) {
465 				release_fd_lock(cu->cu_fd, mask);
466 				return (cu->cu_error.re_status = RPC_TIMEDOUT);
467 			}
468 			errno = 0; /* reset it */
469 			continue;
470 		}
471 
472 		if (cu->pfdp.revents & POLLNVAL || (cu->pfdp.revents == 0)) {
473 			cu->cu_error.re_status = RPC_CANTRECV;
474 			/*
475 			 *	Note:  we're faking errno here because we
476 			 *	previously would have expected _poll() to
477 			 *	return -1 with errno EBADF.  Poll(BA_OS)
478 			 *	returns 0 and sets the POLLNVAL revents flag
479 			 *	succeeds and sets the POLLNVAL revents flag
480 			 */
481 			cu->cu_error.re_errno = errno = EBADF;
482 			release_fd_lock(cu->cu_fd, mask);
483 			return (cu->cu_error.re_status);
484 		}
485 
486 		/* We have some data now */
487 		do {
488 			if (errno == EINTR) {
489 				/*
490 				 * Must make sure errno was not already
491 				 * EINTR in case _recvfrom() returns -1.
492 				 */
493 				errno = 0;
494 			}
495 			fromlen = sizeof (struct sockaddr_storage);
496 			recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
497 			    cu->cu_recvsz, 0, (struct sockaddr *)(void *)&cu->cu_raddr,
498 			    &fromlen);
499 		} while (recvlen < 0 && errno == EINTR);
500 		if (recvlen < 0) {
501 			if (errno == EWOULDBLOCK)
502 				continue;
503 			cu->cu_error.re_errno = errno;
504 			release_fd_lock(cu->cu_fd, mask);
505 			return (cu->cu_error.re_status = RPC_CANTRECV);
506 		}
507 		if (recvlen < (ssize_t)sizeof (u_int32_t))
508 			continue;
509 		/* see if reply transaction id matches sent id */
510 		if (cu->cu_async == FALSE &&
511 		    *((u_int32_t *)(void *)(cu->cu_inbuf)) !=
512 		    *((u_int32_t *)(void *)(cu->cu_outbuf)))
513 			continue;
514 		/* we now assume we have the proper reply */
515 		break;
516 	}
517 	inlen = (socklen_t)recvlen;
518 
519 	/*
520 	 * now decode and validate the response
521 	 */
522 
523 	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
524 	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
525 	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
526 	if (ok) {
527 		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
528 			(reply_msg.acpted_rply.ar_stat == SUCCESS))
529 			cu->cu_error.re_status = RPC_SUCCESS;
530 		else
531 			_seterr_reply(&reply_msg, &(cu->cu_error));
532 
533 		if (cu->cu_error.re_status == RPC_SUCCESS) {
534 			if (! AUTH_VALIDATE(cl->cl_auth,
535 					    &reply_msg.acpted_rply.ar_verf)) {
536 				cu->cu_error.re_status = RPC_AUTHERROR;
537 				cu->cu_error.re_why = AUTH_INVALIDRESP;
538 			}
539 			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
540 				xdrs->x_op = XDR_FREE;
541 				(void) xdr_opaque_auth(xdrs,
542 					&(reply_msg.acpted_rply.ar_verf));
543 			}
544 		}		/* end successful completion */
545 		/*
546 		 * If unsuccessful AND error is an authentication error
547 		 * then refresh credentials and try again, else break
548 		 */
549 		else if (cu->cu_error.re_status == RPC_AUTHERROR)
550 			/* maybe our credentials need to be refreshed ... */
551 			if (nrefreshes > 0 &&
552 			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
553 				nrefreshes--;
554 				goto call_again;
555 			}
556 		/* end of unsuccessful completion */
557 	}	/* end of valid reply message */
558 	else {
559 		cu->cu_error.re_status = RPC_CANTDECODERES;
560 
561 	}
562 	release_fd_lock(cu->cu_fd, mask);
563 	return (cu->cu_error.re_status);
564 }
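
/*
 * Illustrative sketch only (not compiled): how the retransmission and
 * total timeouts above are typically driven from the caller's side.
 * CLSET_RETRY_TIMEOUT sets cu_wait (the initial retransmit interval,
 * doubled after each timeout while it is below RPC_MAX_BACKOFF), while
 * CLSET_TIMEOUT sets cu_total, which then overrides the timeout argument
 * passed to clnt_call().  "clnt" and "procnum" are placeholders.
 */
#if 0
	struct timeval retry = { 2, 0 };	/* first retransmit after 2s */
	struct timeval total = { 25, 0 };	/* give up after 25s */
	enum clnt_stat stat;

	(void) clnt_control(clnt, CLSET_RETRY_TIMEOUT, (char *)&retry);
	stat = clnt_call(clnt, procnum, (xdrproc_t)xdr_void, NULL,
	    (xdrproc_t)xdr_void, NULL, total);
	if (stat != RPC_SUCCESS)
		clnt_perror(clnt, "clnt_call");
#endif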
565 
566 static void
567 clnt_dg_geterr(cl, errp)
568 	CLIENT *cl;
569 	struct rpc_err *errp;
570 {
571 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
572 
573 	*errp = cu->cu_error;
574 }
575 
576 static bool_t
577 clnt_dg_freeres(cl, xdr_res, res_ptr)
578 	CLIENT *cl;
579 	xdrproc_t xdr_res;
580 	caddr_t res_ptr;
581 {
582 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
583 	XDR *xdrs = &(cu->cu_outxdrs);
584 	bool_t dummy;
585 	sigset_t mask;
586 	sigset_t newmask;
587 
588 	sigfillset(&newmask);
589 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
590 	mutex_lock(&clnt_fd_lock);
591 	while (dg_fd_locks[cu->cu_fd])
592 		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
593 	xdrs->x_op = XDR_FREE;
594 	dummy = (*xdr_res)(xdrs, res_ptr);
595 	mutex_unlock(&clnt_fd_lock);
596 	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
597 	cond_signal(&dg_cv[cu->cu_fd]);
598 	return (dummy);
599 }
600 
601 /*ARGSUSED*/
602 static void
603 clnt_dg_abort(h)
604 	CLIENT *h;
605 {
606 }
607 
608 static bool_t
609 clnt_dg_control(cl, request, info)
610 	CLIENT *cl;
611 	u_int request;
612 	char *info;
613 {
614 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
615 	struct netbuf *addr;
616 	sigset_t mask;
617 	sigset_t newmask;
618 	int rpc_lock_value;
619 
620 	sigfillset(&newmask);
621 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
622 	mutex_lock(&clnt_fd_lock);
623 	while (dg_fd_locks[cu->cu_fd])
624 		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
625 	if (__isthreaded)
626 		rpc_lock_value = 1;
627 	else
628 		rpc_lock_value = 0;
629 	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
630 	mutex_unlock(&clnt_fd_lock);
631 	switch (request) {
632 	case CLSET_FD_CLOSE:
633 		cu->cu_closeit = TRUE;
634 		release_fd_lock(cu->cu_fd, mask);
635 		return (TRUE);
636 	case CLSET_FD_NCLOSE:
637 		cu->cu_closeit = FALSE;
638 		release_fd_lock(cu->cu_fd, mask);
639 		return (TRUE);
640 	}
641 
642 	/* for other requests which use info */
643 	if (info == NULL) {
644 		release_fd_lock(cu->cu_fd, mask);
645 		return (FALSE);
646 	}
647 	switch (request) {
648 	case CLSET_TIMEOUT:
649 		if (time_not_ok((struct timeval *)(void *)info)) {
650 			release_fd_lock(cu->cu_fd, mask);
651 			return (FALSE);
652 		}
653 		cu->cu_total = *(struct timeval *)(void *)info;
654 		break;
655 	case CLGET_TIMEOUT:
656 		*(struct timeval *)(void *)info = cu->cu_total;
657 		break;
658 	case CLGET_SERVER_ADDR:		/* Give him the server's address */
659 		/* Now obsolete. Only for backward compatibility */
660 		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
661 		break;
662 	case CLSET_RETRY_TIMEOUT:
663 		if (time_not_ok((struct timeval *)(void *)info)) {
664 			release_fd_lock(cu->cu_fd, mask);
665 			return (FALSE);
666 		}
667 		cu->cu_wait = *(struct timeval *)(void *)info;
668 		break;
669 	case CLGET_RETRY_TIMEOUT:
670 		*(struct timeval *)(void *)info = cu->cu_wait;
671 		break;
672 	case CLGET_FD:
673 		*(int *)(void *)info = cu->cu_fd;
674 		break;
675 	case CLGET_SVC_ADDR:
676 		addr = (struct netbuf *)(void *)info;
677 		addr->buf = &cu->cu_raddr;
678 		addr->len = cu->cu_rlen;
679 		addr->maxlen = sizeof cu->cu_raddr;
680 		break;
681 	case CLSET_SVC_ADDR:		/* set to new address */
682 		addr = (struct netbuf *)(void *)info;
683 		if (addr->len < sizeof cu->cu_raddr) {
684 			release_fd_lock(cu->cu_fd, mask);
685 			return (FALSE);
686 		}
687 		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
688 		cu->cu_rlen = addr->len;
689 		break;
690 	case CLGET_XID:
691 		/*
692 		 * use the knowledge that xid is the
693 		 * first element in the call structure.
694 		 * This will get the xid of the PREVIOUS call.
695 		 */
696 		*(u_int32_t *)(void *)info =
697 		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
698 		break;
699 
700 	case CLSET_XID:
701 		/* This will set the xid of the NEXT call */
702 		*(u_int32_t *)(void *)cu->cu_outbuf =
703 		    htonl(*(u_int32_t *)(void *)info - 1);
704 		/* decrement by 1 as clnt_dg_call() increments once */
705 		break;
706 
707 	case CLGET_VERS:
708 		/*
709 		 * This RELIES on the information that, in the call body,
710 		 * the version number field is the fifth field from the
711 		 * beginning of the RPC header.  MUST be changed if the
712 		 * call_struct is changed
713 		 */
714 		*(u_int32_t *)(void *)info =
715 		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
716 		    4 * BYTES_PER_XDR_UNIT));
717 		break;
718 
719 	case CLSET_VERS:
720 		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
721 			= htonl(*(u_int32_t *)(void *)info);
722 		break;
723 
724 	case CLGET_PROG:
725 		/*
726 		 * This RELIES on the information that, in the call body,
727 		 * the program number field is the fourth field from the
728 		 * beginning of the RPC header.  MUST be changed if the
729 		 * call_struct is changed
730 		 */
731 		*(u_int32_t *)(void *)info =
732 		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
733 		    3 * BYTES_PER_XDR_UNIT));
734 		break;
735 
736 	case CLSET_PROG:
737 		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
738 			= htonl(*(u_int32_t *)(void *)info);
739 		break;
740 	case CLSET_ASYNC:
741 		cu->cu_async = *(int *)(void *)info;
742 		break;
743 	default:
744 		release_fd_lock(cu->cu_fd, mask);
745 		return (FALSE);
746 	}
747 	release_fd_lock(cu->cu_fd, mask);
748 	return (TRUE);
749 }
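
/*
 * For reference, the call header that the CLGET/CLSET_XID, _VERS and
 * _PROG cases above poke at is marshalled as one 32-bit XDR unit per
 * field, in this order:
 *
 *	xid, direction (CALL), rpc version (2), program, version
 *
 * (the procedure number is appended at call time), which is why the
 * program number lives 3 and the version number 4 XDR units past
 * cu_outbuf.
 */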
750 
751 static void
752 clnt_dg_destroy(cl)
753 	CLIENT *cl;
754 {
755 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
756 	int cu_fd = cu->cu_fd;
757 	sigset_t mask;
758 	sigset_t newmask;
759 
760 	sigfillset(&newmask);
761 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
762 	mutex_lock(&clnt_fd_lock);
763 	while (dg_fd_locks[cu_fd])
764 		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
765 	if (cu->cu_closeit)
766 		(void)_close(cu_fd);
767 	XDR_DESTROY(&(cu->cu_outxdrs));
768 	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
769 	if (cl->cl_netid && cl->cl_netid[0])
770 		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
771 	if (cl->cl_tp && cl->cl_tp[0])
772 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
773 	mem_free(cl, sizeof (CLIENT));
774 	mutex_unlock(&clnt_fd_lock);
775 	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
776 	cond_signal(&dg_cv[cu_fd]);
777 }
778 
779 static struct clnt_ops *
780 clnt_dg_ops()
781 {
782 	static struct clnt_ops ops;
783 	extern mutex_t	ops_lock;
784 	sigset_t mask;
785 	sigset_t newmask;
786 
787 /* VARIABLES PROTECTED BY ops_lock: ops */
788 
789 	sigfillset(&newmask);
790 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
791 	mutex_lock(&ops_lock);
792 	if (ops.cl_call == NULL) {
793 		ops.cl_call = clnt_dg_call;
794 		ops.cl_abort = clnt_dg_abort;
795 		ops.cl_geterr = clnt_dg_geterr;
796 		ops.cl_freeres = clnt_dg_freeres;
797 		ops.cl_destroy = clnt_dg_destroy;
798 		ops.cl_control = clnt_dg_control;
799 	}
800 	mutex_unlock(&ops_lock);
801 	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
802 	return (&ops);
803 }
804 
805 /*
806  * Make sure that the time is not garbage.  -1 value is allowed.
807  */
808 static bool_t
809 time_not_ok(t)
810 	struct timeval *t;
811 {
812 	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
813 		t->tv_usec < -1 || t->tv_usec > 1000000);
814 }
815 
816 
817 /*
818  *	Convert from timevals (used by select) to milliseconds (used by poll).
819  */
820 static int
821 __rpc_timeval_to_msec(t)
822 	struct timeval	*t;
823 {
824 	int	t1, tmp;
825 
826 	/*
827 	 *	We're really returning t->tv_sec * 1000 + (t->tv_usec / 1000)
828 	 *	but try to do so efficiently.  Note:  1000 = 1024 - 16 - 8.
829 	 */
830 	tmp = (int)t->tv_sec << 3;	/* tmp = 8 * tv_sec */
831 	t1 = -tmp;			/* t1 = -8 * tv_sec */
832 	t1 += t1 << 1;			/* t1 = -24 * tv_sec */
833 	t1 += tmp << 7;			/* t1 = (1024 - 24) * tv_sec = 1000 * tv_sec */
834 	if (t->tv_usec)
835 		t1 += (int)(t->tv_usec / 1000);
836 
837 	return (t1);
838 }
839