xref: /illumos-gate/usr/src/lib/libnsl/rpc/svc.c (revision 898c3fec4c1cbcd45754042f6635b76a9067abd6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25  */
26 /*
27  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28  */
29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31 /*
32  * Portions of this source code were derived from Berkeley
33  * 4.3 BSD under license from the Regents of the University of
34  * California.
35  */
36 
37 /*
38  * svc.c, Server-side remote procedure call interface.
39  *
40  * There are two sets of procedures here.  The xprt routines are
41  * for handling transport handles.  The svc routines handle the
42  * list of service routines.
43  *
44  */
45 
46 #include "mt.h"
47 #include "rpc_mt.h"
48 #include <assert.h>
49 #include <errno.h>
50 #include <sys/types.h>
51 #include <stropts.h>
52 #include <sys/conf.h>
53 #include <rpc/rpc.h>
54 #ifdef PORTMAP
55 #include <rpc/pmap_clnt.h>
56 #endif
57 #include <sys/poll.h>
58 #include <netconfig.h>
59 #include <syslog.h>
60 #include <stdlib.h>
61 #include <unistd.h>
62 #include <string.h>
63 #include <limits.h>
64 
65 extern bool_t __svc_get_door_cred();
66 extern bool_t __rpc_get_local_cred();
67 
68 SVCXPRT **svc_xports;
69 static int nsvc_xports; 	/* total number of svc_xports allocated */
70 
71 XDR **svc_xdrs;		/* common XDR receive area */
72 int nsvc_xdrs;		/* total number of svc_xdrs allocated */
73 
74 int __rpc_use_pollfd_done;	/* to unlimit the number of connections */
75 
76 #define	NULL_SVC ((struct svc_callout *)0)
77 #define	RQCRED_SIZE	400		/* this size is excessive */
78 
79 /*
80  * The services list
81  * Each entry represents a set of procedures (an rpc program).
82  * The dispatch routine takes request structs and runs the
83  * appropriate procedure.
84  */
85 static struct svc_callout {
86 	struct svc_callout *sc_next;
87 	rpcprog_t	    sc_prog;
88 	rpcvers_t	    sc_vers;
89 	char		   *sc_netid;
90 	void		    (*sc_dispatch)();
91 } *svc_head;
92 extern rwlock_t	svc_lock;
93 
94 static struct svc_callout *svc_find();
95 int _svc_prog_dispatch();
96 void svc_getreq_common();
97 char *strdup();
98 
99 extern mutex_t	svc_door_mutex;
100 extern cond_t	svc_door_waitcv;
101 extern int	svc_ndoorfds;
102 extern SVCXPRT_LIST *_svc_xprtlist;
103 extern mutex_t xprtlist_lock;
104 extern void __svc_rm_from_xlist();
105 
106 #if !defined(_LP64)
107 extern fd_set _new_svc_fdset;
108 #endif
109 
110 /*
111  * If the allocated array of reactors is too small, it is grown by this
112  * margin. This reduces the number of reallocations.
113  */
114 #define	USER_FD_INCREMENT 5
115 
116 static void add_pollfd(int fd, short events);
117 static void remove_pollfd(int fd);
118 static void __svc_remove_input_of_fd(int fd);
119 
120 /*
121  * Data used to handle each reactor:
122  *	- one file descriptor we listen to,
123  *	- one callback we call when the fd becomes ready,
124  *	- and a cookie passed as a parameter to the callback.
125  *
126  * The structure is an array indexed by file descriptor. Each entry
127  * points to the first element of a doubly-linked list of callbacks.
128  * Only one callback may be associated with a given (fd, event) pair.
129  */
130 
131 struct _svc_user_fd_head;
132 
133 typedef struct {
134 	struct _svc_user_fd_node *next;
135 	struct _svc_user_fd_node *previous;
136 } _svc_user_link;
137 
138 typedef struct _svc_user_fd_node {
139 	_svc_user_link lnk;
140 	svc_input_id_t id;
141 	int	    fd;
142 	unsigned int   events;
143 	svc_callback_t callback;
144 	void*	  cookie;
145 } _svc_user_fd_node;
146 
147 typedef struct _svc_user_fd_head {
148 	struct _svc_user_fd_node *list;
149 	unsigned int mask;    /* logical OR of all sub-masks */
150 } _svc_user_fd_head;
151 
152 
153 /* Array of defined reactors, indexed by file descriptor */
154 static _svc_user_fd_head *svc_userfds = NULL;
155 
156 /* current size of the svc_userfds array */
157 static int svc_nuserfds = 0;
158 
159 /* Mutex to ensure MT safe operations for user fds callbacks. */
160 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
161 
162 
163 /*
164  * This structure is used to get constant-time algorithms. There is an
165  * array of this structure, svc_nmgtuserfds entries large. When the user
166  * registers a new callback, the address of the created structure is
167  * stored in a cell of this array. The index of that cell is the
168  * returned unique identifier.
169  *
170  * On removal, the id is given by the user, and the free flag tells us
171  * whether the cell is in use. If it is free, we return an error.
172  * Otherwise, we can free the structure pointed to by fd_node.
173  *
174  * On insertion, we take a cell from the free list threaded through
175  * (first_free, next_free), giving a correct index in constant time.
176  */
177 
178 typedef struct _svc_management_user_fd {
179 	bool_t free;
180 	union {
181 		svc_input_id_t next_free;
182 		_svc_user_fd_node *fd_node;
183 	} data;
184 } _svc_management_user_fd;
185 
186 /* index to the first free elem */
187 static svc_input_id_t first_free = (svc_input_id_t)-1;
188 /* the size of this array is svc_nmgtuserfds */
189 static _svc_management_user_fd* user_fd_mgt_array = NULL;
190 
191 /* current size of user_fd_mgt_array */
192 static int svc_nmgtuserfds = 0;
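
/*
 * Worked example of the free-list bookkeeping above: USER_FD_INCREMENT
 * is 5, so the first growth of user_fd_mgt_array frees cells 4, 3, 2, 1,
 * 0 in that order. first_free then ends up 0, each cell i chains to cell
 * i + 1, and cell 4 chains to -1. Allocating an id pops the cell at the
 * head of the list; freeing an id pushes its cell back at the head. Both
 * operations are therefore constant time.
 */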
193 
194 
195 /* Define some macros to access data associated to registration ids. */
196 #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
197 #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
198 
199 #ifndef POLLSTANDARD
200 #define	POLLSTANDARD \
201 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
202 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
203 #endif
204 
205 /*
206  * To free an id, we mark the cell as free and insert it at the head of
207  * the free list.
208  */
209 
210 static void
211 _svc_free_id(const svc_input_id_t id)
212 {
213 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
214 	user_fd_mgt_array[(int)id].free = TRUE;
215 	user_fd_mgt_array[(int)id].data.next_free = first_free;
216 	first_free = id;
217 }
218 
219 /*
220  * To get a free cell, we just have to take it from the free linked list and
221  * set the flag to "not free". This function also allocates new memory if
222  * necessary.
223  */
224 static svc_input_id_t
225 _svc_attribute_new_id(_svc_user_fd_node *node)
226 {
227 	int selected_index = (int)first_free;
228 	assert(node != NULL);
229 
230 	if (selected_index == -1) {
231 		/* Allocate new entries */
232 		int L_inOldSize = svc_nmgtuserfds;
233 		int i;
234 		_svc_management_user_fd *tmp;
235 
236 		svc_nmgtuserfds += USER_FD_INCREMENT;
237 
238 		tmp = realloc(user_fd_mgt_array,
239 		    svc_nmgtuserfds * sizeof (_svc_management_user_fd));
240 
241 		if (tmp == NULL) {
242 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
243 			svc_nmgtuserfds = L_inOldSize;
244 			errno = ENOMEM;
245 			return ((svc_input_id_t)-1);
246 		}
247 
248 		user_fd_mgt_array = tmp;
249 
250 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
251 			_svc_free_id((svc_input_id_t)i);
252 		selected_index = (int)first_free;
253 	}
254 
255 	node->id = (svc_input_id_t)selected_index;
256 	first_free = user_fd_mgt_array[selected_index].data.next_free;
257 
258 	user_fd_mgt_array[selected_index].data.fd_node = node;
259 	user_fd_mgt_array[selected_index].free = FALSE;
260 
261 	return ((svc_input_id_t)selected_index);
262 }
263 
264 /*
265  * Process a pollfd event. Scan all the associated callbacks whose event
266  * mask has at least one bit in common with the received events.
267  *
268  * If the POLLNVAL event is received, we check whether one of the
269  * callbacks processes it; if not, we remove the file descriptor from the
270  * poll set. If there is one, we let the user do the work.
271  */
272 void
273 __svc_getreq_user(struct pollfd *pfd)
274 {
275 	int fd = pfd->fd;
276 	short revents = pfd->revents;
277 	bool_t invalHandled = FALSE;
278 	_svc_user_fd_node *node;
279 
280 	(void) mutex_lock(&svc_userfds_lock);
281 
282 	if ((fd < 0) || (fd >= svc_nuserfds)) {
283 		(void) mutex_unlock(&svc_userfds_lock);
284 		return;
285 	}
286 
287 	node = svc_userfds[fd].list;
288 
289 	/* check if at least one mask fits */
290 	if (0 == (revents & svc_userfds[fd].mask)) {
291 		(void) mutex_unlock(&svc_userfds_lock);
292 		return;
293 	}
294 
295 	while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
296 		/*
297 		 * If one of the received events matches the ones the node
298 		 * listens to.
299 		 */
300 		_svc_user_fd_node *next = node->lnk.next;
301 
302 		if (node->callback != NULL) {
303 			if (node->events & revents) {
304 				if (revents & POLLNVAL) {
305 					invalHandled = TRUE;
306 				}
307 
308 				/*
309 				 * The lock must be released before calling the
310 				 * user function, as this function can call
311 				 * svc_remove_input() for example.
312 				 */
313 				(void) mutex_unlock(&svc_userfds_lock);
314 				node->callback(node->id, node->fd,
315 				    node->events & revents, node->cookie);
316 				/*
317 				 * Do not use the node structure anymore, as it
318 				 * could have been deallocated by the previous
319 				 * callback.
320 				 */
321 				(void) mutex_lock(&svc_userfds_lock);
322 			}
323 		}
324 		node = next;
325 	}
326 
327 	if ((revents & POLLNVAL) && !invalHandled)
328 		__svc_remove_input_of_fd(fd);
329 	(void) mutex_unlock(&svc_userfds_lock);
330 }
331 
332 
333 /*
334  * Check if a file descriptor is associated with a user reactor.
335  * To do this, just check that the entry indexed by fd has a non-zero
336  * event mask (i.e., at least one callback is registered).
337  */
338 bool_t
339 __is_a_userfd(int fd)
340 {
341 	/* Checks argument */
342 	if ((fd < 0) || (fd >= svc_nuserfds))
343 		return (FALSE);
344 	return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
345 }
346 
347 /* Free everything concerning user fds. */
348 /* Used in svc_run.c, hence not static. */
349 
350 void
351 __destroy_userfd(void)
352 {
353 	int one_fd;
354 	/* Clean user fd */
355 	if (svc_userfds != NULL) {
356 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
357 			_svc_user_fd_node *node;
358 
359 			node = svc_userfds[one_fd].list;
360 			while (node != NULL) {
361 				_svc_user_fd_node *tmp = node;
362 				_svc_free_id(node->id);
363 				node = node->lnk.next;
364 				free(tmp);
365 			}
366 		}
367 
368 		free(user_fd_mgt_array);
369 		user_fd_mgt_array = NULL;
370 		first_free = (svc_input_id_t)-1;
371 
372 		free(svc_userfds);
373 		svc_userfds = NULL;
374 		svc_nuserfds = 0;
375 	}
376 }
377 
378 /*
379  * Remove all the callbacks associated with an fd; useful when the fd
380  * is closed, for instance.
381  */
382 static void
383 __svc_remove_input_of_fd(int fd)
384 {
385 	_svc_user_fd_node **pnode;
386 	_svc_user_fd_node *tmp;
387 
388 	if ((fd < 0) || (fd >= svc_nuserfds))
389 		return;
390 
391 	pnode = &svc_userfds[fd].list;
392 	while ((tmp = *pnode) != NULL) {
393 		*pnode = tmp->lnk.next;
394 
395 		_svc_free_id(tmp->id);
396 		free(tmp);
397 	}
398 
399 	svc_userfds[fd].mask = 0;
400 }
401 
402 /*
403  * Allow the user to add an fd to the poll list. If it does not succeed,
404  * return (svc_input_id_t)-1. Otherwise, return the registration id.
405  */
406 
407 svc_input_id_t
408 svc_add_input(int user_fd, unsigned int events,
409     svc_callback_t user_callback, void *cookie)
410 {
411 	_svc_user_fd_node *new_node;
412 
413 	if (user_fd < 0) {
414 		errno = EINVAL;
415 		return ((svc_input_id_t)-1);
416 	}
417 
418 	if ((events == 0x0000) ||
419 	    (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
420 	    POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
421 		errno = EINVAL;
422 		return ((svc_input_id_t)-1);
423 	}
424 
425 	(void) mutex_lock(&svc_userfds_lock);
426 
427 	if ((user_fd < svc_nuserfds) &&
428 	    (svc_userfds[user_fd].mask & events) != 0) {
429 		/* Callback already registered */
430 		errno = EEXIST;
431 		(void) mutex_unlock(&svc_userfds_lock);
432 		return ((svc_input_id_t)-1);
433 	}
434 
435 	/* Handle memory allocation. */
436 	if (user_fd >= svc_nuserfds) {
437 		int oldSize = svc_nuserfds;
438 		int i;
439 		_svc_user_fd_head *tmp;
440 
441 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
442 
443 		tmp = realloc(svc_userfds,
444 		    svc_nuserfds * sizeof (_svc_user_fd_head));
445 
446 		if (tmp == NULL) {
447 			syslog(LOG_ERR, "svc_add_input: out of memory");
448 			svc_nuserfds = oldSize;
449 			errno = ENOMEM;
450 			(void) mutex_unlock(&svc_userfds_lock);
451 			return ((svc_input_id_t)-1);
452 		}
453 
454 		svc_userfds = tmp;
455 
456 		for (i = oldSize; i < svc_nuserfds; i++) {
457 			svc_userfds[i].list = NULL;
458 			svc_userfds[i].mask = 0;
459 		}
460 	}
461 
462 	new_node = malloc(sizeof (_svc_user_fd_node));
463 	if (new_node == NULL) {
464 		syslog(LOG_ERR, "svc_add_input: out of memory");
465 		errno = ENOMEM;
466 		(void) mutex_unlock(&svc_userfds_lock);
467 		return ((svc_input_id_t)-1);
468 	}
469 
470 	/* create a new node */
471 	new_node->fd		= user_fd;
472 	new_node->events	= events;
473 	new_node->callback	= user_callback;
474 	new_node->cookie	= cookie;
475 
476 	if (_svc_attribute_new_id(new_node) == -1) {
477 		(void) mutex_unlock(&svc_userfds_lock);
478 		free(new_node);
479 		return ((svc_input_id_t)-1);
480 	}
481 
482 	/* Add the new element at the beginning of the list. */
483 	if (svc_userfds[user_fd].list != NULL)
484 		svc_userfds[user_fd].list->lnk.previous = new_node;
485 	new_node->lnk.next = svc_userfds[user_fd].list;
486 	new_node->lnk.previous = NULL;
487 
488 	svc_userfds[user_fd].list = new_node;
489 
490 	/* refresh global mask for this file descriptor */
491 	svc_userfds[user_fd].mask |= events;
492 
493 	/* refresh mask for the poll */
494 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
495 
496 	(void) mutex_unlock(&svc_userfds_lock);
497 	return (new_node->id);
498 }
499 
500 int
501 svc_remove_input(svc_input_id_t id)
502 {
503 	_svc_user_fd_node* node;
504 	_svc_user_fd_node* next;
505 	_svc_user_fd_node* previous;
506 	int fd;		/* caching optim */
507 
508 	(void) mutex_lock(&svc_userfds_lock);
509 
510 	/* Immediately update data for id management */
511 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
512 	    is_free_id(id)) {
513 		errno = EINVAL;
514 		(void) mutex_unlock(&svc_userfds_lock);
515 		return (-1);
516 	}
517 
518 	node = node_from_id(id);
519 	assert(node != NULL);
520 
521 	_svc_free_id(id);
522 	next		= node->lnk.next;
523 	previous	= node->lnk.previous;
524 	fd		= node->fd; /* caching optim */
525 
526 	/* Remove this node from the list. */
527 	if (previous != NULL) {
528 		previous->lnk.next = next;
529 	} else {
530 		assert(svc_userfds[fd].list == node);
531 		svc_userfds[fd].list = next;
532 	}
533 	if (next != NULL)
534 		next->lnk.previous = previous;
535 
536 	/* Remove the node flags from the global mask */
537 	svc_userfds[fd].mask ^= node->events;
538 
539 	free(node);
540 	if (svc_userfds[fd].mask == 0) {
541 		assert(svc_userfds[fd].list == NULL);
542 		remove_pollfd(fd);
543 	} else {
544 		assert(svc_userfds[fd].list != NULL);
545 	}
546 	/* A cleanup pass here could shrink memory usage. */
547 
548 	(void) mutex_unlock(&svc_userfds_lock);
549 	return (0);
550 }
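
/*
 * Illustrative sketch of how a server uses the two entry points above
 * (not part of the library; my_fd, my_callback and my_cookie are
 * hypothetical names):
 *
 *	static void
 *	my_callback(svc_input_id_t id, int fd, unsigned int events,
 *	    void *cookie)
 *	{
 *		... consume whatever made fd ready; cookie is the
 *		... pointer registered below
 *	}
 *
 *	svc_input_id_t id;
 *
 *	id = svc_add_input(my_fd, POLLIN, my_callback, my_cookie);
 *	if ((int)id == -1)
 *		syslog(LOG_ERR, "svc_add_input failed");
 *	...
 *	(void) svc_remove_input(id);	... before closing my_fd
 *
 * svc_run() then invokes my_callback() whenever poll() reports one of
 * the registered events on my_fd.
 */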
551 
552 /*
553  * Provides default service-side functions for authentication flavors
554  * that do not use all the fields in struct svc_auth_ops.
555  */
556 
557 /*ARGSUSED*/
558 static int
559 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
560 {
561 	return (*xfunc)(xdrs, xwhere);
562 }
563 
564 struct svc_auth_ops svc_auth_any_ops = {
565 	authany_wrap,
566 	authany_wrap,
567 };
568 
569 /*
570  * Return pointer to server authentication structure.
571  */
572 SVCAUTH *
573 __svc_get_svcauth(SVCXPRT *xprt)
574 {
575 /* LINTED pointer alignment */
576 	return (&SVC_XP_AUTH(xprt));
577 }
578 
579 /*
580  * A callback routine to cleanup after a procedure is executed.
581  */
582 void (*__proc_cleanup_cb)() = NULL;
583 
584 void *
585 __svc_set_proc_cleanup_cb(void *cb)
586 {
587 	void	*tmp = (void *)__proc_cleanup_cb;
588 
589 	__proc_cleanup_cb = (void (*)())cb;
590 	return (tmp);
591 }
592 
593 /* ***************  SVCXPRT related stuff **************** */
594 
595 
596 static int pollfd_shrinking = 1;
597 
598 
599 /*
600  * Add fd to svc_pollfd
601  */
602 static void
603 add_pollfd(int fd, short events)
604 {
605 	if (fd < FD_SETSIZE) {
606 		FD_SET(fd, &svc_fdset);
607 #if !defined(_LP64)
608 		FD_SET(fd, &_new_svc_fdset);
609 #endif
610 		svc_nfds++;
611 		svc_nfds_set++;
612 		if (fd >= svc_max_fd)
613 			svc_max_fd = fd + 1;
614 	}
615 	if (fd >= svc_max_pollfd)
616 		svc_max_pollfd = fd + 1;
617 	if (svc_max_pollfd > svc_pollfd_allocd) {
618 		int i = svc_pollfd_allocd;
619 		pollfd_t *tmp;
620 		do {
621 			svc_pollfd_allocd += POLLFD_EXTEND;
622 		} while (svc_max_pollfd > svc_pollfd_allocd);
623 		tmp = realloc(svc_pollfd,
624 		    sizeof (pollfd_t) * svc_pollfd_allocd);
625 		if (tmp != NULL) {
626 			svc_pollfd = tmp;
627 			for (; i < svc_pollfd_allocd; i++)
628 				POLLFD_CLR(i, tmp);
629 		} else {
630 			/*
631 			 * Give an error message; undo the fdset setting
632 			 * above; reset the pollfd_shrinking flag.
633 			 * Because of this, poll will not be done
634 			 * on these fds.
635 			 */
636 			if (fd < FD_SETSIZE) {
637 				FD_CLR(fd, &svc_fdset);
638 #if !defined(_LP64)
639 				FD_CLR(fd, &_new_svc_fdset);
640 #endif
641 				svc_nfds--;
642 				svc_nfds_set--;
643 				if (fd == (svc_max_fd - 1))
644 					svc_max_fd--;
645 			}
646 			if (fd == (svc_max_pollfd - 1))
647 				svc_max_pollfd--;
648 			pollfd_shrinking = 0;
649 			syslog(LOG_ERR, "add_pollfd: out of memory");
650 			_exit(1);
651 		}
652 	}
653 	svc_pollfd[fd].fd	= fd;
654 	svc_pollfd[fd].events	= events;
655 	svc_npollfds++;
656 	svc_npollfds_set++;
657 }
658 
659 /*
660  * The fd is still active but only the bit in the fdset is cleared.
661  * Do not decrement svc_nfds or svc_npollfds.
662  */
663 void
664 clear_pollfd(int fd)
665 {
666 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
667 		FD_CLR(fd, &svc_fdset);
668 #if !defined(_LP64)
669 		FD_CLR(fd, &_new_svc_fdset);
670 #endif
671 		svc_nfds_set--;
672 	}
673 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
674 		POLLFD_CLR(fd, svc_pollfd);
675 		svc_npollfds_set--;
676 	}
677 }
678 
679 /*
680  * Set the bit in the fdset for an active fd so that poll() is done for it.
681  */
682 void
683 set_pollfd(int fd, short events)
684 {
685 	if (fd < FD_SETSIZE) {
686 		FD_SET(fd, &svc_fdset);
687 #if !defined(_LP64)
688 		FD_SET(fd, &_new_svc_fdset);
689 #endif
690 		svc_nfds_set++;
691 	}
692 	if (fd < svc_pollfd_allocd) {
693 		svc_pollfd[fd].fd	= fd;
694 		svc_pollfd[fd].events	= events;
695 		svc_npollfds_set++;
696 	}
697 }
698 
699 /*
700  * Remove a svc_pollfd entry; this does not shrink the memory.
701  */
702 static void
703 remove_pollfd(int fd)
704 {
705 	clear_pollfd(fd);
706 	if (fd == (svc_max_fd - 1))
707 		svc_max_fd--;
708 	svc_nfds--;
709 	if (fd == (svc_max_pollfd - 1))
710 		svc_max_pollfd--;
711 	svc_npollfds--;
712 }
713 
714 /*
715  * Delete a svc_pollfd entry; this shrinks the memory.
716  * Use remove_pollfd if you do not want to shrink.
717  */
718 static void
719 delete_pollfd(int fd)
720 {
721 	remove_pollfd(fd);
722 	if (pollfd_shrinking && svc_max_pollfd <
723 	    (svc_pollfd_allocd - POLLFD_SHRINK)) {
724 		do {
725 			svc_pollfd_allocd -= POLLFD_SHRINK;
726 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
727 		svc_pollfd = realloc(svc_pollfd,
728 		    sizeof (pollfd_t) * svc_pollfd_allocd);
729 		if (svc_pollfd == NULL) {
730 			syslog(LOG_ERR, "delete_pollfd: out of memory");
731 			_exit(1);
732 		}
733 	}
734 }
735 
736 
737 /*
738  * Activate a transport handle.
739  */
740 void
741 xprt_register(const SVCXPRT *xprt)
742 {
743 	int fd = xprt->xp_fd;
744 #ifdef CALLBACK
745 	extern void (*_svc_getreqset_proc)();
746 #endif
747 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
748 
749 	(void) rw_wrlock(&svc_fd_lock);
750 	if (svc_xports == NULL) {
751 		/* allocate some small amount first */
752 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
753 		if (svc_xports == NULL) {
754 			syslog(LOG_ERR, "xprt_register: out of memory");
755 			_exit(1);
756 		}
757 		nsvc_xports = FD_INCREMENT;
758 
759 #ifdef CALLBACK
760 		/*
761 		 * XXX: This code does not keep track of the server state.
762 		 *
763 		 * This provides for callback support.	When a client
764  * receives a call from another client on the server fds,
765  * it calls _svc_getreqset_proc(), which returns
766  * after serving all the server requests.  Also look under
767  * clnt_dg.c and clnt_vc.c (the clnt_call part of them).
768 		 */
769 		_svc_getreqset_proc = svc_getreq_poll;
770 #endif
771 	}
772 
773 	while (fd >= nsvc_xports) {
774 		SVCXPRT **tmp_xprts = svc_xports;
775 
776 		/* time to expand svc_xports */
777 		tmp_xprts = realloc(svc_xports,
778 		    sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
779 		if (tmp_xprts == NULL) {
780 			syslog(LOG_ERR, "xprt_register : out of memory.");
781 			_exit(1);
782 		}
783 
784 		svc_xports = tmp_xprts;
785 		(void) memset(&svc_xports[nsvc_xports], 0,
786 		    sizeof (SVCXPRT *) * FD_INCREMENT);
787 		nsvc_xports += FD_INCREMENT;
788 	}
789 
790 	svc_xports[fd] = (SVCXPRT *)xprt;
791 
792 	add_pollfd(fd, MASKVAL);
793 
794 	if (svc_polling) {
795 		char dummy;
796 
797 		/*
798 		 * This happens only in one of the MT modes.
799 		 * Wake up poller.
800 		 */
801 		(void) write(svc_pipe[1], &dummy, sizeof (dummy));
802 	}
803 	/*
804 	 * If already dispatching door based services, start
805 	 * dispatching TLI based services now.
806 	 */
807 	(void) mutex_lock(&svc_door_mutex);
808 	if (svc_ndoorfds > 0)
809 		(void) cond_signal(&svc_door_waitcv);
810 	(void) mutex_unlock(&svc_door_mutex);
811 
812 	if (svc_xdrs == NULL) {
813 		/* allocate initial chunk */
814 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
815 		if (svc_xdrs != NULL)
816 			nsvc_xdrs = FD_INCREMENT;
817 		else {
818 			syslog(LOG_ERR, "xprt_register : out of memory.");
819 			_exit(1);
820 		}
821 	}
822 	(void) rw_unlock(&svc_fd_lock);
823 }
824 
825 /*
826  * De-activate a transport handle.
827  */
828 void
829 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
830 {
831 	int fd = xprt->xp_fd;
832 
833 	if (lock_not_held)
834 		(void) rw_wrlock(&svc_fd_lock);
835 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
836 		svc_xports[fd] = NULL;
837 		delete_pollfd(fd);
838 	}
839 	if (lock_not_held)
840 		(void) rw_unlock(&svc_fd_lock);
841 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
842 }
843 
844 void
845 xprt_unregister(const SVCXPRT *xprt)
846 {
847 	__xprt_unregister_private(xprt, TRUE);
848 }
849 
850 /* ********************** CALLOUT list related stuff ************* */
851 
852 /*
853  * Add a service program to the callout list.
854  * The dispatch routine will be called when an RPC request for this
855  * program number comes in.
856  */
857 bool_t
858 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
859     void (*dispatch)(), const struct netconfig *nconf)
860 {
861 	struct svc_callout *prev;
862 	struct svc_callout *s, **s2;
863 	struct netconfig *tnconf;
864 	char *netid = NULL;
865 	int flag = 0;
866 
867 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
868 
869 	if (xprt->xp_netid) {
870 		netid = strdup(xprt->xp_netid);
871 		flag = 1;
872 	} else if (nconf && nconf->nc_netid) {
873 		netid = strdup(nconf->nc_netid);
874 		flag = 1;
875 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
876 	    != NULL) {
877 		netid = strdup(tnconf->nc_netid);
878 		flag = 1;
879 		freenetconfigent(tnconf);
880 	} /* must have been created with svc_raw_create */
881 	if ((netid == NULL) && (flag == 1))
882 		return (FALSE);
883 
884 	(void) rw_wrlock(&svc_lock);
885 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
886 		if (netid)
887 			free(netid);
888 		if (s->sc_dispatch == dispatch)
889 			goto rpcb_it; /* the caller is registering another xprt */
890 		(void) rw_unlock(&svc_lock);
891 		return (FALSE);
892 	}
893 	s = malloc(sizeof (struct svc_callout));
894 	if (s == NULL) {
895 		if (netid)
896 			free(netid);
897 		(void) rw_unlock(&svc_lock);
898 		return (FALSE);
899 	}
900 
901 	s->sc_prog = prog;
902 	s->sc_vers = vers;
903 	s->sc_dispatch = dispatch;
904 	s->sc_netid = netid;
905 	s->sc_next = NULL;
906 
907 	/*
908 	 * The ordering of transports is such that the most frequently used
909 	 * one appears first.  So add the new entry to the end of the list.
910 	 */
911 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
912 		;
913 	*s2 = s;
914 
915 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
916 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
917 			syslog(LOG_ERR, "svc_reg : strdup failed.");
918 			free(netid);
919 			free(s);
920 			*s2 = NULL;
921 			(void) rw_unlock(&svc_lock);
922 			return (FALSE);
923 		}
924 
925 rpcb_it:
926 	(void) rw_unlock(&svc_lock);
927 
928 	/* now register the information with the local binder service */
929 	if (nconf)
930 		return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
931 	return (TRUE);
932 	/*NOTREACHED*/
933 }
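
/*
 * A minimal registration sketch (illustrative only; MYPROG, MYVERS and
 * myprog_dispatch are hypothetical names):
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *
 *	if (xprt == NULL ||
 *	    !svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf)) {
 *		syslog(LOG_ERR, "unable to register (MYPROG, MYVERS, tcp)");
 *		exit(1);
 *	}
 *	freenetconfigent(nconf);
 *	svc_run();
 */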
934 
935 /*
936  * Remove a service program from the callout list.
937  */
938 void
939 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
940 {
941 	struct svc_callout *prev;
942 	struct svc_callout *s;
943 
944 	/* unregister the information anyway */
945 	(void) rpcb_unset(prog, vers, NULL);
946 
947 	(void) rw_wrlock(&svc_lock);
948 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
949 		if (prev == NULL_SVC) {
950 			svc_head = s->sc_next;
951 		} else {
952 			prev->sc_next = s->sc_next;
953 		}
954 		s->sc_next = NULL_SVC;
955 		if (s->sc_netid)
956 			free(s->sc_netid);
957 		free(s);
958 	}
959 	(void) rw_unlock(&svc_lock);
960 }
961 
962 #ifdef PORTMAP
963 /*
964  * Add a service program to the callout list.
965  * The dispatch routine will be called when an RPC request for this
966  * program number comes in.
967  * For version 2 portmappers.
968  */
969 bool_t
970 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
971     void (*dispatch)(), int protocol)
972 {
973 	struct svc_callout *prev;
974 	struct svc_callout *s;
975 	struct netconfig *nconf;
976 	char *netid = NULL;
977 	int flag = 0;
978 
979 	if (xprt->xp_netid) {
980 		netid = strdup(xprt->xp_netid);
981 		flag = 1;
982 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
983 	    __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
984 		/* fill in missing netid field in SVCXPRT */
985 		netid = strdup(nconf->nc_netid);
986 		flag = 1;
987 		freenetconfigent(nconf);
988 	} /* must be svc_raw_create */
989 
990 	if ((netid == NULL) && (flag == 1))
991 		return (FALSE);
992 
993 	(void) rw_wrlock(&svc_lock);
994 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
995 		if (netid)
996 			free(netid);
997 		if (s->sc_dispatch == dispatch)
998 			goto pmap_it;  /* the caller is registering another xprt */
999 		(void) rw_unlock(&svc_lock);
1000 		return (FALSE);
1001 	}
1002 	s = malloc(sizeof (struct svc_callout));
1003 	if (s == (struct svc_callout *)0) {
1004 		if (netid)
1005 			free(netid);
1006 		(void) rw_unlock(&svc_lock);
1007 		return (FALSE);
1008 	}
1009 	s->sc_prog = prog;
1010 	s->sc_vers = vers;
1011 	s->sc_dispatch = dispatch;
1012 	s->sc_netid = netid;
1013 	s->sc_next = svc_head;
1014 	svc_head = s;
1015 
1016 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1017 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1018 			syslog(LOG_ERR, "svc_register : strdup failed.");
1019 			free(netid);
1020 			svc_head = s->sc_next;
1021 			free(s);
1022 			(void) rw_unlock(&svc_lock);
1023 			return (FALSE);
1024 		}
1025 
1026 pmap_it:
1027 	(void) rw_unlock(&svc_lock);
1028 	/* now register the information with the local binder service */
1029 	if (protocol)
1030 		return (pmap_set(prog, vers, protocol, xprt->xp_port));
1031 	return (TRUE);
1032 }
1033 
1034 /*
1035  * Remove a service program from the callout list.
1036  * For version 2 portmappers.
1037  */
1038 void
1039 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1040 {
1041 	struct svc_callout *prev;
1042 	struct svc_callout *s;
1043 
1044 	(void) rw_wrlock(&svc_lock);
1045 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1046 		if (prev == NULL_SVC) {
1047 			svc_head = s->sc_next;
1048 		} else {
1049 			prev->sc_next = s->sc_next;
1050 		}
1051 		s->sc_next = NULL_SVC;
1052 		if (s->sc_netid)
1053 			free(s->sc_netid);
1054 		free(s);
1055 		/* unregister the information with the local binder service */
1056 		(void) pmap_unset(prog, vers);
1057 	}
1058 	(void) rw_unlock(&svc_lock);
1059 }
1060 #endif /* PORTMAP */
1061 
1062 /*
1063  * Search the callout list for a program number, return the callout
1064  * struct.
1065  * Also check the transport.  Many routines such as svc_unreg
1066  * don't give any corresponding transport, so don't check the transport
1067  * if netid == NULL.
1068  */
1069 static struct svc_callout *
1070 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1071 {
1072 	struct svc_callout *s, *p;
1073 
1074 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1075 
1076 /*	assert(RW_WRITE_HELD(&svc_lock)); */
1077 	p = NULL_SVC;
1078 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1079 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1080 		    ((netid == NULL) || (s->sc_netid == NULL) ||
1081 		    (strcmp(netid, s->sc_netid) == 0)))
1082 			break;
1083 		p = s;
1084 	}
1085 	*prev = p;
1086 	return (s);
1087 }
1088 
1089 
1090 /* ******************* REPLY GENERATION ROUTINES  ************ */
1091 
1092 /*
1093  * Send a reply to an rpc request
1094  */
1095 bool_t
1096 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1097     const caddr_t xdr_location)
1098 {
1099 	struct rpc_msg rply;
1100 
1101 	rply.rm_direction = REPLY;
1102 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1103 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1104 	rply.acpted_rply.ar_stat = SUCCESS;
1105 	rply.acpted_rply.ar_results.where = xdr_location;
1106 	rply.acpted_rply.ar_results.proc = xdr_results;
1107 	return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1108 }
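
/*
 * A dispatch routine typically answers each request with svc_sendreply()
 * or one of the svcerr_*() routines below. A sketch (MYPROC, xdr_myargs,
 * xdr_myres, struct myargs, struct myres and do_myproc are hypothetical):
 *
 *	static void
 *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *		struct myargs args;
 *		struct myres res;
 *
 *		switch (rqstp->rq_proc) {
 *		case NULLPROC:
 *			(void) svc_sendreply(xprt, xdr_void, NULL);
 *			return;
 *		case MYPROC:
 *			(void) memset(&args, 0, sizeof (args));
 *			if (!svc_getargs(xprt, xdr_myargs, (caddr_t)&args)) {
 *				svcerr_decode(xprt);
 *				return;
 *			}
 *			do_myproc(&args, &res);
 *			(void) svc_sendreply(xprt, xdr_myres, (caddr_t)&res);
 *			(void) svc_freeargs(xprt, xdr_myargs, (caddr_t)&args);
 *			return;
 *		default:
 *			svcerr_noproc(xprt);
 *			return;
 *		}
 *	}
 */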
1109 
1110 /*
1111  * No procedure error reply
1112  */
1113 void
1114 svcerr_noproc(const SVCXPRT *xprt)
1115 {
1116 	struct rpc_msg rply;
1117 
1118 	rply.rm_direction = REPLY;
1119 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1120 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1121 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1122 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1123 }
1124 
1125 /*
1126  * Can't decode args error reply
1127  */
1128 void
1129 svcerr_decode(const SVCXPRT *xprt)
1130 {
1131 	struct rpc_msg rply;
1132 
1133 	rply.rm_direction = REPLY;
1134 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1135 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1136 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1137 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1138 }
1139 
1140 /*
1141  * Some system error
1142  */
1143 void
1144 svcerr_systemerr(const SVCXPRT *xprt)
1145 {
1146 	struct rpc_msg rply;
1147 
1148 	rply.rm_direction = REPLY;
1149 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1150 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1151 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1152 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1153 }
1154 
1155 /*
1156  * Tell the RPC package not to complain about version errors to the client.
1157  * This is useful when revving broadcast protocols that sit on a fixed
1158  * address.  There is really only one (or there should be only one) example
1159  * of this kind of protocol: the portmapper (or rpc binder).
1160  */
1161 void
1162 __svc_versquiet_on(const SVCXPRT *xprt)
1163 {
1164 /* LINTED pointer alignment */
1165 	svc_flags(xprt) |= SVC_VERSQUIET;
1166 }
1167 
1168 void
1169 __svc_versquiet_off(const SVCXPRT *xprt)
1170 {
1171 /* LINTED pointer alignment */
1172 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1173 }
1174 
1175 void
1176 svc_versquiet(const SVCXPRT *xprt)
1177 {
1178 	__svc_versquiet_on(xprt);
1179 }
1180 
1181 int
1182 __svc_versquiet_get(const SVCXPRT *xprt)
1183 {
1184 /* LINTED pointer alignment */
1185 	return (svc_flags(xprt) & SVC_VERSQUIET);
1186 }
1187 
1188 /*
1189  * Authentication error reply
1190  */
1191 void
1192 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1193 {
1194 	struct rpc_msg rply;
1195 
1196 	rply.rm_direction = REPLY;
1197 	rply.rm_reply.rp_stat = MSG_DENIED;
1198 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1199 	rply.rjcted_rply.rj_why = why;
1200 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1201 }
1202 
1203 /*
1204  * Auth too weak error reply
1205  */
1206 void
1207 svcerr_weakauth(const SVCXPRT *xprt)
1208 {
1209 	svcerr_auth(xprt, AUTH_TOOWEAK);
1210 }
1211 
1212 /*
1213  * Program unavailable error reply
1214  */
1215 void
1216 svcerr_noprog(const SVCXPRT *xprt)
1217 {
1218 	struct rpc_msg rply;
1219 
1220 	rply.rm_direction = REPLY;
1221 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1222 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1223 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1224 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1225 }
1226 
1227 /*
1228  * Program version mismatch error reply
1229  */
1230 void
1231 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1232     const rpcvers_t high_vers)
1233 {
1234 	struct rpc_msg rply;
1235 
1236 	rply.rm_direction = REPLY;
1237 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1238 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1239 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1240 	rply.acpted_rply.ar_vers.low = low_vers;
1241 	rply.acpted_rply.ar_vers.high = high_vers;
1242 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1243 }
1244 
1245 /* ******************* SERVER INPUT STUFF ******************* */
1246 
1247 /*
1248  * Get server side input from some transport.
1249  *
1250  * Statement of authentication parameters management:
1251  * This function owns and manages all authentication parameters, specifically
1252  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1253  * the "cooked" credentials (rqst->rq_clntcred).
1254  * However, this function does not know the structure of the cooked
1255  * credentials, so it makes the following assumptions:
1256  *   a) the structure is contiguous (no pointers), and
1257  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1258  * In all events, all three parameters are freed upon exit from this routine.
1259  * The storage is trivially managed on the call stack in user land, but
1260  * is allocated via malloc in kernel land.
1261  */
1262 
1263 void
1264 svc_getreq(int rdfds)
1265 {
1266 	fd_set readfds;
1267 
1268 	FD_ZERO(&readfds);
1269 	readfds.fds_bits[0] = rdfds;
1270 	svc_getreqset(&readfds);
1271 }
1272 
1273 void
1274 svc_getreqset(fd_set *readfds)
1275 {
1276 	int i;
1277 
1278 	for (i = 0; i < svc_max_fd; i++) {
1279 		/* fd has input waiting */
1280 		if (FD_ISSET(i, readfds))
1281 			svc_getreq_common(i);
1282 	}
1283 }
1284 
1285 void
1286 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1287 {
1288 	int i;
1289 	int fds_found;
1290 
1291 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1292 		struct pollfd *p = &pfdp[i];
1293 
1294 		if (p->revents) {
1295 			/* fd has input waiting */
1296 			fds_found++;
1297 			/*
1298 			 *	We assume that this function is only called
1299 			 *	via someone select()ing from svc_fdset or
1300 			 *	poll()ing from svc_pollset[].  Thus it's safe
1301 			 *	to handle the POLLNVAL event by simply turning
1302 			 *	the corresponding bit off in svc_fdset.  The
1303 			 *	svc_pollset[] array is derived from svc_fdset
1304 			 *	and so will also be updated eventually.
1305 			 *
1306 			 *	XXX Should we do an xprt_unregister() instead?
1307 			 */
1308 			/* Handle user callback */
1309 			if (__is_a_userfd(p->fd) == TRUE) {
1310 				(void) rw_rdlock(&svc_fd_lock);
1311 				__svc_getreq_user(p);
1312 				(void) rw_unlock(&svc_fd_lock);
1313 			} else {
1314 				if (p->revents & POLLNVAL) {
1315 					(void) rw_wrlock(&svc_fd_lock);
1316 					remove_pollfd(p->fd);	/* XXX */
1317 					(void) rw_unlock(&svc_fd_lock);
1318 				} else {
1319 					svc_getreq_common(p->fd);
1320 				}
1321 			}
1322 		}
1323 	}
1324 }
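
/*
 * svc_getreq_poll() lets a server run its own event loop instead of
 * calling svc_run(). A simplified sketch using the exported svc_pollfd
 * array and svc_max_pollfd count (a real loop must also cope with
 * svc_max_pollfd changing and with EINTR from poll()):
 *
 *	for (;;) {
 *		int nfds = svc_max_pollfd;
 *		int ret;
 *		struct pollfd *pfds = malloc(nfds * sizeof (struct pollfd));
 *
 *		(void) memcpy(pfds, svc_pollfd,
 *		    nfds * sizeof (struct pollfd));
 *		ret = poll(pfds, nfds, -1);
 *		if (ret > 0)
 *			svc_getreq_poll(pfds, ret);
 *		free(pfds);
 *	}
 */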
1325 
1326 void
1327 svc_getreq_common(const int fd)
1328 {
1329 	SVCXPRT *xprt;
1330 	enum xprt_stat stat;
1331 	struct rpc_msg *msg;
1332 	struct svc_req *r;
1333 	char *cred_area;
1334 
1335 	(void) rw_rdlock(&svc_fd_lock);
1336 
1337 	/* HANDLE USER CALLBACK */
1338 	if (__is_a_userfd(fd) == TRUE) {
1339 		struct pollfd virtual_fd;
1340 
1341 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1342 		virtual_fd.fd = fd;
1343 		__svc_getreq_user(&virtual_fd);
1344 		(void) rw_unlock(&svc_fd_lock);
1345 		return;
1346 	}
1347 
1348 	/*
1349 	 * The transport associated with this fd could have been
1350 	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
1351 	 * This can happen if two or more fds get read events and are
1352 	 * passed to svc_getreq_poll/set: the first fd is serviced by
1353 	 * the dispatch routine, which cleans up any dead transports.  If
1354 	 * one of the dead transports removed is the other fd that
1355 	 * had a read event, then svc_getreq_common() will be called with no
1356 	 * xprt associated with the fd that had the original read event.
1357 	 */
1358 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1359 		(void) rw_unlock(&svc_fd_lock);
1360 		return;
1361 	}
1362 	(void) rw_unlock(&svc_fd_lock);
1363 /* LINTED pointer alignment */
1364 	msg = SVCEXT(xprt)->msg;
1365 /* LINTED pointer alignment */
1366 	r = SVCEXT(xprt)->req;
1367 /* LINTED pointer alignment */
1368 	cred_area = SVCEXT(xprt)->cred_area;
1369 	msg->rm_call.cb_cred.oa_base = cred_area;
1370 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1371 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1372 
1373 	/* receive msgs from the xprt (support batch calls) */
1374 	do {
1375 		bool_t dispatch;
1376 
1377 		if ((dispatch = SVC_RECV(xprt, msg)) != FALSE)
1378 			(void) _svc_prog_dispatch(xprt, msg, r);
1379 		/*
1380 		 * Check if the xprt has been disconnected in a recursive call
1381 		 * in the service dispatch routine. If so, then break
1382 		 */
1383 		(void) rw_rdlock(&svc_fd_lock);
1384 		if (xprt != svc_xports[fd]) {
1385 			(void) rw_unlock(&svc_fd_lock);
1386 			break;
1387 		}
1388 		(void) rw_unlock(&svc_fd_lock);
1389 
1390 		/*
1391 		 * Call cleanup procedure if set.
1392 		 */
1393 		if (__proc_cleanup_cb != NULL && dispatch)
1394 			(*__proc_cleanup_cb)(xprt);
1395 
1396 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1397 			SVC_DESTROY(xprt);
1398 			break;
1399 		}
1400 	} while (stat == XPRT_MOREREQS);
1401 }
1402 
1403 int
1404 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1405 {
1406 	struct svc_callout *s;
1407 	enum auth_stat why;
1408 	int prog_found;
1409 	rpcvers_t low_vers;
1410 	rpcvers_t high_vers;
1411 	void (*disp_fn)();
1412 
1413 	r->rq_xprt = xprt;
1414 	r->rq_prog = msg->rm_call.cb_prog;
1415 	r->rq_vers = msg->rm_call.cb_vers;
1416 	r->rq_proc = msg->rm_call.cb_proc;
1417 	r->rq_cred = msg->rm_call.cb_cred;
1418 /* LINTED pointer alignment */
1419 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1420 /* LINTED pointer alignment */
1421 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1422 
1423 	/* first authenticate the message */
1424 	/* Check for null flavor and bypass these calls if possible */
1425 
1426 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1427 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1428 		r->rq_xprt->xp_verf.oa_length = 0;
1429 	} else {
1430 		bool_t no_dispatch;
1431 
1432 		if ((why = __gss_authenticate(r, msg,
1433 		    &no_dispatch)) != AUTH_OK) {
1434 			svcerr_auth(xprt, why);
1435 			return (0);
1436 		}
1437 		if (no_dispatch)
1438 			return (0);
1439 	}
1440 	/* match message with a registered service */
1441 	prog_found = FALSE;
1442 	low_vers = (rpcvers_t)(0 - 1);
1443 	high_vers = 0;
1444 	(void) rw_rdlock(&svc_lock);
1445 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1446 		if (s->sc_prog == r->rq_prog) {
1447 			prog_found = TRUE;
1448 			if (s->sc_vers == r->rq_vers) {
1449 				if ((xprt->xp_netid == NULL) ||
1450 				    (s->sc_netid == NULL) ||
1451 				    (strcmp(xprt->xp_netid,
1452 				    s->sc_netid) == 0)) {
1453 					disp_fn = (*s->sc_dispatch);
1454 					(void) rw_unlock(&svc_lock);
1455 					disp_fn(r, xprt);
1456 					return (1);
1457 				}
1458 				prog_found = FALSE;
1459 			}
1460 			if (s->sc_vers < low_vers)
1461 				low_vers = s->sc_vers;
1462 			if (s->sc_vers > high_vers)
1463 				high_vers = s->sc_vers;
1464 		}		/* found correct program */
1465 	}
1466 	(void) rw_unlock(&svc_lock);
1467 
1468 	/*
1469 	 * if we got here, the program or version
1470 	 * is not served ...
1471 	 */
1472 	if (prog_found) {
1473 /* LINTED pointer alignment */
1474 		if (!version_keepquiet(xprt))
1475 			svcerr_progvers(xprt, low_vers, high_vers);
1476 	} else {
1477 		svcerr_noprog(xprt);
1478 	}
1479 	return (0);
1480 }
1481 
1482 /* ******************* SVCXPRT allocation and deallocation ***************** */
1483 
1484 /*
1485  * svc_xprt_alloc() - allocate a service transport handle
1486  */
1487 SVCXPRT *
1488 svc_xprt_alloc(void)
1489 {
1490 	SVCXPRT		*xprt = NULL;
1491 	SVCXPRT_EXT	*xt = NULL;
1492 	SVCXPRT_LIST	*xlist = NULL;
1493 	struct rpc_msg	*msg = NULL;
1494 	struct svc_req	*req = NULL;
1495 	char		*cred_area = NULL;
1496 
1497 	if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1498 		goto err_exit;
1499 
1500 	if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1501 		goto err_exit;
1502 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1503 
1504 	if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1505 		goto err_exit;
1506 	xt->my_xlist = xlist;
1507 	xlist->xprt = xprt;
1508 
1509 	if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1510 		goto err_exit;
1511 	xt->msg = msg;
1512 
1513 	if ((req = malloc(sizeof (struct svc_req))) == NULL)
1514 		goto err_exit;
1515 	xt->req = req;
1516 
1517 	if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1518 		goto err_exit;
1519 	xt->cred_area = cred_area;
1520 
1521 /* LINTED pointer alignment */
1522 	(void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1523 	return (xprt);
1524 
1525 err_exit:
1526 	svc_xprt_free(xprt);
1527 	return (NULL);
1528 }
1529 
1530 
1531 /*
1532  * svc_xprt_free() - free a service handle
1533  */
1534 void
1535 svc_xprt_free(SVCXPRT *xprt)
1536 {
1537 /* LINTED pointer alignment */
1538 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1539 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist: NULL;
1540 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1541 	struct svc_req	*req = xt ? xt->req : NULL;
1542 	char		*cred_area = xt ? xt->cred_area : NULL;
1543 
1544 	if (xprt)
1545 		free(xprt);
1546 	if (xt)
1547 		free(xt);
1548 	if (my_xlist)
1549 		free(my_xlist);
1550 	if (msg)
1551 		free(msg);
1552 	if (req)
1553 		free(req);
1554 	if (cred_area)
1555 		free(cred_area);
1556 }
1557 
1558 
1559 /*
1560  * svc_xprt_destroy() - free parent and child xprt list
1561  */
1562 void
1563 svc_xprt_destroy(SVCXPRT *xprt)
1564 {
1565 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1566 	int		type;
1567 
1568 /* LINTED pointer alignment */
1569 	if (SVCEXT(xprt)->parent)
1570 /* LINTED pointer alignment */
1571 		xprt = SVCEXT(xprt)->parent;
1572 /* LINTED pointer alignment */
1573 	type = svc_type(xprt);
1574 /* LINTED pointer alignment */
1575 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1576 		xnext = xlist->next;
1577 		xprt = xlist->xprt;
1578 		switch (type) {
1579 		case SVC_DGRAM:
1580 			svc_dg_xprtfree(xprt);
1581 			break;
1582 		case SVC_RENDEZVOUS:
1583 			svc_vc_xprtfree(xprt);
1584 			break;
1585 		case SVC_CONNECTION:
1586 			svc_fd_xprtfree(xprt);
1587 			break;
1588 		case SVC_DOOR:
1589 			svc_door_xprtfree(xprt);
1590 			break;
1591 		}
1592 	}
1593 }
1594 
1595 
1596 /*
1597  * svc_copy() - make a copy of parent
1598  */
1599 SVCXPRT *
1600 svc_copy(SVCXPRT *xprt)
1601 {
1602 /* LINTED pointer alignment */
1603 	switch (svc_type(xprt)) {
1604 	case SVC_DGRAM:
1605 		return (svc_dg_xprtcopy(xprt));
1606 	case SVC_RENDEZVOUS:
1607 		return (svc_vc_xprtcopy(xprt));
1608 	case SVC_CONNECTION:
1609 		return (svc_fd_xprtcopy(xprt));
1610 	}
1611 	return (NULL);
1612 }
1613 
1614 
1615 /*
1616  * _svc_destroy_private() - private SVC_DESTROY interface
1617  */
1618 void
1619 _svc_destroy_private(SVCXPRT *xprt)
1620 {
1621 /* LINTED pointer alignment */
1622 	switch (svc_type(xprt)) {
1623 	case SVC_DGRAM:
1624 		_svc_dg_destroy_private(xprt);
1625 		break;
1626 	case SVC_RENDEZVOUS:
1627 	case SVC_CONNECTION:
1628 		_svc_vc_destroy_private(xprt, TRUE);
1629 		break;
1630 	}
1631 }
1632 
1633 /*
1634  * svc_get_local_cred() - fetch local user credentials.  This always
1635  * works over doors based transports.  For local transports, this
1636  * does not yield correct results unless the __rpc_negotiate_uid()
1637  * call has been invoked to enable this feature.
1638  */
1639 bool_t
1640 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1641 {
1642 	/* LINTED pointer alignment */
1643 	if (svc_type(xprt) == SVC_DOOR)
1644 		return (__svc_get_door_cred(xprt, lcred));
1645 	return (__rpc_get_local_cred(xprt, lcred));
1646 }
1647 
1648 
1649 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1650 
1651 /*
1652  * The dup caching routines below provide a cache of received
1653  * transactions. RPC service routines can use this to detect
1654  * retransmissions and re-send a non-failure response. An LRU
1655  * scheme is used to get rid of entries in the cache, though
1656  * only DUP_DONE entries are placed on the LRU list.
1657  * The routines were written towards development of a generic
1658  * SVC_DUP() interface, which can be expanded to encompass the
1659  * svc_dg_enablecache() routines as well. The cache is currently
1660  * private to the automounter.
1661  */
1662 
1663 
1664 /* dupcache header contains xprt specific information */
1665 struct dupcache {
1666 	rwlock_t	dc_lock;
1667 	time_t		dc_time;
1668 	int		dc_buckets;
1669 	int		dc_maxsz;
1670 	int		dc_basis;
1671 	struct dupreq 	*dc_mru;
1672 	struct dupreq	**dc_hashtbl;
1673 };
1674 
1675 /*
1676  * private duplicate cache request routines
1677  */
1678 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1679 		struct dupcache *, uint32_t, uint32_t);
1680 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1681 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1682 		struct dupcache *, uint32_t, uint32_t, time_t);
1683 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1684 		struct dupcache *, uint32_t, uint32_t);
1685 #ifdef DUP_DEBUG
1686 static void __svc_dupcache_debug(struct dupcache *);
1687 #endif /* DUP_DEBUG */
1688 
1689 /* default parameters for the dupcache */
1690 #define	DUPCACHE_BUCKETS	257
1691 #define	DUPCACHE_TIME		900
1692 #define	DUPCACHE_MAXSZ		INT_MAX
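
/*
 * Sketch of the intended calling sequence (my_cache is a hypothetical
 * per-transport cache handle; rqstp is the current request):
 *
 *	static char *my_cache = NULL;
 *	time_t timeout = DUPCACHE_TIME;
 *	caddr_t resp_buf;
 *	uint_t resp_bufsz;
 *
 *	(void) __svc_dupcache_init(&timeout, DUPCACHE_FIXEDTIME, &my_cache);
 *	...
 *	switch (__svc_dup(rqstp, &resp_buf, &resp_bufsz, my_cache)) {
 *	case DUP_NEW:
 *		... perform the request and build the response, then:
 *		(void) __svc_dupdone(rqstp, resp_buf, resp_bufsz,
 *		    DUP_DONE, my_cache);
 *		break;
 *	case DUP_DONE:
 *		... retransmission: re-send the cached response
 *		break;
 *	case DUP_INPROGRESS:
 *	case DUP_DROP:
 *	case DUP_ERROR:
 *		break;
 *	}
 */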
1693 
1694 /*
1695  * __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1696  * Initialize the duprequest cache and assign it to *xprt_cache.
1697  * Use default values depending on the cache condition and basis.
1698  * Return TRUE on success and FALSE on failure.
1699  */
1700 bool_t
1701 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1702 {
1703 	static mutex_t initdc_lock = DEFAULTMUTEX;
1704 	int i;
1705 	struct dupcache *dc;
1706 
1707 	(void) mutex_lock(&initdc_lock);
1708 	if (*xprt_cache != NULL) { /* do only once per xprt */
1709 		(void) mutex_unlock(&initdc_lock);
1710 		syslog(LOG_ERR,
1711 		    "__svc_dupcache_init: multiply defined dup cache");
1712 		return (FALSE);
1713 	}
1714 
1715 	switch (basis) {
1716 	case DUPCACHE_FIXEDTIME:
1717 		dc = malloc(sizeof (struct dupcache));
1718 		if (dc == NULL) {
1719 			(void) mutex_unlock(&initdc_lock);
1720 			syslog(LOG_ERR,
1721 			    "__svc_dupcache_init: memory alloc failed");
1722 			return (FALSE);
1723 		}
1724 		(void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1725 		if (condition != NULL)
1726 			dc->dc_time = *((time_t *)condition);
1727 		else
1728 			dc->dc_time = DUPCACHE_TIME;
1729 		dc->dc_buckets = DUPCACHE_BUCKETS;
1730 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1731 		dc->dc_basis = basis;
1732 		dc->dc_mru = NULL;
1733 		dc->dc_hashtbl = malloc(dc->dc_buckets *
1734 		    sizeof (struct dupreq *));
1735 		if (dc->dc_hashtbl == NULL) {
1736 			free(dc);
1737 			(void) mutex_unlock(&initdc_lock);
1738 			syslog(LOG_ERR,
1739 			    "__svc_dupcache_init: memory alloc failed");
1740 			return (FALSE);
1741 		}
1742 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1743 			dc->dc_hashtbl[i] = NULL;
1744 		*xprt_cache = (char *)dc;
1745 		break;
1746 	default:
1747 		(void) mutex_unlock(&initdc_lock);
1748 		syslog(LOG_ERR,
1749 		    "__svc_dupcache_init: undefined dup cache basis");
1750 		return (FALSE);
1751 	}
1752 
1753 	(void) mutex_unlock(&initdc_lock);
1754 
1755 	return (TRUE);
1756 }
1757 
1758 /*
1759  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1760  *	char *xprt_cache)
1761  * Searches the request cache. Creates an entry and returns DUP_NEW if
1762  * the request is not found in the cache.  If it is found, then it
1763  * returns the state of the request (in progress, drop, or done) and
1764  * also allocates and passes back the results to the user (if any) in
1765  * resp_buf, with their length in resp_bufsz. DUP_ERROR is returned on error.
1766  */
1767 int
1768 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1769     char *xprt_cache)
1770 {
1771 	uint32_t drxid, drhash;
1772 	int rc;
1773 	struct dupreq *dr = NULL;
1774 	time_t timenow = time(NULL);
1775 
1776 	/* LINTED pointer alignment */
1777 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1778 
1779 	if (dc == NULL) {
1780 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1781 		return (DUP_ERROR);
1782 	}
1783 
1784 	/* get the xid of the request */
1785 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1786 		syslog(LOG_ERR, "__svc_dup: xid error");
1787 		return (DUP_ERROR);
1788 	}
1789 	drhash = drxid % dc->dc_buckets;
1790 
1791 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1792 	    drhash)) != DUP_NEW)
1793 		return (rc);
1794 
1795 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1796 		return (DUP_ERROR);
1797 
1798 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1799 	    == DUP_ERROR)
1800 		return (rc);
1801 
1802 	return (DUP_NEW);
1803 }
1804 
1805 
1806 
1807 /*
1808  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1809  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1810  *		uint32_t drhash)
1811  * Checks to see whether an entry already exists in the cache. If it does,
1812  * copy the results back into resp_buf, if appropriate. Return the status
1813  * of the request, or DUP_NEW if the entry is not in the cache.
1814  */
1815 static int
1816 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1817     struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1818 {
1819 	struct dupreq *dr = NULL;
1820 
1821 	(void) rw_rdlock(&(dc->dc_lock));
1822 	dr = dc->dc_hashtbl[drhash];
1823 	while (dr != NULL) {
1824 		if (dr->dr_xid == drxid &&
1825 		    dr->dr_proc == req->rq_proc &&
1826 		    dr->dr_prog == req->rq_prog &&
1827 		    dr->dr_vers == req->rq_vers &&
1828 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1829 		    memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1830 		    dr->dr_addr.len) == 0) { /* entry found */
1831 			if (dr->dr_hash != drhash) {
1832 				/* sanity check */
1833 				(void) rw_unlock((&dc->dc_lock));
1834 				syslog(LOG_ERR,
1835 				    "__svc_dupcache_check: hashing error");
1836 				return (DUP_ERROR);
1837 			}
1838 
1839 			/*
1840 			 * Return results for requests on the lru list, if
1841 			 * appropriate. Requests must be DUP_DROP or DUP_DONE
1842 			 * to have a result. A NULL buffer in the cache
1843 			 * implies no results were sent during dupdone.
1844 			 * A NULL buffer in the call implies not interested
1845 			 * in results.
1846 			 */
1847 			if (((dr->dr_status == DUP_DONE) ||
1848 			    (dr->dr_status == DUP_DROP)) &&
1849 			    resp_buf != NULL &&
1850 			    dr->dr_resp.buf != NULL) {
1851 				*resp_buf = malloc(dr->dr_resp.len);
1852 				if (*resp_buf == NULL) {
1853 					syslog(LOG_ERR,
1854 					"__svc_dupcache_check: malloc failed");
1855 					(void) rw_unlock(&(dc->dc_lock));
1856 					return (DUP_ERROR);
1857 				}
1858 				(void) memset(*resp_buf, 0, dr->dr_resp.len);
1859 				(void) memcpy(*resp_buf, dr->dr_resp.buf,
1860 				    dr->dr_resp.len);
1861 				*resp_bufsz = dr->dr_resp.len;
1862 			} else {
1863 				/* no result */
1864 				if (resp_buf)
1865 					*resp_buf = NULL;
1866 				if (resp_bufsz)
1867 					*resp_bufsz = 0;
1868 			}
1869 			(void) rw_unlock(&(dc->dc_lock));
1870 			return (dr->dr_status);
1871 		}
1872 		dr = dr->dr_chain;
1873 	}
1874 	(void) rw_unlock(&(dc->dc_lock));
1875 	return (DUP_NEW);
1876 }
1877 
1878 /*
1879  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1880  * Return a victim dupreq entry to the caller, depending on cache policy.
1881  */
1882 static struct dupreq *
1883 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1884 {
1885 	struct dupreq *dr = NULL;
1886 
1887 	switch (dc->dc_basis) {
1888 	case DUPCACHE_FIXEDTIME:
1889 		/*
1890 		 * The hash policy is to free up a bit of the hash
1891 		 * table before allocating a new entry as the victim.
1892 		 * Freeing up the hash table each time should split
1893 		 * the cost of keeping the hash table clean among threads.
1894 		 * Note that only DONE or DROPPED entries are on the lru
1895 		 * list but we do a sanity check anyway.
1896 		 */
1897 		(void) rw_wrlock(&(dc->dc_lock));
1898 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1899 		    ((timenow - dr->dr_time) > dc->dc_time)) {
1900 			/* clean and then free the entry */
1901 			if (dr->dr_status != DUP_DONE &&
1902 			    dr->dr_status != DUP_DROP) {
1903 				/*
1904 				 * The LRU list can't contain an
1905 				 * entry where the status is other than
1906 				 * DUP_DONE or DUP_DROP.
1907 				 */
1908 				syslog(LOG_ERR,
1909 				    "__svc_dupcache_victim: bad victim");
1910 #ifdef DUP_DEBUG
1911 				/*
1912 				 * Need to hold the reader/writers lock to
1913 				 * print the cache info, since we already
1914 				 * hold the writers lock, we shall continue
1915 				 * calling __svc_dupcache_debug()
1916 				 */
1917 				__svc_dupcache_debug(dc);
1918 #endif /* DUP_DEBUG */
1919 				(void) rw_unlock(&(dc->dc_lock));
1920 				return (NULL);
1921 			}
1922 			/* free buffers */
1923 			if (dr->dr_resp.buf) {
1924 				free(dr->dr_resp.buf);
1925 				dr->dr_resp.buf = NULL;
1926 			}
1927 			if (dr->dr_addr.buf) {
1928 				free(dr->dr_addr.buf);
1929 				dr->dr_addr.buf = NULL;
1930 			}
1931 
1932 			/* unhash the entry */
1933 			if (dr->dr_chain)
1934 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1935 			if (dr->dr_prevchain)
1936 				dr->dr_prevchain->dr_chain = dr->dr_chain;
1937 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
1938 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1939 
1940 			/* modify the lru pointers */
1941 			if (dc->dc_mru == dr) {
1942 				dc->dc_mru = NULL;
1943 			} else {
1944 				dc->dc_mru->dr_next = dr->dr_next;
1945 				dr->dr_next->dr_prev = dc->dc_mru;
1946 			}
1947 			free(dr);
1948 			dr = NULL;
1949 		}
1950 		(void) rw_unlock(&(dc->dc_lock));
1951 
1952 		/*
1953 		 * Allocate and return new clean entry as victim
1954 		 */
1955 		if ((dr = malloc(sizeof (*dr))) == NULL) {
1956 			syslog(LOG_ERR,
1957 			    "__svc_dupcache_victim: malloc failed");
1958 			return (NULL);
1959 		}
1960 		(void) memset(dr, 0, sizeof (*dr));
1961 		return (dr);
1962 	default:
1963 		syslog(LOG_ERR,
1964 		    "__svc_dupcache_victim: undefined dupcache basis");
1965 		return (NULL);
1966 	}
1967 }
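
/*
 * A standalone sketch (hypothetical, under the same DUPCACHE_EXAMPLE
 * guard as the other examples) of the DUPCACHE_FIXEDTIME reaping above:
 * the lru list is a circular doubly-linked ring in which mru->next is
 * always the oldest entry, so expired entries are unlinked from that
 * end until a sufficiently recent one is found.
 */
#ifdef DUPCACHE_EXAMPLE
struct ring {
	struct ring *next;	/* mru->next is the lru (oldest) entry */
	struct ring *prev;
	time_t stamp;		/* completion time of the entry */
};

/* Free entries older than window seconds; return the new mru. */
static struct ring *
ring_expire(struct ring *mru, time_t now, time_t window)
{
	struct ring *old;

	while (mru != NULL && (now - (old = mru->next)->stamp) > window) {
		if (old == mru) {		/* last entry in the ring */
			mru = NULL;
		} else {
			mru->next = old->next;	/* unlink the oldest */
			old->next->prev = mru;
		}
		free(old);
	}
	return (mru);
}
#endif /* DUPCACHE_EXAMPLE */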
1968 
1969 /*
1970  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1971  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1972  * Build a new duprequest entry and insert it into the cache.
1973  */
1974 static int
1975 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1976     struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1977 {
1978 	dr->dr_xid = drxid;
1979 	dr->dr_prog = req->rq_prog;
1980 	dr->dr_vers = req->rq_vers;
1981 	dr->dr_proc = req->rq_proc;
1982 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1983 	dr->dr_addr.len = dr->dr_addr.maxlen;
1984 	if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1985 		syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1986 		free(dr);
1987 		return (DUP_ERROR);
1988 	}
1989 	(void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1990 	(void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1991 	    dr->dr_addr.len);
1992 	dr->dr_resp.buf = NULL;
1993 	dr->dr_resp.maxlen = 0;
1994 	dr->dr_resp.len = 0;
1995 	dr->dr_status = DUP_INPROGRESS;
1996 	dr->dr_time = timenow;
1997 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
1998 
1999 	/* place entry at head of hash table */
2000 	(void) rw_wrlock(&(dc->dc_lock));
2001 	dr->dr_chain = dc->dc_hashtbl[drhash];
2002 	dr->dr_prevchain = NULL;
2003 	if (dc->dc_hashtbl[drhash] != NULL)
2004 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2005 	dc->dc_hashtbl[drhash] = dr;
2006 	(void) rw_unlock(&(dc->dc_lock));
2007 	return (DUP_NEW);
2008 }
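
/*
 * The insertion above is a plain push-front on a doubly-linked hash
 * chain.  A hypothetical standalone equivalent (struct link and the
 * DUPCACHE_EXAMPLE guard are illustrative only):
 */
#ifdef DUPCACHE_EXAMPLE
struct link {
	struct link *chain;	/* next entry in this hash bucket */
	struct link *prevchain;	/* previous entry in this hash bucket */
};

static void
bucket_push_front(struct link **headp, struct link *n)
{
	n->chain = *headp;
	n->prevchain = NULL;
	if (*headp != NULL)
		(*headp)->prevchain = n;
	*headp = n;		/* the new entry becomes the bucket head */
}
#endif /* DUPCACHE_EXAMPLE */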
2009 
2010 /*
2011  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2012  *		int status, char *xprt_cache)
2013  * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2014  * Only DUP_DONE and DUP_DROP are valid status values. Moves the entry to
2015  * the mru position. Returns DUP_ERROR or status.
2016  */
2017 int
2018 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2019     int status, char *xprt_cache)
2020 {
2021 	uint32_t drxid, drhash;
2022 	int rc;
2023 
2024 	/* LINTED pointer alignment */
2025 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2026 
2027 	if (dc == NULL) {
2028 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2029 		return (DUP_ERROR);
2030 	}
2031 
2032 	if (status != DUP_DONE && status != DUP_DROP) {
2033 		syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status: "
2034 		    "must be DUP_DONE or DUP_DROP");
2035 		return (DUP_ERROR);
2036 	}
2037 
2038 	/* find the xid of the entry in the cache */
2039 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void *)&drxid) == FALSE) {
2040 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2041 		return (DUP_ERROR);
2042 	}
2043 	drhash = drxid % dc->dc_buckets;
2044 
2045 	/* update the status of the entry and result buffers, if required */
2046 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2047 	    dc, drxid, drhash)) == DUP_ERROR) {
2048 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2049 		return (DUP_ERROR);
2050 	}
2051 
2052 	return (rc);
2053 }
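
/*
 * A hedged sketch of the service side of dupdone: once a reply has been
 * computed and serialized, the dispatch code records it so that
 * retransmissions can be answered from the cache.  The finish_request()
 * wrapper is hypothetical; xprt_cache is the cache handle set up for
 * the transport earlier in this file.
 */
#ifdef DUPCACHE_EXAMPLE
static void
finish_request(struct svc_req *req, caddr_t reply, uint_t replysz,
    char *xprt_cache)
{
	if (reply != NULL) {
		/* cache a copy of the serialized reply, mark entry DONE */
		if (__svc_dupdone(req, reply, replysz, DUP_DONE,
		    xprt_cache) == DUP_ERROR)
			syslog(LOG_ERR, "finish_request: dupdone failed");
	} else {
		/* record that no reply will ever be sent for this xid */
		(void) __svc_dupdone(req, NULL, 0, DUP_DROP, xprt_cache);
	}
}
#endif /* DUPCACHE_EXAMPLE */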
2054 
2055 /*
2056  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2057  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2058  * 	uint32_t drhash)
2059  * Check if the entry exists in the dupcache; if it does, update its
2060  * status, time, and buffer, if appropriate. It's possible, but unlikely,
2061  * for DONE requests not to exist in the cache. Return DUP_ERROR or status.
2062  */
2063 static int
2064 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2065     int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2066 {
2067 	struct dupreq *dr = NULL;
2068 	time_t timenow = time(NULL);
2069 
2070 	(void) rw_wrlock(&(dc->dc_lock));
2071 	dr = dc->dc_hashtbl[drhash];
2072 	while (dr != NULL) {
2073 		if (dr->dr_xid == drxid &&
2074 		    dr->dr_proc == req->rq_proc &&
2075 		    dr->dr_prog == req->rq_prog &&
2076 		    dr->dr_vers == req->rq_vers &&
2077 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2078 		    memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2079 		    dr->dr_addr.len) == 0) { /* entry found */
2080 			if (dr->dr_hash != drhash) {
2081 				/* sanity check */
2082 				(void) rw_unlock(&(dc->dc_lock));
2083 				syslog(LOG_ERR,
2084 				    "__svc_dupcache_update: hashing error");
2085 				return (DUP_ERROR);
2086 			}
2087 
2088 			/* store the results if the buffer is not NULL */
2089 			if (resp_buf != NULL) {
2090 				if ((dr->dr_resp.buf =
2091 				    malloc(resp_bufsz)) == NULL) {
2092 					(void) rw_unlock(&(dc->dc_lock));
2093 					syslog(LOG_ERR,
2094 					    "__svc_dupdone: malloc failed");
2095 					return (DUP_ERROR);
2096 				}
2097 				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2098 				(void) memcpy(dr->dr_resp.buf, resp_buf,
2099 				    resp_bufsz);
2100 				dr->dr_resp.len = resp_bufsz;
2101 			}
2102 
2103 			/* update status and done time */
2104 			dr->dr_status = status;
2105 			dr->dr_time = timenow;
2106 
2107 			/* move the entry to the mru position */
2108 			if (dc->dc_mru == NULL) {
2109 				dr->dr_next = dr;
2110 				dr->dr_prev = dr;
2111 			} else {
2112 				dr->dr_next = dc->dc_mru->dr_next;
2113 				dc->dc_mru->dr_next->dr_prev = dr;
2114 				dr->dr_prev = dc->dc_mru;
2115 				dc->dc_mru->dr_next = dr;
2116 			}
2117 			dc->dc_mru = dr;
2118 
2119 			(void) rw_unlock(&(dc->dc_lock));
2120 			return (status);
2121 		}
2122 		dr = dr->dr_chain;
2123 	}
2124 	(void) rw_unlock(&(dc->dc_lock));
2125 	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2126 	return (DUP_ERROR);
2127 }
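
/*
 * The mru update above inserts the entry immediately "behind" the
 * current mru in the circular ring and then advances dc_mru.  A
 * hypothetical standalone equivalent, reusing struct ring from the
 * DUPCACHE_EXAMPLE expiry sketch above:
 */
#ifdef DUPCACHE_EXAMPLE
static void
ring_insert_mru(struct ring **mrup, struct ring *n)
{
	if (*mrup == NULL) {
		n->next = n;		/* singleton ring points at itself */
		n->prev = n;
	} else {
		n->next = (*mrup)->next;	/* new mru precedes old lru */
		(*mrup)->next->prev = n;
		n->prev = *mrup;
		(*mrup)->next = n;
	}
	*mrup = n;			/* entry is now the most recent */
}
#endif /* DUPCACHE_EXAMPLE */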
2128 
2129 #ifdef DUP_DEBUG
2130 /*
2131  * __svc_dupcache_debug(struct dupcache *dc)
2132  * Print the contents of the hash table and the lru list.
2133  *
2134  * This function requires the caller to hold the reader
2135  * or writer version of the duplicate request cache lock (dc_lock).
2136  */
2137 static void
2138 __svc_dupcache_debug(struct dupcache *dc)
2139 {
2140 	struct dupreq *dr = NULL;
2141 	int i;
2142 	bool_t bval;
2143 
2144 	fprintf(stderr, "   HASHTABLE\n");
2145 	for (i = 0; i < dc->dc_buckets; i++) {
2146 		bval = FALSE;
2147 		dr = dc->dc_hashtbl[i];
2148 		while (dr != NULL) {
2149 			if (!bval) {	/* ensures bucket printed only once */
2150 				fprintf(stderr, "    bucket : %d\n", i);
2151 				bval = TRUE;
2152 			}
2153 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2154 			    dr->dr_xid, dr->dr_status, dr->dr_time);
2155 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2156 			    dr, dr->dr_chain, dr->dr_prevchain);
2157 			dr = dr->dr_chain;
2158 		}
2159 	}
2160 
2161 	fprintf(stderr, "   LRU\n");
2162 	if (dc->dc_mru) {
2163 		dr = dc->dc_mru->dr_next;	/* lru */
2164 		while (dr != dc->dc_mru) {
2165 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2166 			    dr->dr_xid, dr->dr_status, dr->dr_time);
2167 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2168 			    dr, dr->dr_next, dr->dr_prev);
2169 			dr = dr->dr_next;
2170 		}
2171 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2172 		    dr->dr_xid, dr->dr_status, dr->dr_time);
2173 		fprintf(stderr, " dr: %p next: %p prev: %p\n",
2174 		    dr, dr->dr_next, dr->dr_prev);
2175 	}
2176 }
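
/*
 * A hedged example of honouring the locking contract above: a caller
 * that does not already hold dc_lock takes at least the reader lock
 * before dumping the cache.  The dump_dupcache() wrapper is
 * hypothetical.
 */
#ifdef DUPCACHE_EXAMPLE
static void
dump_dupcache(struct dupcache *dc)
{
	(void) rw_rdlock(&(dc->dc_lock));	/* reader side suffices */
	__svc_dupcache_debug(dc);
	(void) rw_unlock(&(dc->dc_lock));
}
#endif /* DUPCACHE_EXAMPLE */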
2177 #endif /* DUP_DEBUG */
2178