xref: /illumos-gate/usr/src/lib/libnsl/rpc/svc.c (revision ce326879a41b052db3abafb44e551f9d9c40cdba)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2016 by Delphix. All rights reserved.
26  */
27 /*
28  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
29  */
30 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
32 /*
33  * Portions of this source code were derived from Berkeley
34  * 4.3 BSD under license from the Regents of the University of
35  * California.
36  */
37 
38 /*
39  * svc.c, Server-side remote procedure call interface.
40  *
41  * There are two sets of procedures here.  The xprt routines are
42  * for handling transport handles.  The svc routines handle the
43  * list of service routines.
44  *
45  */
46 
47 #include "mt.h"
48 #include "rpc_mt.h"
49 #include <assert.h>
50 #include <errno.h>
51 #include <sys/types.h>
52 #include <stropts.h>
53 #include <sys/conf.h>
54 #include <rpc/rpc.h>
55 #ifdef PORTMAP
56 #include <rpc/pmap_clnt.h>
57 #endif
58 #include <sys/poll.h>
59 #include <netconfig.h>
60 #include <syslog.h>
61 #include <stdlib.h>
62 #include <unistd.h>
63 #include <string.h>
64 #include <limits.h>
65 
66 extern bool_t __svc_get_door_cred();
67 extern bool_t __rpc_get_local_cred();
68 
69 SVCXPRT **svc_xports;
70 static int nsvc_xports; 	/* total number of svc_xports allocated */
71 
72 XDR **svc_xdrs;		/* common XDR receive area */
73 int nsvc_xdrs;		/* total number of svc_xdrs allocated */
74 
75 int __rpc_use_pollfd_done;	/* to remove the limit on the number of connections */
76 
77 #define	NULL_SVC ((struct svc_callout *)0)
78 #define	RQCRED_SIZE	400		/* this size is excessive */
79 
80 /*
81  * The services list
82  * Each entry represents a set of procedures (an rpc program).
83  * The dispatch routine takes request structs and runs the
84  * appropriate procedure.
85  */
86 static struct svc_callout {
87 	struct svc_callout *sc_next;
88 	rpcprog_t	    sc_prog;
89 	rpcvers_t	    sc_vers;
90 	char		   *sc_netid;
91 	void		    (*sc_dispatch)();
92 } *svc_head;
93 extern rwlock_t	svc_lock;
94 
95 static struct svc_callout *svc_find();
96 int _svc_prog_dispatch();
97 void svc_getreq_common();
98 char *strdup();
99 
100 extern mutex_t	svc_door_mutex;
101 extern cond_t	svc_door_waitcv;
102 extern int	svc_ndoorfds;
103 extern SVCXPRT_LIST *_svc_xprtlist;
104 extern mutex_t xprtlist_lock;
105 extern void __svc_rm_from_xlist();
106 
107 #if !defined(_LP64)
108 extern fd_set _new_svc_fdset;
109 #endif
110 
111 /*
112  * If the allocated array of reactors is too small, it is grown by this
113  * margin. This reduces the number of reallocations.
114  */
115 #define	USER_FD_INCREMENT 5
116 
117 static void add_pollfd(int fd, short events);
118 static void remove_pollfd(int fd);
119 static void __svc_remove_input_of_fd(int fd);
120 
121 /*
122  * Data used to handle reactor:
123  * 	- one file descriptor we listen to,
124  *	- one callback we call if the fd pops,
125  *	- and a cookie passed as a parameter to the callback.
126  *
127  * The structure is an array indexed by file descriptor. Each entry points
128  * to the first element of a doubly-linked list of callbacks.
129  * Only one callback may be associated with a given (fd, event) pair.
130  */
131 
132 struct _svc_user_fd_head;
133 
134 typedef struct {
135 	struct _svc_user_fd_node *next;
136 	struct _svc_user_fd_node *previous;
137 } _svc_user_link;
138 
139 typedef struct _svc_user_fd_node {
140 	_svc_user_link lnk;
141 	svc_input_id_t id;
142 	int	    fd;
143 	unsigned int   events;
144 	svc_callback_t callback;
145 	void*	  cookie;
146 } _svc_user_fd_node;
147 
148 typedef struct _svc_user_fd_head {
149 	struct _svc_user_fd_node *list;
150 	unsigned int mask;    /* logical OR of all sub-masks */
151 } _svc_user_fd_head;
152 
153 
154 /* Array of defined reactors, indexed by file descriptor */
155 static _svc_user_fd_head *svc_userfds = NULL;
156 
157 /* current size of the svc_userfds array */
158 static int svc_nuserfds = 0;
159 
160 /* Mutex to ensure MT safe operations for user fds callbacks. */
161 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
162 
163 
164 /*
165  * This structure is used to obtain constant-time algorithms. There is an
166  * array of this structure as large as svc_nuserfds. When the user registers
167  * a new callback, the address of the created structure is stored in a cell
168  * of this array. The index of this cell is the returned unique identifier.
169  *
170  * On removal, the id is given by the user; the "free" flag tells us whether
171  * the cell is in use. If it is free, we return an error. Otherwise,
172  * we can free the structure pointed to by fd_node.
173  *
174  * On insertion, we take a cell from the free list threaded through
175  * (first_free, next_free). In this way, with a constant-time computation,
176  * we can hand a correct index back to the user.
177  */
178 
179 typedef struct _svc_management_user_fd {
180 	bool_t free;
181 	union {
182 		svc_input_id_t next_free;
183 		_svc_user_fd_node *fd_node;
184 	} data;
185 } _svc_management_user_fd;
186 
187 /* index to the first free elem */
188 static svc_input_id_t first_free = (svc_input_id_t)-1;
189 /* the size of this array is the same as svc_nuserfds */
190 static _svc_management_user_fd* user_fd_mgt_array = NULL;
191 
192 /* current size of user_fd_mgt_array */
193 static int svc_nmgtuserfds = 0;
194 
195 
196 /* Define some macros to access data associated to registration ids. */
197 #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
198 #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
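
/*
 * Illustrative sketch (not part of this library): the free-list-in-array
 * id allocation pattern used by user_fd_mgt_array, shown standalone with
 * hypothetical names.  Free cells are threaded through their own storage,
 * so both allocation and release are O(1):
 *
 *	typedef struct {
 *		int	free;			// is this cell unused?
 *		union {
 *			int	next_free;	// valid while free
 *			void	*payload;	// valid while in use
 *		} u;
 *	} slot_t;
 *
 *	static slot_t	slots[16];
 *	static int	first_free = -1;
 *
 *	static void
 *	slot_release(int id)
 *	{
 *		slots[id].free = 1;
 *		slots[id].u.next_free = first_free;	// push on free list
 *		first_free = id;
 *	}
 *
 *	static int
 *	slot_alloc(void *payload)
 *	{
 *		int id = first_free;
 *
 *		if (id == -1)
 *			return (-1);	// caller would grow the array here
 *		first_free = slots[id].u.next_free;	// pop from free list
 *		slots[id].free = 0;
 *		slots[id].u.payload = payload;
 *		return (id);
 *	}
 */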
199 
200 #ifndef POLLSTANDARD
201 #define	POLLSTANDARD \
202 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
203 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
204 #endif
205 
206 /*
207  * To free an id, we mark the cell as free and insert it at the head of
208  * the free list.
209  */
210 
211 static void
212 _svc_free_id(const svc_input_id_t id)
213 {
214 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
215 	user_fd_mgt_array[(int)id].free = TRUE;
216 	user_fd_mgt_array[(int)id].data.next_free = first_free;
217 	first_free = id;
218 }
219 
220 /*
221  * To get a free cell, we just have to take it from the free linked list and
222  * set the flag to "not free". This function also allocates new memory if
223  * necessary.
224  */
225 static svc_input_id_t
226 _svc_attribute_new_id(_svc_user_fd_node *node)
227 {
228 	int selected_index = (int)first_free;
229 	assert(node != NULL);
230 
231 	if (selected_index == -1) {
232 		/* Allocate new entries */
233 		int L_inOldSize = svc_nmgtuserfds;
234 		int i;
235 		_svc_management_user_fd *tmp;
236 
237 		svc_nmgtuserfds += USER_FD_INCREMENT;
238 
239 		tmp = realloc(user_fd_mgt_array,
240 		    svc_nmgtuserfds * sizeof (_svc_management_user_fd));
241 
242 		if (tmp == NULL) {
243 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
244 			svc_nmgtuserfds = L_inOldSize;
245 			errno = ENOMEM;
246 			return ((svc_input_id_t)-1);
247 		}
248 
249 		user_fd_mgt_array = tmp;
250 
251 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
252 			_svc_free_id((svc_input_id_t)i);
253 		selected_index = (int)first_free;
254 	}
255 
256 	node->id = (svc_input_id_t)selected_index;
257 	first_free = user_fd_mgt_array[selected_index].data.next_free;
258 
259 	user_fd_mgt_array[selected_index].data.fd_node = node;
260 	user_fd_mgt_array[selected_index].free = FALSE;
261 
262 	return ((svc_input_id_t)selected_index);
263 }
264 
265 /*
266  * Process a pollfd entry. Scan all the associated callbacks whose mask
267  * matches at least one of the received events.
268  *
269  * If the POLLNVAL event is received, we check whether some callback handles
270  * it; if none does, we remove the file descriptor from the poll set.
271  * Otherwise, we let the user callback do the work.
272  */
273 void
274 __svc_getreq_user(struct pollfd *pfd)
275 {
276 	int fd = pfd->fd;
277 	short revents = pfd->revents;
278 	bool_t invalHandled = FALSE;
279 	_svc_user_fd_node *node;
280 
281 	(void) mutex_lock(&svc_userfds_lock);
282 
283 	if ((fd < 0) || (fd >= svc_nuserfds)) {
284 		(void) mutex_unlock(&svc_userfds_lock);
285 		return;
286 	}
287 
288 	node = svc_userfds[fd].list;
289 
290 	/* check if at least one mask fits */
291 	if (0 == (revents & svc_userfds[fd].mask)) {
292 		(void) mutex_unlock(&svc_userfds_lock);
293 		return;
294 	}
295 
296 	while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
297 		/*
298 		 * If one of the received events matches those the node
299 		 * listens to, fire its callback.
300 		 */
301 		_svc_user_fd_node *next = node->lnk.next;
302 
303 		if (node->callback != NULL) {
304 			if (node->events & revents) {
305 				if (revents & POLLNVAL) {
306 					invalHandled = TRUE;
307 				}
308 
309 				/*
310 				 * The lock must be released before calling the
311 				 * user function, as this function can call
312 				 * svc_remove_input() for example.
313 				 */
314 				(void) mutex_unlock(&svc_userfds_lock);
315 				node->callback(node->id, node->fd,
316 				    node->events & revents, node->cookie);
317 				/*
318 				 * Do not use the node structure anymore, as it
319 				 * could have been deallocated by the previous
320 				 * callback.
321 				 */
322 				(void) mutex_lock(&svc_userfds_lock);
323 			}
324 		}
325 		node = next;
326 	}
327 
328 	if ((revents & POLLNVAL) && !invalHandled)
329 		__svc_remove_input_of_fd(fd);
330 	(void) mutex_unlock(&svc_userfds_lock);
331 }
332 
333 
334 /*
335  * Check if a file descriptor is associated with a user reactor.
336  * To do this, just check that the entry indexed by fd has a non-zero
337  * event mask (i.e., at least one callback is registered).
338  */
339 bool_t
340 __is_a_userfd(int fd)
341 {
342 	/* Checks argument */
343 	if ((fd < 0) || (fd >= svc_nuserfds))
344 		return (FALSE);
345 	return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
346 }
347 
348 /* Free everything concerning user fds. */
349 /* Used in svc_run.c, so not declared static. */
350 
351 void
352 __destroy_userfd(void)
353 {
354 	int one_fd;
355 	/* Clean user fd */
356 	if (svc_userfds != NULL) {
357 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
358 			_svc_user_fd_node *node;
359 
360 			node = svc_userfds[one_fd].list;
361 			while (node != NULL) {
362 				_svc_user_fd_node *tmp = node;
363 				_svc_free_id(node->id);
364 				node = node->lnk.next;
365 				free(tmp);
366 			}
367 		}
368 
369 		free(user_fd_mgt_array);
370 		user_fd_mgt_array = NULL;
371 		first_free = (svc_input_id_t)-1;
372 
373 		free(svc_userfds);
374 		svc_userfds = NULL;
375 		svc_nuserfds = 0;
376 	}
377 }
378 
379 /*
380  * Remove all the callbacks associated with an fd; useful when the fd
381  * is closed, for instance.
382  */
383 static void
384 __svc_remove_input_of_fd(int fd)
385 {
386 	_svc_user_fd_node **pnode;
387 	_svc_user_fd_node *tmp;
388 
389 	if ((fd < 0) || (fd >= svc_nuserfds))
390 		return;
391 
392 	pnode = &svc_userfds[fd].list;
393 	while ((tmp = *pnode) != NULL) {
394 		*pnode = tmp->lnk.next;
395 
396 		_svc_free_id(tmp->id);
397 		free(tmp);
398 	}
399 
400 	svc_userfds[fd].mask = 0;
401 }
402 
403 /*
404  * Allow the user to add an fd to the poll list. On failure, return
405  * (svc_input_id_t)-1; otherwise return the new registration id.
406  */
407 
408 svc_input_id_t
409 svc_add_input(int user_fd, unsigned int events,
410     svc_callback_t user_callback, void *cookie)
411 {
412 	_svc_user_fd_node *new_node;
413 
414 	if (user_fd < 0) {
415 		errno = EINVAL;
416 		return ((svc_input_id_t)-1);
417 	}
418 
419 	if ((events == 0x0000) ||
420 	    (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
421 	    POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
422 		errno = EINVAL;
423 		return ((svc_input_id_t)-1);
424 	}
425 
426 	(void) mutex_lock(&svc_userfds_lock);
427 
428 	if ((user_fd < svc_nuserfds) &&
429 	    (svc_userfds[user_fd].mask & events) != 0) {
430 		/* A callback is already registered for one of these events. */
431 		errno = EEXIST;
432 		(void) mutex_unlock(&svc_userfds_lock);
433 		return ((svc_input_id_t)-1);
434 	}
435 
436 	/* Handle memory allocation. */
437 	if (user_fd >= svc_nuserfds) {
438 		int oldSize = svc_nuserfds;
439 		int i;
440 		_svc_user_fd_head *tmp;
441 
442 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
443 
444 		tmp = realloc(svc_userfds,
445 		    svc_nuserfds * sizeof (_svc_user_fd_head));
446 
447 		if (tmp == NULL) {
448 			syslog(LOG_ERR, "svc_add_input: out of memory");
449 			svc_nuserfds = oldSize;
450 			errno = ENOMEM;
451 			(void) mutex_unlock(&svc_userfds_lock);
452 			return ((svc_input_id_t)-1);
453 		}
454 
455 		svc_userfds = tmp;
456 
457 		for (i = oldSize; i < svc_nuserfds; i++) {
458 			svc_userfds[i].list = NULL;
459 			svc_userfds[i].mask = 0;
460 		}
461 	}
462 
463 	new_node = malloc(sizeof (_svc_user_fd_node));
464 	if (new_node == NULL) {
465 		syslog(LOG_ERR, "svc_add_input: out of memory");
466 		errno = ENOMEM;
467 		(void) mutex_unlock(&svc_userfds_lock);
468 		return ((svc_input_id_t)-1);
469 	}
470 
471 	/* create a new node */
472 	new_node->fd		= user_fd;
473 	new_node->events	= events;
474 	new_node->callback	= user_callback;
475 	new_node->cookie	= cookie;
476 
477 	if (_svc_attribute_new_id(new_node) == -1) {
478 		(void) mutex_unlock(&svc_userfds_lock);
479 		free(new_node);
480 		return ((svc_input_id_t)-1);
481 	}
482 
483 	/* Add the new element at the beginning of the list. */
484 	if (svc_userfds[user_fd].list != NULL)
485 		svc_userfds[user_fd].list->lnk.previous = new_node;
486 	new_node->lnk.next = svc_userfds[user_fd].list;
487 	new_node->lnk.previous = NULL;
488 
489 	svc_userfds[user_fd].list = new_node;
490 
491 	/* refresh global mask for this file descriptor */
492 	svc_userfds[user_fd].mask |= events;
493 
494 	/* refresh mask for the poll */
495 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
496 
497 	(void) mutex_unlock(&svc_userfds_lock);
498 	return (new_node->id);
499 }
500 
501 int
502 svc_remove_input(svc_input_id_t id)
503 {
504 	_svc_user_fd_node* node;
505 	_svc_user_fd_node* next;
506 	_svc_user_fd_node* previous;
507 	int fd;		/* caching optim */
508 
509 	(void) mutex_lock(&svc_userfds_lock);
510 
511 	/* Immediately update data for id management */
512 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
513 	    is_free_id(id)) {
514 		errno = EINVAL;
515 		(void) mutex_unlock(&svc_userfds_lock);
516 		return (-1);
517 	}
518 
519 	node = node_from_id(id);
520 	assert(node != NULL);
521 
522 	_svc_free_id(id);
523 	next		= node->lnk.next;
524 	previous	= node->lnk.previous;
525 	fd		= node->fd; /* caching optim */
526 
527 	/* Remove this node from the list. */
528 	if (previous != NULL) {
529 		previous->lnk.next = next;
530 	} else {
531 		assert(svc_userfds[fd].list == node);
532 		svc_userfds[fd].list = next;
533 	}
534 	if (next != NULL)
535 		next->lnk.previous = previous;
536 
537 	/* Remove the node flags from the global mask */
538 	svc_userfds[fd].mask ^= node->events;
539 
540 	free(node);
541 	if (svc_userfds[fd].mask == 0) {
542 		assert(svc_userfds[fd].list == NULL);
543 		remove_pollfd(fd);
544 	} else {
545 		assert(svc_userfds[fd].list != NULL);
546 	}
547 	/* TODO: a cleanup pass here could shrink user_fd_mgt_array. */
548 
549 	(void) mutex_unlock(&svc_userfds_lock);
550 	return (0);
551 }
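
/*
 * Usage sketch (illustrative, not part of this library): registering a
 * user file descriptor with the poller and removing it from within its
 * own callback.  The fd and event set are hypothetical.
 *
 *	static void
 *	my_input(svc_input_id_t id, int fd, unsigned int revents,
 *	    void *cookie)
 *	{
 *		char buf[512];
 *
 *		if (revents & POLLIN)
 *			(void) read(fd, buf, sizeof (buf));
 *		if (revents & (POLLHUP|POLLERR|POLLNVAL))
 *			(void) svc_remove_input(id);
 *	}
 *
 *	svc_input_id_t id = svc_add_input(fd, POLLIN|POLLHUP|POLLERR,
 *	    my_input, NULL);
 *	if (id == (svc_input_id_t)-1)
 *		syslog(LOG_ERR, "svc_add_input: %m");
 *
 * Calling svc_remove_input() from inside the callback is legal because
 * __svc_getreq_user() drops svc_userfds_lock around the invocation.
 */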
552 
553 /*
554  * Provides default service-side functions for authentication flavors
555  * that do not use all the fields in struct svc_auth_ops.
556  */
557 
558 /*ARGSUSED*/
559 static int
560 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
561 {
562 	return (*xfunc)(xdrs, xwhere);
563 }
564 
565 struct svc_auth_ops svc_auth_any_ops = {
566 	authany_wrap,
567 	authany_wrap,
568 };
569 
570 /*
571  * Return pointer to server authentication structure.
572  */
573 SVCAUTH *
574 __svc_get_svcauth(SVCXPRT *xprt)
575 {
576 /* LINTED pointer alignment */
577 	return (&SVC_XP_AUTH(xprt));
578 }
579 
580 /*
581  * A callback routine to cleanup after a procedure is executed.
582  */
583 void (*__proc_cleanup_cb)() = NULL;
584 
585 void *
586 __svc_set_proc_cleanup_cb(void *cb)
587 {
588 	void	*tmp = (void *)__proc_cleanup_cb;
589 
590 	__proc_cleanup_cb = (void (*)())cb;
591 	return (tmp);
592 }
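
/*
 * Usage sketch (illustrative): installing a per-procedure cleanup hook.
 * svc_getreq_common() invokes the hook with the transport handle after
 * each dispatched request; the previous hook is returned so a caller
 * can chain or later restore it.
 *
 *	static void
 *	my_cleanup(SVCXPRT *xprt)
 *	{
 *		syslog(LOG_DEBUG, "request done on fd %d", xprt->xp_fd);
 *	}
 *
 *	void *prev = __svc_set_proc_cleanup_cb((void *)my_cleanup);
 */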
593 
594 /* ***************  SVCXPRT related stuff **************** */
595 
596 
597 static int pollfd_shrinking = 1;
598 
599 
600 /*
601  * Add fd to svc_pollfd
602  */
603 static void
604 add_pollfd(int fd, short events)
605 {
606 	if (fd < FD_SETSIZE) {
607 		FD_SET(fd, &svc_fdset);
608 #if !defined(_LP64)
609 		FD_SET(fd, &_new_svc_fdset);
610 #endif
611 		svc_nfds++;
612 		svc_nfds_set++;
613 		if (fd >= svc_max_fd)
614 			svc_max_fd = fd + 1;
615 	}
616 	if (fd >= svc_max_pollfd)
617 		svc_max_pollfd = fd + 1;
618 	if (svc_max_pollfd > svc_pollfd_allocd) {
619 		int i = svc_pollfd_allocd;
620 		pollfd_t *tmp;
621 		do {
622 			svc_pollfd_allocd += POLLFD_EXTEND;
623 		} while (svc_max_pollfd > svc_pollfd_allocd);
624 		tmp = realloc(svc_pollfd,
625 		    sizeof (pollfd_t) * svc_pollfd_allocd);
626 		if (tmp != NULL) {
627 			svc_pollfd = tmp;
628 			for (; i < svc_pollfd_allocd; i++)
629 				POLLFD_CLR(i, tmp);
630 		} else {
631 			/*
632 			 * Give an error message; undo the fdset setting
633 			 * above; reset the pollfd_shrinking flag.
634 			 * Because of this, poll will not be done
635 			 * on these fds.
636 			 */
637 			if (fd < FD_SETSIZE) {
638 				FD_CLR(fd, &svc_fdset);
639 #if !defined(_LP64)
640 				FD_CLR(fd, &_new_svc_fdset);
641 #endif
642 				svc_nfds--;
643 				svc_nfds_set--;
644 				if (fd == (svc_max_fd - 1))
645 					svc_max_fd--;
646 			}
647 			if (fd == (svc_max_pollfd - 1))
648 				svc_max_pollfd--;
649 			pollfd_shrinking = 0;
650 			syslog(LOG_ERR, "add_pollfd: out of memory");
651 			_exit(1);
652 		}
653 	}
654 	svc_pollfd[fd].fd	= fd;
655 	svc_pollfd[fd].events	= events;
656 	svc_npollfds++;
657 	svc_npollfds_set++;
658 }
659 
660 /*
661  * The fd is still active; only the bit in the fdset is cleared.
662  * Do not decrement svc_nfds or svc_npollfds.
663  */
664 void
665 clear_pollfd(int fd)
666 {
667 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
668 		FD_CLR(fd, &svc_fdset);
669 #if !defined(_LP64)
670 		FD_CLR(fd, &_new_svc_fdset);
671 #endif
672 		svc_nfds_set--;
673 	}
674 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
675 		POLLFD_CLR(fd, svc_pollfd);
676 		svc_npollfds_set--;
677 	}
678 }
679 
680 /*
681  * Sets the bit in the fdset for an active fd so that poll() is done for it.
682  */
683 void
684 set_pollfd(int fd, short events)
685 {
686 	if (fd < FD_SETSIZE) {
687 		FD_SET(fd, &svc_fdset);
688 #if !defined(_LP64)
689 		FD_SET(fd, &_new_svc_fdset);
690 #endif
691 		svc_nfds_set++;
692 	}
693 	if (fd < svc_pollfd_allocd) {
694 		svc_pollfd[fd].fd	= fd;
695 		svc_pollfd[fd].events	= events;
696 		svc_npollfds_set++;
697 	}
698 }
699 
700 /*
701  * Remove a svc_pollfd entry; this does not shrink the memory.
702  */
703 static void
704 remove_pollfd(int fd)
705 {
706 	clear_pollfd(fd);
707 	if (fd == (svc_max_fd - 1))
708 		svc_max_fd--;
709 	svc_nfds--;
710 	if (fd == (svc_max_pollfd - 1))
711 		svc_max_pollfd--;
712 	svc_npollfds--;
713 }
714 
715 /*
716  * Delete a svc_pollfd entry; this shrinks the memory.
717  * Use remove_pollfd if you do not want to shrink.
718  */
719 static void
720 delete_pollfd(int fd)
721 {
722 	remove_pollfd(fd);
723 	if (pollfd_shrinking && svc_max_pollfd <
724 	    (svc_pollfd_allocd - POLLFD_SHRINK)) {
725 		do {
726 			svc_pollfd_allocd -= POLLFD_SHRINK;
727 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
728 		svc_pollfd = realloc(svc_pollfd,
729 		    sizeof (pollfd_t) * svc_pollfd_allocd);
730 		if (svc_pollfd == NULL) {
731 			syslog(LOG_ERR, "delete_pollfd: out of memory");
732 			_exit(1);
733 		}
734 	}
735 }
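
/*
 * Worked example of the shrink hysteresis above, assuming for
 * illustration that POLLFD_SHRINK is 64 (the real value comes from
 * rpc_mt.h): with svc_pollfd_allocd == 192, nothing is freed until
 * svc_max_pollfd drops below 128.  At svc_max_pollfd == 100 the loop
 * shrinks the array once, to 128 entries, and stops because
 * 100 < (128 - 64) is false.  After shrinking, the array keeps at most
 * one POLLFD_SHRINK of slack, but needs more than one before it will
 * shrink again, so alternating add/delete near a boundary does not
 * trigger a realloc() on every call.
 */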
736 
737 
738 /*
739  * Activate a transport handle.
740  */
741 void
742 xprt_register(const SVCXPRT *xprt)
743 {
744 	int fd = xprt->xp_fd;
745 #ifdef CALLBACK
746 	extern void (*_svc_getreqset_proc)();
747 #endif
748 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
749 
750 	(void) rw_wrlock(&svc_fd_lock);
751 	if (svc_xports == NULL) {
752 		/* allocate some small amount first */
753 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
754 		if (svc_xports == NULL) {
755 			syslog(LOG_ERR, "xprt_register: out of memory");
756 			_exit(1);
757 		}
758 		nsvc_xports = FD_INCREMENT;
759 
760 #ifdef CALLBACK
761 		/*
762 		 * XXX: This code does not keep track of the server state.
763 		 *
764 		 * This provides for callback support.	When a client
765 		 * receives a call from another client on the server fds,
766 		 * it calls _svc_getreqset_proc(), which returns
767 		 * after serving all the server requests.  Also see
768 		 * clnt_dg.c and clnt_vc.c (the clnt_call part of them).
769 		 */
770 		_svc_getreqset_proc = svc_getreq_poll;
771 #endif
772 	}
773 
774 	while (fd >= nsvc_xports) {
775 		SVCXPRT **tmp_xprts = svc_xports;
776 
777 		/* time to expand svc_xports */
778 		tmp_xprts = realloc(svc_xports,
779 		    sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
780 		if (tmp_xprts == NULL) {
781 			syslog(LOG_ERR, "xprt_register : out of memory.");
782 			_exit(1);
783 		}
784 
785 		svc_xports = tmp_xprts;
786 		(void) memset(&svc_xports[nsvc_xports], 0,
787 		    sizeof (SVCXPRT *) * FD_INCREMENT);
788 		nsvc_xports += FD_INCREMENT;
789 	}
790 
791 	svc_xports[fd] = (SVCXPRT *)xprt;
792 
793 	add_pollfd(fd, MASKVAL);
794 
795 	if (svc_polling) {
796 		char dummy;
797 
798 		/*
799 		 * This happens only in one of the MT modes.
800 		 * Wake up poller.
801 		 */
802 		(void) write(svc_pipe[1], &dummy, sizeof (dummy));
803 	}
804 	/*
805 	 * If already dispatching door based services, start
806 	 * dispatching TLI based services now.
807 	 */
808 	(void) mutex_lock(&svc_door_mutex);
809 	if (svc_ndoorfds > 0)
810 		(void) cond_signal(&svc_door_waitcv);
811 	(void) mutex_unlock(&svc_door_mutex);
812 
813 	if (svc_xdrs == NULL) {
814 		/* allocate initial chunk */
815 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
816 		if (svc_xdrs != NULL)
817 			nsvc_xdrs = FD_INCREMENT;
818 		else {
819 			syslog(LOG_ERR, "xprt_register : out of memory.");
820 			_exit(1);
821 		}
822 	}
823 	(void) rw_unlock(&svc_fd_lock);
824 }
825 
826 /*
827  * De-activate a transport handle.
828  */
829 void
830 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
831 {
832 	int fd = xprt->xp_fd;
833 
834 	if (lock_not_held)
835 		(void) rw_wrlock(&svc_fd_lock);
836 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
837 		svc_xports[fd] = NULL;
838 		delete_pollfd(fd);
839 	}
840 	if (lock_not_held)
841 		(void) rw_unlock(&svc_fd_lock);
842 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
843 }
844 
845 void
846 xprt_unregister(const SVCXPRT *xprt)
847 {
848 	__xprt_unregister_private(xprt, TRUE);
849 }
850 
851 /* ********************** CALLOUT list related stuff ************* */
852 
853 /*
854  * Add a service program to the callout list.
855  * The dispatch routine will be called when an rpc request for this
856  * program number comes in.
857  */
858 bool_t
859 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
860     void (*dispatch)(), const struct netconfig *nconf)
861 {
862 	struct svc_callout *prev;
863 	struct svc_callout *s, **s2;
864 	struct netconfig *tnconf;
865 	char *netid = NULL;
866 	int flag = 0;
867 
868 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
869 
870 	if (xprt->xp_netid) {
871 		netid = strdup(xprt->xp_netid);
872 		flag = 1;
873 	} else if (nconf && nconf->nc_netid) {
874 		netid = strdup(nconf->nc_netid);
875 		flag = 1;
876 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
877 	    != NULL) {
878 		netid = strdup(tnconf->nc_netid);
879 		flag = 1;
880 		freenetconfigent(tnconf);
881 	} /* must have been created with svc_raw_create */
882 	if ((netid == NULL) && (flag == 1))
883 		return (FALSE);
884 
885 	(void) rw_wrlock(&svc_lock);
886 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
887 		if (netid)
888 			free(netid);
889 		if (s->sc_dispatch == dispatch)
890 			goto rpcb_it; /* it is registering another xprt */
891 		(void) rw_unlock(&svc_lock);
892 		return (FALSE);
893 	}
894 	s = malloc(sizeof (struct svc_callout));
895 	if (s == NULL) {
896 		if (netid)
897 			free(netid);
898 		(void) rw_unlock(&svc_lock);
899 		return (FALSE);
900 	}
901 
902 	s->sc_prog = prog;
903 	s->sc_vers = vers;
904 	s->sc_dispatch = dispatch;
905 	s->sc_netid = netid;
906 	s->sc_next = NULL;
907 
908 	/*
909 	 * The ordering of transports is such that the most frequently used
910 	 * one appears first.  So add the new entry to the end of the list.
911 	 */
912 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
913 		;
914 	*s2 = s;
915 
916 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
917 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
918 			syslog(LOG_ERR, "svc_reg : strdup failed.");
919 			free(netid);
920 			free(s);
921 			*s2 = NULL;
922 			(void) rw_unlock(&svc_lock);
923 			return (FALSE);
924 		}
925 
926 rpcb_it:
927 	(void) rw_unlock(&svc_lock);
928 
929 	/* now register the information with the local binder service */
930 	if (nconf)
931 		return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
932 	return (TRUE);
933 	/*NOTREACHED*/
934 }
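
/*
 * Usage sketch (illustrative, not part of this file): registering a
 * program/version pair on a freshly created transport.  The program
 * number and netid are hypothetical.
 *
 *	#define	MYPROG	0x20000099
 *	#define	MYVERS	1
 *
 *	static void
 *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
 *	{
 *		switch (rqstp->rq_proc) {
 *		case NULLPROC:
 *			(void) svc_sendreply(transp, xdr_void,
 *			    (caddr_t)NULL);
 *			return;
 *		default:
 *			svcerr_noproc(transp);
 *			return;
 *		}
 *	}
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *	if (xprt == NULL || !svc_reg(xprt, MYPROG, MYVERS,
 *	    myprog_dispatch, nconf))
 *		syslog(LOG_ERR, "unable to register (MYPROG, MYVERS)");
 *	freenetconfigent(nconf);
 */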
935 
936 /*
937  * Remove a service program from the callout list.
938  */
939 void
940 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
941 {
942 	struct svc_callout *prev;
943 	struct svc_callout *s;
944 
945 	/* unregister the information anyway */
946 	(void) rpcb_unset(prog, vers, NULL);
947 
948 	(void) rw_wrlock(&svc_lock);
949 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
950 		if (prev == NULL_SVC) {
951 			svc_head = s->sc_next;
952 		} else {
953 			prev->sc_next = s->sc_next;
954 		}
955 		s->sc_next = NULL_SVC;
956 		if (s->sc_netid)
957 			free(s->sc_netid);
958 		free(s);
959 	}
960 	(void) rw_unlock(&svc_lock);
961 }
962 
963 #ifdef PORTMAP
964 /*
965  * Add a service program to the callout list.
966  * The dispatch routine will be called when an rpc request for this
967  * program number comes in.
968  * For version 2 portmappers.
969  */
970 bool_t
971 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
972     void (*dispatch)(), int protocol)
973 {
974 	struct svc_callout *prev;
975 	struct svc_callout *s;
976 	struct netconfig *nconf;
977 	char *netid = NULL;
978 	int flag = 0;
979 
980 	if (xprt->xp_netid) {
981 		netid = strdup(xprt->xp_netid);
982 		flag = 1;
983 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
984 	    __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
985 		/* fill in missing netid field in SVCXPRT */
986 		netid = strdup(nconf->nc_netid);
987 		flag = 1;
988 		freenetconfigent(nconf);
989 	} /* must be svc_raw_create */
990 
991 	if ((netid == NULL) && (flag == 1))
992 		return (FALSE);
993 
994 	(void) rw_wrlock(&svc_lock);
995 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
996 		if (netid)
997 			free(netid);
998 		if (s->sc_dispatch == dispatch)
999 			goto pmap_it;  /* it is registering another xprt */
1000 		(void) rw_unlock(&svc_lock);
1001 		return (FALSE);
1002 	}
1003 	s = malloc(sizeof (struct svc_callout));
1004 	if (s == (struct svc_callout *)0) {
1005 		if (netid)
1006 			free(netid);
1007 		(void) rw_unlock(&svc_lock);
1008 		return (FALSE);
1009 	}
1010 	s->sc_prog = prog;
1011 	s->sc_vers = vers;
1012 	s->sc_dispatch = dispatch;
1013 	s->sc_netid = netid;
1014 	s->sc_next = svc_head;
1015 	svc_head = s;
1016 
1017 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1018 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1019 			syslog(LOG_ERR, "svc_register : strdup failed.");
1020 			free(netid);
1021 			svc_head = s->sc_next;
1022 			free(s);
1023 			(void) rw_unlock(&svc_lock);
1024 			return (FALSE);
1025 		}
1026 
1027 pmap_it:
1028 	(void) rw_unlock(&svc_lock);
1029 	/* now register the information with the local binder service */
1030 	if (protocol)
1031 		return (pmap_set(prog, vers, protocol, xprt->xp_port));
1032 	return (TRUE);
1033 }
1034 
1035 /*
1036  * Remove a service program from the callout list.
1037  * For version 2 portmappers.
1038  */
1039 void
1040 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1041 {
1042 	struct svc_callout *prev;
1043 	struct svc_callout *s;
1044 
1045 	(void) rw_wrlock(&svc_lock);
1046 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1047 		if (prev == NULL_SVC) {
1048 			svc_head = s->sc_next;
1049 		} else {
1050 			prev->sc_next = s->sc_next;
1051 		}
1052 		s->sc_next = NULL_SVC;
1053 		if (s->sc_netid)
1054 			free(s->sc_netid);
1055 		free(s);
1056 		/* unregister the information with the local binder service */
1057 		(void) pmap_unset(prog, vers);
1058 	}
1059 	(void) rw_unlock(&svc_lock);
1060 }
1061 #endif /* PORTMAP */
1062 
1063 /*
1064  * Search the callout list for a program number, return the callout
1065  * struct.
1066  * Also check the transport.  Many routines such as svc_unreg
1067  * don't give any corresponding transport, so don't check the transport
1068  * if netid == NULL.
1069  */
1070 static struct svc_callout *
1071 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1072 {
1073 	struct svc_callout *s, *p;
1074 
1075 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1076 
1077 /*	assert(RW_WRITE_HELD(&svc_lock)); */
1078 	p = NULL_SVC;
1079 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1080 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1081 		    ((netid == NULL) || (s->sc_netid == NULL) ||
1082 		    (strcmp(netid, s->sc_netid) == 0)))
1083 			break;
1084 		p = s;
1085 	}
1086 	*prev = p;
1087 	return (s);
1088 }
1089 
1090 
1091 /* ******************* REPLY GENERATION ROUTINES  ************ */
1092 
1093 /*
1094  * Send a reply to an rpc request
1095  */
1096 bool_t
1097 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1098     const caddr_t xdr_location)
1099 {
1100 	struct rpc_msg rply;
1101 
1102 	rply.rm_direction = REPLY;
1103 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1104 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1105 	rply.acpted_rply.ar_stat = SUCCESS;
1106 	rply.acpted_rply.ar_results.where = xdr_location;
1107 	rply.acpted_rply.ar_results.proc = xdr_results;
1108 	return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1109 }
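
/*
 * Usage sketch (illustrative): the reply half of a typical dispatch
 * routine.  xdr_int is used for both argument and result; the doubling
 * "procedure" is hypothetical.
 *
 *	int arg, res;
 *
 *	if (!svc_getargs(transp, xdr_int, (caddr_t)&arg)) {
 *		svcerr_decode(transp);		// args did not decode
 *		return;
 *	}
 *	res = arg * 2;
 *	if (!svc_sendreply(transp, xdr_int, (caddr_t)&res))
 *		svcerr_systemerr(transp);	// reply could not be sent
 *	(void) svc_freeargs(transp, xdr_int, (caddr_t)&arg);
 */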
1110 
1111 /*
1112  * No procedure error reply
1113  */
1114 void
1115 svcerr_noproc(const SVCXPRT *xprt)
1116 {
1117 	struct rpc_msg rply;
1118 
1119 	rply.rm_direction = REPLY;
1120 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1121 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1122 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1123 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1124 }
1125 
1126 /*
1127  * Can't decode args error reply
1128  */
1129 void
1130 svcerr_decode(const SVCXPRT *xprt)
1131 {
1132 	struct rpc_msg rply;
1133 
1134 	rply.rm_direction = REPLY;
1135 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1136 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1137 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1138 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1139 }
1140 
1141 /*
1142  * Some system error
1143  */
1144 void
1145 svcerr_systemerr(const SVCXPRT *xprt)
1146 {
1147 	struct rpc_msg rply;
1148 
1149 	rply.rm_direction = REPLY;
1150 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1151 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1152 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1153 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1154 }
1155 
1156 /*
1157  * Tell the RPC package not to complain about version errors to the client.
1158  * This is useful when revving broadcast protocols that sit on a fixed
1159  * address.  There is really only one example of this kind of protocol:
1160  * the portmapper (or rpc binder).
1161  */
1162 void
1163 __svc_versquiet_on(const SVCXPRT *xprt)
1164 {
1165 /* LINTED pointer alignment */
1166 	svc_flags(xprt) |= SVC_VERSQUIET;
1167 }
1168 
1169 void
1170 __svc_versquiet_off(const SVCXPRT *xprt)
1171 {
1172 /* LINTED pointer alignment */
1173 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1174 }
1175 
1176 void
1177 svc_versquiet(const SVCXPRT *xprt)
1178 {
1179 	__svc_versquiet_on(xprt);
1180 }
1181 
1182 int
1183 __svc_versquiet_get(const SVCXPRT *xprt)
1184 {
1185 /* LINTED pointer alignment */
1186 	return (svc_flags(xprt) & SVC_VERSQUIET);
1187 }
1188 
1189 /*
1190  * Authentication error reply
1191  */
1192 void
1193 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1194 {
1195 	struct rpc_msg rply;
1196 
1197 	rply.rm_direction = REPLY;
1198 	rply.rm_reply.rp_stat = MSG_DENIED;
1199 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1200 	rply.rjcted_rply.rj_why = why;
1201 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1202 }
1203 
1204 /*
1205  * Auth too weak error reply
1206  */
1207 void
1208 svcerr_weakauth(const SVCXPRT *xprt)
1209 {
1210 	svcerr_auth(xprt, AUTH_TOOWEAK);
1211 }
1212 
1213 /*
1214  * Program unavailable error reply
1215  */
1216 void
1217 svcerr_noprog(const SVCXPRT *xprt)
1218 {
1219 	struct rpc_msg rply;
1220 
1221 	rply.rm_direction = REPLY;
1222 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1223 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1224 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1225 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1226 }
1227 
1228 /*
1229  * Program version mismatch error reply
1230  */
1231 void
1232 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1233     const rpcvers_t high_vers)
1234 {
1235 	struct rpc_msg rply;
1236 
1237 	rply.rm_direction = REPLY;
1238 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1239 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1240 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1241 	rply.acpted_rply.ar_vers.low = low_vers;
1242 	rply.acpted_rply.ar_vers.high = high_vers;
1243 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1244 }
1245 
1246 /* ******************* SERVER INPUT STUFF ******************* */
1247 
1248 /*
1249  * Get server side input from some transport.
1250  *
1251  * Statement of authentication parameters management:
1252  * This function owns and manages all authentication parameters, specifically
1253  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1254  * the "cooked" credentials (rqst->rq_clntcred).
1255  * However, this function does not know the structure of the cooked
1256  * credentials, so it makes the following assumptions:
1257  *   a) the structure is contiguous (no pointers), and
1258  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1259  * In any event, all three parameters are freed upon exit from this routine.
1260  * The storage is trivially managed on the call stack in user land, but
1261  * is allocated with malloc in kernel land.
1262  */
1263 
1264 void
1265 svc_getreq(int rdfds)
1266 {
1267 	fd_set readfds;
1268 
1269 	FD_ZERO(&readfds);
1270 	readfds.fds_bits[0] = rdfds;
1271 	svc_getreqset(&readfds);
1272 }
1273 
1274 void
1275 svc_getreqset(fd_set *readfds)
1276 {
1277 	int i;
1278 
1279 	for (i = 0; i < svc_max_fd; i++) {
1280 		/* fd has input waiting */
1281 		if (FD_ISSET(i, readfds))
1282 			svc_getreq_common(i);
1283 	}
1284 }
1285 
1286 void
1287 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1288 {
1289 	int i;
1290 	int fds_found;
1291 
1292 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1293 		struct pollfd *p = &pfdp[i];
1294 
1295 		if (p->revents) {
1296 			/* fd has input waiting */
1297 			fds_found++;
1298 			/*
1299 			 *	We assume that this function is only called
1300 			 *	via someone select()ing from svc_fdset or
1301 			 *	poll()ing from svc_pollset[].  Thus it's safe
1302 			 *	to handle the POLLNVAL event by simply turning
1303 			 *	the corresponding bit off in svc_fdset.  The
1304 			 *	svc_pollset[] array is derived from svc_fdset
1305 			 *	and so will also be updated eventually.
1306 			 *
1307 			 *	XXX Should we do an xprt_unregister() instead?
1308 			 */
1309 			/* Handle user callback */
1310 			if (__is_a_userfd(p->fd) == TRUE) {
1311 				(void) rw_rdlock(&svc_fd_lock);
1312 				__svc_getreq_user(p);
1313 				(void) rw_unlock(&svc_fd_lock);
1314 			} else {
1315 				if (p->revents & POLLNVAL) {
1316 					(void) rw_wrlock(&svc_fd_lock);
1317 					remove_pollfd(p->fd);	/* XXX */
1318 					(void) rw_unlock(&svc_fd_lock);
1319 				} else {
1320 					svc_getreq_common(p->fd);
1321 				}
1322 			}
1323 		}
1324 	}
1325 }
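
/*
 * Usage sketch (illustrative): a simplified, single-threaded service
 * loop built on svc_pollfd/svc_max_pollfd and svc_getreq_poll(), as an
 * alternative to svc_run().  Error handling is abbreviated.
 *
 *	for (;;) {
 *		int n;
 *
 *		if (svc_max_pollfd == 0)
 *			break;			// no transports left
 *		n = poll(svc_pollfd, svc_max_pollfd, -1);
 *		if (n > 0)
 *			svc_getreq_poll(svc_pollfd, n);
 *		else if (n < 0 && errno != EINTR)
 *			break;
 *	}
 */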
1326 
1327 void
1328 svc_getreq_common(const int fd)
1329 {
1330 	SVCXPRT *xprt;
1331 	enum xprt_stat stat;
1332 	struct rpc_msg *msg;
1333 	struct svc_req *r;
1334 	char *cred_area;
1335 
1336 	(void) rw_rdlock(&svc_fd_lock);
1337 
1338 	/* HANDLE USER CALLBACK */
1339 	if (__is_a_userfd(fd) == TRUE) {
1340 		struct pollfd virtual_fd;
1341 
1342 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1343 		virtual_fd.fd = fd;
1344 		__svc_getreq_user(&virtual_fd);
1345 		(void) rw_unlock(&svc_fd_lock);
1346 		return;
1347 	}
1348 
1349 	/*
1350 	 * The transport associated with this fd could have been
1351 	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
1352 	 * This can happen if two or more fds get read events and are
1353 	 * passed to svc_getreq_poll/set: the first fd is serviced by
1354 	 * the dispatch routine and cleans up any dead transports.  If
1355 	 * one of the dead transports removed is the other fd that
1356 	 * had a read event then svc_getreq_common() will be called with no
1357 	 * xprt associated with the fd that had the original read event.
1358 	 */
1359 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1360 		(void) rw_unlock(&svc_fd_lock);
1361 		return;
1362 	}
1363 	(void) rw_unlock(&svc_fd_lock);
1364 /* LINTED pointer alignment */
1365 	msg = SVCEXT(xprt)->msg;
1366 /* LINTED pointer alignment */
1367 	r = SVCEXT(xprt)->req;
1368 /* LINTED pointer alignment */
1369 	cred_area = SVCEXT(xprt)->cred_area;
1370 	msg->rm_call.cb_cred.oa_base = cred_area;
1371 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1372 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1373 
1374 	/* receive msgs from xprt (support batch calls) */
1375 	do {
1376 		bool_t dispatch;
1377 
1378 		if (dispatch = SVC_RECV(xprt, msg))
1379 			(void) _svc_prog_dispatch(xprt, msg, r);
1380 		/*
1381 		 * Check if the xprt has been disconnected in a recursive call
1382 		 * in the service dispatch routine. If so, then break
1383 		 */
1384 		(void) rw_rdlock(&svc_fd_lock);
1385 		if (xprt != svc_xports[fd]) {
1386 			(void) rw_unlock(&svc_fd_lock);
1387 			break;
1388 		}
1389 		(void) rw_unlock(&svc_fd_lock);
1390 
1391 		/*
1392 		 * Call cleanup procedure if set.
1393 		 */
1394 		if (__proc_cleanup_cb != NULL && dispatch)
1395 			(*__proc_cleanup_cb)(xprt);
1396 
1397 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1398 			SVC_DESTROY(xprt);
1399 			break;
1400 		}
1401 	} while (stat == XPRT_MOREREQS);
1402 }
1403 
1404 int
1405 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1406 {
1407 	struct svc_callout *s;
1408 	enum auth_stat why;
1409 	int prog_found;
1410 	rpcvers_t low_vers;
1411 	rpcvers_t high_vers;
1412 	void (*disp_fn)();
1413 
1414 	r->rq_xprt = xprt;
1415 	r->rq_prog = msg->rm_call.cb_prog;
1416 	r->rq_vers = msg->rm_call.cb_vers;
1417 	r->rq_proc = msg->rm_call.cb_proc;
1418 	r->rq_cred = msg->rm_call.cb_cred;
1419 /* LINTED pointer alignment */
1420 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1421 /* LINTED pointer alignment */
1422 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1423 
1424 	/* first authenticate the message */
1425 	/* Check for null flavor and bypass these calls if possible */
1426 
1427 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1428 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1429 		r->rq_xprt->xp_verf.oa_length = 0;
1430 	} else {
1431 		bool_t no_dispatch;
1432 
1433 		if ((why = __gss_authenticate(r, msg,
1434 		    &no_dispatch)) != AUTH_OK) {
1435 			svcerr_auth(xprt, why);
1436 			return (0);
1437 		}
1438 		if (no_dispatch)
1439 			return (0);
1440 	}
1441 	/* match message with a registered service */
1442 	prog_found = FALSE;
1443 	low_vers = (rpcvers_t)(0 - 1);
1444 	high_vers = 0;
1445 	(void) rw_rdlock(&svc_lock);
1446 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1447 		if (s->sc_prog == r->rq_prog) {
1448 			prog_found = TRUE;
1449 			if (s->sc_vers == r->rq_vers) {
1450 				if ((xprt->xp_netid == NULL) ||
1451 				    (s->sc_netid == NULL) ||
1452 				    (strcmp(xprt->xp_netid,
1453 				    s->sc_netid) == 0)) {
1454 					disp_fn = (*s->sc_dispatch);
1455 					(void) rw_unlock(&svc_lock);
1456 					disp_fn(r, xprt);
1457 					return (1);
1458 				}
1459 				prog_found = FALSE;
1460 			}
1461 			if (s->sc_vers < low_vers)
1462 				low_vers = s->sc_vers;
1463 			if (s->sc_vers > high_vers)
1464 				high_vers = s->sc_vers;
1465 		}		/* found correct program */
1466 	}
1467 	(void) rw_unlock(&svc_lock);
1468 
1469 	/*
1470 	 * if we got here, the program or version
1471 	 * is not served ...
1472 	 */
1473 	if (prog_found) {
1474 /* LINTED pointer alignment */
1475 		if (!version_keepquiet(xprt))
1476 			svcerr_progvers(xprt, low_vers, high_vers);
1477 	} else {
1478 		svcerr_noprog(xprt);
1479 	}
1480 	return (0);
1481 }
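
/*
 * Worked example of the mismatch logic above: if versions 2 and 3 of a
 * program are registered and a request arrives for version 5, prog_found
 * ends up TRUE with low_vers == 2 and high_vers == 3, so the client gets
 * a PROG_MISMATCH reply advertising the supported range [2, 3].  If the
 * program number itself is unknown, the client gets PROG_UNAVAIL instead.
 */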
1482 
1483 /* ******************* SVCXPRT allocation and deallocation ***************** */
1484 
1485 /*
1486  * svc_xprt_alloc() - allocate a service transport handle
1487  */
1488 SVCXPRT *
1489 svc_xprt_alloc(void)
1490 {
1491 	SVCXPRT		*xprt = NULL;
1492 	SVCXPRT_EXT	*xt = NULL;
1493 	SVCXPRT_LIST	*xlist = NULL;
1494 	struct rpc_msg	*msg = NULL;
1495 	struct svc_req	*req = NULL;
1496 	char		*cred_area = NULL;
1497 
1498 	if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1499 		goto err_exit;
1500 
1501 	if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1502 		goto err_exit;
1503 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1504 
1505 	if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1506 		goto err_exit;
1507 	xt->my_xlist = xlist;
1508 	xlist->xprt = xprt;
1509 
1510 	if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1511 		goto err_exit;
1512 	xt->msg = msg;
1513 
1514 	if ((req = malloc(sizeof (struct svc_req))) == NULL)
1515 		goto err_exit;
1516 	xt->req = req;
1517 
1518 	if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1519 		goto err_exit;
1520 	xt->cred_area = cred_area;
1521 
1522 /* LINTED pointer alignment */
1523 	(void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1524 	return (xprt);
1525 
1526 err_exit:
1527 	svc_xprt_free(xprt);
1528 	return (NULL);
1529 }
1530 
1531 
1532 /*
1533  * svc_xprt_free() - free a service handle
1534  */
1535 void
1536 svc_xprt_free(SVCXPRT *xprt)
1537 {
1538 /* LINTED pointer alignment */
1539 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1540 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist: NULL;
1541 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1542 	struct svc_req	*req = xt ? xt->req : NULL;
1543 	char		*cred_area = xt ? xt->cred_area : NULL;
1544 
1545 	if (xprt)
1546 		free(xprt);
1547 	if (xt)
1548 		free(xt);
1549 	if (my_xlist)
1550 		free(my_xlist);
1551 	if (msg)
1552 		free(msg);
1553 	if (req)
1554 		free(req);
1555 	if (cred_area)
1556 		free(cred_area);
1557 }
1558 
1559 
1560 /*
1561  * svc_xprt_destroy() - free parent and child xprt list
1562  */
1563 void
1564 svc_xprt_destroy(SVCXPRT *xprt)
1565 {
1566 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1567 	int		type;
1568 
1569 /* LINTED pointer alignment */
1570 	if (SVCEXT(xprt)->parent)
1571 /* LINTED pointer alignment */
1572 		xprt = SVCEXT(xprt)->parent;
1573 /* LINTED pointer alignment */
1574 	type = svc_type(xprt);
1575 /* LINTED pointer alignment */
1576 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1577 		xnext = xlist->next;
1578 		xprt = xlist->xprt;
1579 		switch (type) {
1580 		case SVC_DGRAM:
1581 			svc_dg_xprtfree(xprt);
1582 			break;
1583 		case SVC_RENDEZVOUS:
1584 			svc_vc_xprtfree(xprt);
1585 			break;
1586 		case SVC_CONNECTION:
1587 			svc_fd_xprtfree(xprt);
1588 			break;
1589 		case SVC_DOOR:
1590 			svc_door_xprtfree(xprt);
1591 			break;
1592 		}
1593 	}
1594 }
1595 
1596 
1597 /*
1598  * svc_copy() - make a copy of parent
1599  */
1600 SVCXPRT *
1601 svc_copy(SVCXPRT *xprt)
1602 {
1603 /* LINTED pointer alignment */
1604 	switch (svc_type(xprt)) {
1605 	case SVC_DGRAM:
1606 		return (svc_dg_xprtcopy(xprt));
1607 	case SVC_RENDEZVOUS:
1608 		return (svc_vc_xprtcopy(xprt));
1609 	case SVC_CONNECTION:
1610 		return (svc_fd_xprtcopy(xprt));
1611 	}
1612 	return (NULL);
1613 }
1614 
1615 
1616 /*
1617  * _svc_destroy_private() - private SVC_DESTROY interface
1618  */
1619 void
1620 _svc_destroy_private(SVCXPRT *xprt)
1621 {
1622 /* LINTED pointer alignment */
1623 	switch (svc_type(xprt)) {
1624 	case SVC_DGRAM:
1625 		_svc_dg_destroy_private(xprt);
1626 		break;
1627 	case SVC_RENDEZVOUS:
1628 	case SVC_CONNECTION:
1629 		_svc_vc_destroy_private(xprt, TRUE);
1630 		break;
1631 	}
1632 }
1633 
1634 /*
1635  * svc_get_local_cred() - fetch local user credentials.  This always
1636  * works over doors based transports.  For local transports, this
1637  * does not yield correct results unless the __rpc_negotiate_uid()
1638  * call has been invoked to enable this feature.
1639  */
1640 bool_t
1641 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1642 {
1643 	/* LINTED pointer alignment */
1644 	if (svc_type(xprt) == SVC_DOOR)
1645 		return (__svc_get_door_cred(xprt, lcred));
1646 	return (__rpc_get_local_cred(xprt, lcred));
1647 }
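
/*
 * Usage sketch (illustrative; the field name follows the svc_local_cred_t
 * definition in <rpc/svc.h> and is an assumption here):
 *
 *	svc_local_cred_t lcred;
 *
 *	if (svc_get_local_cred(transp, &lcred)) {
 *		if (lcred.euid != 0)		// insist on root
 *			svcerr_weakauth(transp);
 *	}
 */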
1648 
1649 
1650 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1651 
1652 /*
1653  * The dup caching routines below provide a cache of received
1654  * transactions. RPC service routines can use this to detect
1655  * retransmissions and re-send a non-failure response. An lru
1656  * scheme is used to get rid of entries in the cache,
1657  * though only DUP_DONE entries are placed on the lru list.
1658  * The routines were written towards development of a generic
1659  * SVC_DUP() interface, which can be expanded to encompass the
1660  * svc_dg_enablecache() routines as well. The cache is currently
1661  * private to the automounter.
1662  */
1663 
1664 
1665 /* dupcache header contains xprt specific information */
1666 struct dupcache {
1667 	rwlock_t	dc_lock;
1668 	time_t		dc_time;
1669 	int		dc_buckets;
1670 	int		dc_maxsz;
1671 	int		dc_basis;
1672 	struct dupreq 	*dc_mru;
1673 	struct dupreq	**dc_hashtbl;
1674 };
1675 
1676 /*
1677  * private duplicate cache request routines
1678  */
1679 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1680 		struct dupcache *, uint32_t, uint32_t);
1681 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1682 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1683 		struct dupcache *, uint32_t, uint32_t, time_t);
1684 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1685 		struct dupcache *, uint32_t, uint32_t);
1686 #ifdef DUP_DEBUG
1687 static void __svc_dupcache_debug(struct dupcache *);
1688 #endif /* DUP_DEBUG */
1689 
1690 /* default parameters for the dupcache */
1691 #define	DUPCACHE_BUCKETS	257
1692 #define	DUPCACHE_TIME		900
1693 #define	DUPCACHE_MAXSZ		INT_MAX
1694 
1695 /*
1696  * __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1697  * initialize the duprequest cache and assign it to the xprt_cache
1698  * Use default values depending on the cache condition and basis.
1699  * return TRUE on success and FALSE on failure
1700  */
1701 bool_t
1702 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1703 {
1704 	static mutex_t initdc_lock = DEFAULTMUTEX;
1705 	int i;
1706 	struct dupcache *dc;
1707 
1708 	(void) mutex_lock(&initdc_lock);
1709 	if (*xprt_cache != NULL) { /* do only once per xprt */
1710 		(void) mutex_unlock(&initdc_lock);
1711 		syslog(LOG_ERR,
1712 		    "__svc_dupcache_init: multiply defined dup cache");
1713 		return (FALSE);
1714 	}
1715 
1716 	switch (basis) {
1717 	case DUPCACHE_FIXEDTIME:
1718 		dc = malloc(sizeof (struct dupcache));
1719 		if (dc == NULL) {
1720 			(void) mutex_unlock(&initdc_lock);
1721 			syslog(LOG_ERR,
1722 			    "__svc_dupcache_init: memory alloc failed");
1723 			return (FALSE);
1724 		}
1725 		(void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1726 		if (condition != NULL)
1727 			dc->dc_time = *((time_t *)condition);
1728 		else
1729 			dc->dc_time = DUPCACHE_TIME;
1730 		dc->dc_buckets = DUPCACHE_BUCKETS;
1731 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1732 		dc->dc_basis = basis;
1733 		dc->dc_mru = NULL;
1734 		dc->dc_hashtbl = malloc(dc->dc_buckets *
1735 		    sizeof (struct dupreq *));
1736 		if (dc->dc_hashtbl == NULL) {
1737 			free(dc);
1738 			(void) mutex_unlock(&initdc_lock);
1739 			syslog(LOG_ERR,
1740 			    "__svc_dupcache_init: memory alloc failed");
1741 			return (FALSE);
1742 		}
1743 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1744 			dc->dc_hashtbl[i] = NULL;
1745 		*xprt_cache = (char *)dc;
1746 		break;
1747 	default:
1748 		(void) mutex_unlock(&initdc_lock);
1749 		syslog(LOG_ERR,
1750 		    "__svc_dupcache_init: undefined dup cache basis");
1751 		return (FALSE);
1752 	}
1753 
1754 	(void) mutex_unlock(&initdc_lock);
1755 
1756 	return (TRUE);
1757 }
1758 
1759 /*
1760  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1761  *	char *xprt_cache)
1762  * searches the request cache. Creates an entry and returns DUP_NEW if
1763  * the request is not found in the cache.  If it is found, then it
1764  * returns the state of the request (in progress, drop, or done) and
1765  * also allocates, and passes back results to the user (if any) in
1766  * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1767  */
1768 int
1769 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1770     char *xprt_cache)
1771 {
1772 	uint32_t drxid, drhash;
1773 	int rc;
1774 	struct dupreq *dr = NULL;
1775 	time_t timenow = time(NULL);
1776 
1777 	/* LINTED pointer alignment */
1778 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1779 
1780 	if (dc == NULL) {
1781 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1782 		return (DUP_ERROR);
1783 	}
1784 
1785 	/* get the xid of the request */
1786 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1787 		syslog(LOG_ERR, "__svc_dup: xid error");
1788 		return (DUP_ERROR);
1789 	}
1790 	drhash = drxid % dc->dc_buckets;
1791 
1792 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1793 	    drhash)) != DUP_NEW)
1794 		return (rc);
1795 
1796 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1797 		return (DUP_ERROR);
1798 
1799 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1800 	    == DUP_ERROR)
1801 		return (rc);
1802 
1803 	return (DUP_NEW);
1804 }
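
/*
 * Usage sketch (illustrative): the intended call sequence for the
 * duplicate request cache in a service dispatch routine.  The cache
 * pointer, timeout, and reply buffer are hypothetical.
 *
 *	static char *dupcache;		// one per transport
 *	time_t timeout = 900;
 *	caddr_t resp;
 *	uint_t resp_sz;
 *
 *	if (dupcache == NULL &&
 *	    !__svc_dupcache_init(&timeout, DUPCACHE_FIXEDTIME, &dupcache))
 *		return;			// cache init failed
 *
 *	switch (__svc_dup(rqstp, &resp, &resp_sz, dupcache)) {
 *	case DUP_NEW:
 *		// first time: run the procedure, build the reply in
 *		// (resp, resp_sz), send it, then mark the entry done:
 *		(void) __svc_dupdone(rqstp, resp, resp_sz,
 *		    DUP_DONE, dupcache);
 *		break;
 *	case DUP_DONE:
 *		// retransmission: re-send the cached resp (malloc'd by
 *		// __svc_dup; free it afterwards) without re-running
 *		break;
 *	case DUP_INPROGRESS:
 *	case DUP_DROP:
 *		// drop the request on the floor
 *		break;
 *	case DUP_ERROR:
 *		svcerr_systemerr(rqstp->rq_xprt);
 *		break;
 *	}
 */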
1805 
1806 
1807 
1808 /*
1809  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1810  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1811  * 		uint32_t drhash)
1812  * Checks to see whether an entry already exists in the cache. If it does
1813  * copy back into the resp_buf, if appropriate. Return the status of
1814  * the request, or DUP_NEW if the entry is not in the cache
1815  */
1816 static int
1817 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1818     struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1819 {
1820 	struct dupreq *dr = NULL;
1821 
1822 	(void) rw_rdlock(&(dc->dc_lock));
1823 	dr = dc->dc_hashtbl[drhash];
1824 	while (dr != NULL) {
1825 		if (dr->dr_xid == drxid &&
1826 		    dr->dr_proc == req->rq_proc &&
1827 		    dr->dr_prog == req->rq_prog &&
1828 		    dr->dr_vers == req->rq_vers &&
1829 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1830 		    memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1831 		    dr->dr_addr.len) == 0) { /* entry found */
1832 			if (dr->dr_hash != drhash) {
1833 				/* sanity check */
1834 				(void) rw_unlock((&dc->dc_lock));
1835 				syslog(LOG_ERR,
1836 				    "__svc_dupcache_check: hashing error");
1837 				return (DUP_ERROR);
1838 			}
1839 
1840 			/*
1841 			 * return results for requests on the lru list, if
1842 			 * appropriate.  Requests must be DUP_DONE or DUP_DROP
1843 			 * to have a result. A NULL buffer in the cache
1844 			 * implies no results were sent during dupdone.
1845 			 * A NULL buffer in the call implies not interested
1846 			 * in results.
1847 			 */
1848 			if (((dr->dr_status == DUP_DONE) ||
1849 			    (dr->dr_status == DUP_DROP)) &&
1850 			    resp_buf != NULL &&
1851 			    dr->dr_resp.buf != NULL) {
1852 				*resp_buf = malloc(dr->dr_resp.len);
1853 				if (*resp_buf == NULL) {
1854 					syslog(LOG_ERR,
1855 					"__svc_dupcache_check: malloc failed");
1856 					(void) rw_unlock(&(dc->dc_lock));
1857 					return (DUP_ERROR);
1858 				}
1859 				(void) memset(*resp_buf, 0, dr->dr_resp.len);
1860 				(void) memcpy(*resp_buf, dr->dr_resp.buf,
1861 				    dr->dr_resp.len);
1862 				*resp_bufsz = dr->dr_resp.len;
1863 			} else {
1864 				/* no result */
1865 				if (resp_buf)
1866 					*resp_buf = NULL;
1867 				if (resp_bufsz)
1868 					*resp_bufsz = 0;
1869 			}
1870 			(void) rw_unlock(&(dc->dc_lock));
1871 			return (dr->dr_status);
1872 		}
1873 		dr = dr->dr_chain;
1874 	}
1875 	(void) rw_unlock(&(dc->dc_lock));
1876 	return (DUP_NEW);
1877 }
1878 
1879 /*
1880  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1881  * Return a victim dupreq entry to the caller, depending on cache policy.
1882  */
1883 static struct dupreq *
1884 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1885 {
1886 	struct dupreq *dr = NULL;
1887 
1888 	switch (dc->dc_basis) {
1889 	case DUPCACHE_FIXEDTIME:
1890 		/*
1891 		 * The hash policy is to free up a bit of the hash
1892 		 * table before allocating a new entry as the victim.
1893 		 * Freeing up the hash table each time should split
1894 		 * the cost of keeping the hash table clean among threads.
1895 		 * Note that only DONE or DROPPED entries are on the lru
1896 		 * list but we do a sanity check anyway.
1897 		 */
1898 		(void) rw_wrlock(&(dc->dc_lock));
1899 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1900 		    ((timenow - dr->dr_time) > dc->dc_time)) {
1901 			/* clean and then free the entry */
1902 			if (dr->dr_status != DUP_DONE &&
1903 			    dr->dr_status != DUP_DROP) {
1904 				/*
1905 				 * The LRU list can't contain an
1906 				 * entry where the status is other than
1907 				 * DUP_DONE or DUP_DROP.
1908 				 */
1909 				syslog(LOG_ERR,
1910 				    "__svc_dupcache_victim: bad victim");
1911 #ifdef DUP_DEBUG
1912 				/*
1913 				 * Need to hold the reader/writers lock to
1914 				 * print the cache info, since we already
1915 				 * hold the writers lock, we shall continue
1916 				 * calling __svc_dupcache_debug()
1917 				 */
1918 				__svc_dupcache_debug(dc);
1919 #endif /* DUP_DEBUG */
1920 				(void) rw_unlock(&(dc->dc_lock));
1921 				return (NULL);
1922 			}
1923 			/* free buffers */
1924 			if (dr->dr_resp.buf) {
1925 				free(dr->dr_resp.buf);
1926 				dr->dr_resp.buf = NULL;
1927 			}
1928 			if (dr->dr_addr.buf) {
1929 				free(dr->dr_addr.buf);
1930 				dr->dr_addr.buf = NULL;
1931 			}
1932 
1933 			/* unhash the entry */
1934 			if (dr->dr_chain)
1935 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1936 			if (dr->dr_prevchain)
1937 				dr->dr_prevchain->dr_chain = dr->dr_chain;
1938 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
1939 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1940 
1941 			/* modify the lru pointers */
1942 			if (dc->dc_mru == dr) {
1943 				dc->dc_mru = NULL;
1944 			} else {
1945 				dc->dc_mru->dr_next = dr->dr_next;
1946 				dr->dr_next->dr_prev = dc->dc_mru;
1947 			}
1948 			free(dr);
1949 			dr = NULL;
1950 		}
1951 		(void) rw_unlock(&(dc->dc_lock));
1952 
1953 		/*
1954 		 * Allocate and return new clean entry as victim
1955 		 */
1956 		if ((dr = malloc(sizeof (*dr))) == NULL) {
1957 			syslog(LOG_ERR,
1958 			    "__svc_dupcache_victim: malloc failed");
1959 			return (NULL);
1960 		}
1961 		(void) memset(dr, 0, sizeof (*dr));
1962 		return (dr);
1963 	default:
1964 		syslog(LOG_ERR,
1965 		    "__svc_dupcache_victim: undefined dup cache_basis");
1966 		return (NULL);
1967 	}
1968 }
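/*
 * List-shape note: the lru list is circular and doubly linked.  dc_mru
 * points at the most recently used entry and dr_next steps from older
 * to newer, with the newest entry wrapping around to the oldest, so
 * dc_mru->dr_next is always the least recently used entry.  That is
 * why the reaper above starts at dc_mru->dr_next and stops at the
 * first entry younger than dc_time seconds.
 */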
1969 
1970 /*
1971  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1972  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1973  * build new duprequest entry and then insert into the cache
1974  */
1975 static int
1976 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1977     struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1978 {
1979 	dr->dr_xid = drxid;
1980 	dr->dr_prog = req->rq_prog;
1981 	dr->dr_vers = req->rq_vers;
1982 	dr->dr_proc = req->rq_proc;
1983 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1984 	dr->dr_addr.len = dr->dr_addr.maxlen;
1985 	if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1986 		syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1987 		free(dr);
1988 		return (DUP_ERROR);
1989 	}
1990 	(void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1991 	(void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1992 	    dr->dr_addr.len);
1993 	dr->dr_resp.buf = NULL;
1994 	dr->dr_resp.maxlen = 0;
1995 	dr->dr_resp.len = 0;
1996 	dr->dr_status = DUP_INPROGRESS;
1997 	dr->dr_time = timenow;
1998 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
1999 
2000 	/* place entry at head of hash table */
2001 	(void) rw_wrlock(&(dc->dc_lock));
2002 	dr->dr_chain = dc->dc_hashtbl[drhash];
2003 	dr->dr_prevchain = NULL;
2004 	if (dc->dc_hashtbl[drhash] != NULL)
2005 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2006 	dc->dc_hashtbl[drhash] = dr;
2007 	(void) rw_unlock(&(dc->dc_lock));
2008 	return (DUP_NEW);
2009 }
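/*
 * Worked example (hypothetical values): with dc_buckets == 257 and an
 * incoming xid of 0x12345678, the caller computes
 *
 *	drhash = 0x12345678 % 257;	== 68
 *
 * and the new entry is pushed onto the head of dc_hashtbl[68].  The
 * dr_chain/dr_prevchain links make each bucket chain doubly linked, so
 * the victim reaper can unhash an entry in constant time.
 */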
2010 
2011 /*
2012  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2013  *		int status, char *xprt_cache)
2014  * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2015  * The status argument must be DUP_DONE or DUP_DROP. Sets the lru pointers
2016  * to make the entry the most recently used. Returns DUP_ERROR or status.
2017  */
2018 int
2019 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2020     int status, char *xprt_cache)
2021 {
2022 	uint32_t drxid, drhash;
2023 	int rc;
2024 
2025 	/* LINTED pointer alignment */
2026 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2027 
2028 	if (dc == NULL) {
2029 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2030 		return (DUP_ERROR);
2031 	}
2032 
2033 	if (status != DUP_DONE && status != DUP_DROP) {
2034 		syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status: "
2035 		    "must be DUP_DONE or DUP_DROP");
2036 		return (DUP_ERROR);
2037 	}
2038 
2039 	/* find the xid of the entry in the cache */
2040 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2041 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2042 		return (DUP_ERROR);
2043 	}
2044 	drhash = drxid % dc->dc_buckets;
2045 
2046 	/* update the status of the entry and result buffers, if required */
2047 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2048 	    dc, drxid, drhash)) == DUP_ERROR) {
2049 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2050 		return (DUP_ERROR);
2051 	}
2052 
2053 	return (rc);
2054 }
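/*
 * Usage sketch (illustrative, not part of this file): once a DUP_NEW
 * request has been executed, the transport caller records the outcome
 * so later retransmissions can be answered from the cache.  reply_buf
 * and reply_sz are hypothetical names for the marshalled reply.
 *
 *	if (reply_sent)
 *		(void) __svc_dupdone(req, reply_buf, reply_sz,
 *		    DUP_DONE, xprt_cache);
 *	else
 *		(void) __svc_dupdone(req, NULL, 0, DUP_DROP, xprt_cache);
 */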
2055 
2056 /*
2057  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2058  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2059  * 	uint32_t drhash)
2060  * Check if the entry exists in the dupcache.  If it does, update its status,
2061  * time, and buffer, as appropriate.  It is possible, though unlikely, for
2062  * DONE requests not to exist in the cache.  Return DUP_ERROR or status.
2063  */
2064 static int
2065 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2066     int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2067 {
2068 	struct dupreq *dr = NULL;
2069 	time_t timenow = time(NULL);
2070 
2071 	(void) rw_wrlock(&(dc->dc_lock));
2072 	dr = dc->dc_hashtbl[drhash];
2073 	while (dr != NULL) {
2074 		if (dr->dr_xid == drxid &&
2075 		    dr->dr_proc == req->rq_proc &&
2076 		    dr->dr_prog == req->rq_prog &&
2077 		    dr->dr_vers == req->rq_vers &&
2078 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2079 		    memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2080 		    dr->dr_addr.len) == 0) { /* entry found */
2081 			if (dr->dr_hash != drhash) {
2082 				/* sanity check */
2083 				(void) rw_unlock(&(dc->dc_lock));
2084 				syslog(LOG_ERR,
2085 				    "__svc_dupdone: hashing error");
2086 				return (DUP_ERROR);
2087 			}
2088 
2089 			/* store the results if buffer is not NULL */
2090 			if (resp_buf != NULL) {
2091 				if ((dr->dr_resp.buf =
2092 				    malloc(resp_bufsz)) == NULL) {
2093 					(void) rw_unlock(&(dc->dc_lock));
2094 					syslog(LOG_ERR,
2095 					    "__svc_dupdone: malloc failed");
2096 					return (DUP_ERROR);
2097 				}
2098 				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2099 				(void) memcpy(dr->dr_resp.buf, resp_buf,
2100 				    (uint_t)resp_bufsz);
2101 				dr->dr_resp.len = resp_bufsz;
2102 			}
2103 
2104 			/* update status and done time */
2105 			dr->dr_status = status;
2106 			dr->dr_time = timenow;
2107 
2108 			/* move the entry to the mru position */
2109 			if (dc->dc_mru == NULL) {
2110 				dr->dr_next = dr;
2111 				dr->dr_prev = dr;
2112 			} else {
2113 				dr->dr_next = dc->dc_mru->dr_next;
2114 				dc->dc_mru->dr_next->dr_prev = dr;
2115 				dr->dr_prev = dc->dc_mru;
2116 				dc->dc_mru->dr_next = dr;
2117 			}
2118 			dc->dc_mru = dr;
2119 
2120 			(void) rw_unlock(&(dc->dc_lock));
2121 			return (status);
2122 		}
2123 		dr = dr->dr_chain;
2124 	}
2125 	(void) rw_unlock(&(dc->dc_lock));
2126 	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2127 	return (DUP_ERROR);
2128 }
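/*
 * Note: updating dr_time and splicing the entry back in at the mru
 * position keeps the lru list sorted by completion time, which is what
 * lets __svc_dupcache_victim() reap expired entries starting from
 * dc_mru->dr_next without scanning the whole list.
 */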
2129 
2130 #ifdef DUP_DEBUG
2131 /*
2132  * __svc_dupcache_debug(struct dupcache *dc)
2133  * Print the contents of the hash table and the lru list.
2134  *
2135  * This function requires the caller to hold the reader
2136  * or writer version of the duplicate request cache lock (dc_lock).
2137  */
2138 static void
2139 __svc_dupcache_debug(struct dupcache *dc)
2140 {
2141 	struct dupreq *dr = NULL;
2142 	int i;
2143 	bool_t bval;
2144 
2145 	fprintf(stderr, "   HASHTABLE\n");
2146 	for (i = 0; i < dc->dc_buckets; i++) {
2147 		bval = FALSE;
2148 		dr = dc->dc_hashtbl[i];
2149 		while (dr != NULL) {
2150 			if (!bval) {	/* ensures bucket printed only once */
2151 				fprintf(stderr, "    bucket : %d\n", i);
2152 				bval = TRUE;
2153 			}
2154 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2155 			    dr->dr_xid, dr->dr_status, dr->dr_time);
2156 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2157 			    (void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2158 			dr = dr->dr_chain;
2159 		}
2160 	}
2161 
2162 	fprintf(stderr, "   LRU\n");
2163 	if (dc->dc_mru) {
2164 		dr = dc->dc_mru->dr_next;	/* lru */
2165 		while (dr != dc->dc_mru) {
2166 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2167 			    dr->dr_xid, dr->dr_status, dr->dr_time);
2168 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2169 			    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2170 			dr = dr->dr_next;
2171 		}
2172 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2173 		    dr->dr_xid, dr->dr_status, dr->dr_time);
2174 		fprintf(stderr, " dr: %p next: %p prev: %p\n",
2175 		    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2176 	}
2177 }
2178 #endif /* DUP_DEBUG */
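/*
 * Note: the dump above is compiled only when DUP_DEBUG is defined (for
 * example, by building with -DDUP_DEBUG) and, as documented above, the
 * caller must already hold dc_lock; the victim code calls it while
 * holding the writer lock.
 */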
2179