xref: /titanic_41/usr/src/lib/libnsl/rpc/svc.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28  */
29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31 /*
32  * Portions of this source code were derived from Berkeley
33  * 4.3 BSD under license from the Regents of the University of
34  * California.
35  */
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /*
40  * svc.c, Server-side remote procedure call interface.
41  *
42  * There are two sets of procedures here.  The xprt routines are
43  * for handling transport handles.  The svc routines handle the
44  * list of service routines.
45  *
46  */
47 
48 
49 #include "mt.h"
50 #include "rpc_mt.h"
51 #include <assert.h>
52 #include <errno.h>
53 #include <sys/types.h>
54 #include <stropts.h>
55 #include <sys/conf.h>
56 #include <rpc/trace.h>
57 #include <rpc/rpc.h>
58 #ifdef PORTMAP
59 #include <rpc/pmap_clnt.h>
60 #endif
61 #include <sys/poll.h>
62 #include <netconfig.h>
63 #include <syslog.h>
64 #include <stdlib.h>
65 #include <unistd.h>
66 #include <string.h>
67 #include <limits.h>
68 
69 extern bool_t __svc_get_door_cred();
70 extern bool_t __rpc_get_local_cred();
71 
72 SVCXPRT **svc_xports;
73 static int nsvc_xports; 	/* total number of svc_xports allocated */
74 
75 XDR **svc_xdrs;		/* common XDR receive area */
76 int nsvc_xdrs;		/* total number of svc_xdrs allocated */
77 
78 int __rpc_use_pollfd_done;	/* removes the limit on the number of connections */
79 
80 #define	NULL_SVC ((struct svc_callout *)0)
81 #define	RQCRED_SIZE	400		/* this size is excessive */
82 
83 /*
84  * The services list
85  * Each entry represents a set of procedures (an rpc program).
86  * The dispatch routine takes request structs and runs the
87  * appropriate procedure.
88  */
89 static struct svc_callout {
90 	struct svc_callout *sc_next;
91 	rpcprog_t	    sc_prog;
92 	rpcvers_t	    sc_vers;
93 	char		   *sc_netid;
94 	void		    (*sc_dispatch)();
95 } *svc_head;
96 extern rwlock_t	svc_lock;
97 
98 static struct svc_callout *svc_find();
99 int _svc_prog_dispatch();
100 void svc_getreq_common();
101 char *strdup();
102 
103 extern mutex_t	svc_door_mutex;
104 extern cond_t	svc_door_waitcv;
105 extern int	svc_ndoorfds;
106 extern SVCXPRT_LIST *_svc_xprtlist;
107 extern mutex_t xprtlist_lock;
108 extern void __svc_rm_from_xlist();
109 
110 extern fd_set _new_svc_fdset;
111 
112 /*
113  * If the allocated array of reactors is too small, it is grown by this
114  * margin. This reduces the number of reallocations.
115  */
116 #define	USER_FD_INCREMENT 5
117 
118 static void add_pollfd(int fd, short events);
119 static void remove_pollfd(int fd);
120 static void __svc_remove_input_of_fd(int fd);
121 
122 
123 /*
124  * Data used to handle reactor:
125  * 	- one file descriptor we listen to,
126  *	- one callback we call if the fd pops,
127  *	- and a cookie passed as a parameter to the callback.
128  *
129  * The structure is an array indexed by file descriptor. Each entry points
130  * to the first element of a doubly-linked list of callbacks.
131  * Only one callback may be associated with a given (fd, event) pair.
132  */
133 
134 struct _svc_user_fd_head;
135 
136 typedef struct
137 {
138 	struct _svc_user_fd_node *next;
139 	struct _svc_user_fd_node *previous;
140 } _svc_user_link;
141 
142 typedef struct _svc_user_fd_node
143 {
144 	/* The lnk field must be the first field. */
145 	_svc_user_link lnk;
146 	svc_input_id_t id;
147 	int	    fd;
148 	unsigned int   events;
149 	svc_callback_t callback;
150 	void*	  cookie;
151 } _svc_user_fd_node;
152 
153 typedef struct _svc_user_fd_head
154 {
155 	/* The lnk field must be the first field. */
156 	_svc_user_link lnk;
157 	unsigned int mask;    /* logical OR of all sub-masks */
158 } _svc_user_fd_head;
159 
160 
161 /* Define some macros to manage the linked list. */
162 #define	LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
163 #define	LIST_CLR(l) \
164 	(l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
165 
166 /* Array of defined reactors - indexed by file descriptor */
167 static _svc_user_fd_head *svc_userfds  = NULL;
168 
169 /* current size of the svc_userfds array */
170 static int svc_nuserfds = 0;
171 
172 /* Mutex to ensure MT safe operations for user fds callbacks. */
173 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
174 
175 
176 /*
177  * This structure is used to provide constant-time algorithms. There is an
178  * array of this structure as large as svc_nuserfds. When the user registers
179  * a new callback, the address of the created structure is stored in a cell
180  * of this array. The index of this cell is the returned unique identifier.
181  *
182  * On removal, the id is given by the user, so we know whether this cell is
183  * in use or not (via the free flag). If it is free, we return an error.
184  * Otherwise, we can free the structure pointed to by fd_node.
185  *
186  * On insertion, we use the linked list formed by (first_free, next_free).
187  * In this way, with a constant-time computation, we can hand a correct
188  * index back to the user.
189  */
190 
191 typedef struct _svc_management_user_fd
192 {
193 	bool_t free;
194 	union {
195 		svc_input_id_t next_free;
196 		_svc_user_fd_node *fd_node;
197 	} data;
198 } _svc_management_user_fd;
199 
200 /* index to the first free elem */
201 static svc_input_id_t first_free = (svc_input_id_t)-1;
202 /* the size of this array is the same as svc_nuserfds */
203 static _svc_management_user_fd* user_fd_mgt_array = NULL;
204 
205 /* current size of user_fd_mgt_array */
206 static int svc_nmgtuserfds = 0;
207 
208 
209 /* Define some macros to access data associated to registration ids. */
210 #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
211 #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
212 
213 #ifndef POLLSTANDARD
214 #define	POLLSTANDARD \
215 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
216 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
217 #endif
218 
219 /*
220  * To free an id, we mark the cell as free and insert its index at the
221  * head of the free list.
222  */
223 
224 static void
225 _svc_free_id(const svc_input_id_t id)
226 {
227 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
228 	user_fd_mgt_array[(int)id].free = TRUE;
229 	user_fd_mgt_array[(int)id].data.next_free = first_free;
230 	first_free = id;
231 }
232 
233 /*
234  * To get a free cell, we just have to take it from the free linked list and
235  * set the flag to "not free". This function also allocates new memory if
236  * necessary.
237  */
238 static svc_input_id_t
239 _svc_attribute_new_id(_svc_user_fd_node *node)
240 {
241 	int selected_index = (int)first_free;
242 	assert(node != NULL);
243 
244 	if (selected_index == -1) {
245 		/* Allocate new entries */
246 		int L_inOldSize = svc_nmgtuserfds;
247 		int i;
248 		_svc_management_user_fd* alloc_array;
249 
250 		svc_nmgtuserfds += USER_FD_INCREMENT;
251 
252 		user_fd_mgt_array = (_svc_management_user_fd *)
253 		    realloc(user_fd_mgt_array, svc_nmgtuserfds
254 			* sizeof (_svc_management_user_fd));
255 
256 		if (user_fd_mgt_array == NULL) {
257 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
258 			errno = ENOMEM;
259 			return ((svc_input_id_t)-1);
260 		}
261 
262 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
263 			_svc_free_id((svc_input_id_t)i);
264 		selected_index = (int)first_free;
265 	}
266 
267 	node->id = (svc_input_id_t)selected_index;
268 	first_free = user_fd_mgt_array[selected_index].data.next_free;
269 
270 	user_fd_mgt_array[selected_index].data.fd_node = node;
271 	user_fd_mgt_array[selected_index].free = FALSE;
272 
273 	return ((svc_input_id_t)selected_index);
274 }
275 
276 /*
277  * Process a pollfd entry. Scan all the associated callbacks that have
278  * at least one bit of their mask matching a received event.
279  *
280  * If the POLLNVAL event is received, we check whether one of the callbacks
281  * processes it; if not, the file descriptor is removed from the poll. If
282  * there is one, we let the user do the work.
283  */
284 void
285 __svc_getreq_user(struct pollfd *pfd)
286 {
287 	int fd = pfd->fd;
288 	short revents = pfd->revents;
289 	bool_t invalHandled = FALSE;
290 	_svc_user_fd_node *node;
291 
292 	mutex_lock(&svc_userfds_lock);
293 
294 	if ((fd < 0) || (fd >= svc_nuserfds)) {
295 		mutex_unlock(&svc_userfds_lock);
296 		return;
297 	}
298 
299 	node = svc_userfds[fd].lnk.next;
300 
301 	/* check if at least one mask fits */
302 	if (0 == (revents & svc_userfds[fd].mask)) {
303 		mutex_unlock(&svc_userfds_lock);
304 		return;
305 	}
306 
307 	while ((svc_userfds[fd].mask != 0) &&
308 	    ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
309 		/*
310 		 * If one of the received events matches the ones the node
311 		 * listens to
312 		 */
313 		_svc_user_fd_node *next = node->lnk.next;
314 
315 		if (node->callback != NULL) {
316 			if (node->events & revents) {
317 				if (revents & POLLNVAL) {
318 					invalHandled = TRUE;
319 				}
320 
321 				/*
322 				 * The lock must be released before calling the
323 				 * user function, as this function can call
324 				 * svc_remove_input() for example.
325 				 */
326 				mutex_unlock(&svc_userfds_lock);
327 				node->callback(node->id, node->fd,
328 				    node->events & revents, node->cookie);
329 				/*
330 				 * Do not use the node structure anymore, as it
331 				 * could have been deallocated by the previous
332 				 * callback.
333 				 */
334 				mutex_lock(&svc_userfds_lock);
335 			}
336 		}
337 		node = next;
338 	}
339 
340 	if ((revents & POLLNVAL) && !invalHandled)
341 		__svc_remove_input_of_fd(fd);
342 	mutex_unlock(&svc_userfds_lock);
343 }
344 
345 
346 /*
347  * Check if a file descriptor is associated with a user reactor.
348  * To do this, just check that the array indexed on fd has a non-void linked
349  * To do this, just check that the entry indexed by fd has a non-empty
350  * linked list (i.e. its event mask is non-zero)
351 bool_t
352 __is_a_userfd(int fd)
353 {
354 	/* Checks argument */
355 	if ((fd < 0) || (fd >= svc_nuserfds))
356 		return (FALSE);
357 	return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
358 }
359 
360 /* free everything concerning user fd */
361 /* used in svc_run.c => not static */
362 
363 void
364 __destroy_userfd()
365 {
366 	int one_fd;
367 	/* Clean user fd */
368 	if (svc_userfds != NULL) {
369 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
370 			_svc_user_fd_node *node;
371 
372 			node = svc_userfds[one_fd].lnk.next;
373 			while ((_svc_user_link *) node
374 			    != (_svc_user_link *) &(svc_userfds[one_fd])) {
375 				_svc_free_id(node->id);
376 				node = node->lnk.next;
377 				free(node->lnk.previous);
378 			}
379 		}
380 
381 		free(user_fd_mgt_array);
382 		user_fd_mgt_array = NULL;
383 		first_free = (svc_input_id_t)-1;
384 
385 		free(svc_userfds);
386 		svc_userfds = NULL;
387 		svc_nuserfds = 0;
388 	}
389 }
390 
391 /*
392  * Remove all the callbacks associated with an fd => useful when the fd
393  * is closed, for instance
394  */
395 static void
396 __svc_remove_input_of_fd(int fd)
397 {
398 	_svc_user_fd_node *one_node;
399 
400 	if ((fd < 0) || (fd >= svc_nuserfds))
401 		return;
402 
403 	one_node = svc_userfds[fd].lnk.next;
404 	while ((_svc_user_link *) one_node
405 	    != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
406 		_svc_free_id(one_node->id);
407 		one_node = one_node->lnk.next;
408 		free(one_node->lnk.previous);
409 	}
410 
411 	LIST_CLR(svc_userfds[fd]);
412 	svc_userfds[fd].mask = 0;
413 }
414 
415 /*
416  * Allow the user to add an fd to the poll list. If it does not succeed,
417  * return (svc_input_id_t)-1. Otherwise, return the registration id
418  */
419 
420 svc_input_id_t
421 svc_add_input(int user_fd, unsigned int events,
422     svc_callback_t user_callback, void *cookie)
423 {
424 	_svc_user_fd_node *new_node;
425 
426 	if (user_fd < 0) {
427 		errno = EINVAL;
428 		return ((svc_input_id_t)-1);
429 	}
430 
431 	if ((events == 0x0000) ||
432 	    (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
433 	    POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
434 		errno = EINVAL;
435 		return ((svc_input_id_t)-1);
436 	}
437 
438 	mutex_lock(&svc_userfds_lock);
439 
440 	if ((user_fd < svc_nuserfds) &&
441 	    (svc_userfds[user_fd].mask & events) != 0) {
442 		/* Callback already registered */
443 		errno = EEXIST;
444 		mutex_unlock(&svc_userfds_lock);
445 		return ((svc_input_id_t)-1);
446 	}
447 
448 	/* Handle memory allocation. */
449 	if (user_fd >= svc_nuserfds) {
450 		int oldSize = svc_nuserfds;
451 		int i;
452 
453 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
454 
455 		svc_userfds = (_svc_user_fd_head *)
456 		    realloc(svc_userfds,
457 			svc_nuserfds * sizeof (_svc_user_fd_head));
458 
459 		if (svc_userfds == NULL) {
460 			syslog(LOG_ERR, "svc_add_input: out of memory");
461 			errno = ENOMEM;
462 			mutex_unlock(&svc_userfds_lock);
463 			return ((svc_input_id_t)-1);
464 		}
465 
466 		for (i = oldSize; i < svc_nuserfds; i++) {
467 			LIST_CLR(svc_userfds[i]);
468 			svc_userfds[i].mask = 0;
469 		}
470 	}
471 
472 	new_node = (_svc_user_fd_node *)malloc(sizeof (_svc_user_fd_node));
473 	if (new_node == NULL) {
474 		syslog(LOG_ERR, "svc_add_input: out of memory");
475 		errno = ENOMEM;
476 		mutex_unlock(&svc_userfds_lock);
477 		return ((svc_input_id_t)-1);
478 	}
479 
480 	/* create a new node */
481 	new_node->fd		= user_fd;
482 	new_node->events	= events;
483 	new_node->callback	= user_callback;
484 	new_node->cookie	= cookie;
485 
486 	(void) _svc_attribute_new_id(new_node);
487 
488 	/* Add the new element at the beginning of the list. */
489 	if (LIST_ISEMPTY(svc_userfds[user_fd])) {
490 		svc_userfds[user_fd].lnk.previous = new_node;
491 	}
492 	new_node->lnk.next = svc_userfds[user_fd].lnk.next;
493 	new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
494 
495 	svc_userfds[user_fd].lnk.next = new_node;
496 
497 	/* refresh global mask for this file descriptor */
498 	svc_userfds[user_fd].mask |= events;
499 
500 	/* refresh mask for the poll */
501 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
502 
503 	mutex_unlock(&svc_userfds_lock);
504 	return (new_node->id);
505 }
506 
507 
508 int
509 svc_remove_input(svc_input_id_t id)
510 {
511 	_svc_user_fd_node* node;
512 	_svc_user_fd_node* next;
513 	_svc_user_fd_node* previous;
514 	int fd;		/* caching optim */
515 
516 	mutex_lock(&svc_userfds_lock);
517 
518 	/* Immediately update data for id management */
519 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
520 	    is_free_id(id)) {
521 		errno = EINVAL;
522 		mutex_unlock(&svc_userfds_lock);
523 		return (-1);
524 	}
525 
526 	node = node_from_id(id);
527 	assert(node != NULL);
528 
529 	_svc_free_id(id);
530 	next		= node->lnk.next;
531 	previous	= node->lnk.previous;
532 	fd		= node->fd; /* caching optim */
533 
534 	    /* Remove this node from the list. */
535 	previous->lnk.next = next;
536 	next->lnk.previous = previous;
537 
538 	    /* Remove the node flags from the global mask */
539 	svc_userfds[fd].mask ^= node->events;
540 
541 	free(node);
542 	if (svc_userfds[fd].mask == 0) {
543 		LIST_CLR(svc_userfds[fd]);
544 		assert(LIST_ISEMPTY(svc_userfds[fd]));
545 		remove_pollfd(fd);
546 	}
547 	/* A cleanup pass here could shrink memory usage. */
548 
549 	mutex_unlock(&svc_userfds_lock);
550 	return (0);
551 }
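
/*
 * Illustrative sketch (not part of the library): how a server might use
 * svc_add_input() and svc_remove_input() to have the svc poll loop watch
 * a private file descriptor.  The guard macro, the callback and fd names
 * below are hypothetical; only the calling sequence is the point.
 */
#ifdef SVC_USERFD_EXAMPLE
/*ARGSUSED*/
static void
example_wakeup_callback(svc_input_id_t id, int fd, unsigned int events,
    void *cookie)
{
	char buf[64];

	/* drain whatever woke us up; a real server would act on it */
	(void) read(fd, buf, sizeof (buf));
}

static svc_input_id_t
example_watch_fd(int user_fd)
{
	svc_input_id_t id;

	id = svc_add_input(user_fd, POLLIN, example_wakeup_callback, NULL);
	if ((int)id == -1)
		syslog(LOG_ERR, "example_watch_fd: svc_add_input failed");
	return (id);
}

static void
example_unwatch_fd(svc_input_id_t id)
{
	if (svc_remove_input(id) == -1)
		syslog(LOG_ERR, "example_unwatch_fd: unknown id");
}
#endif	/* SVC_USERFD_EXAMPLE */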
552 
553 
554 /*
555  * Provides default service-side functions for authentication flavors
556  * that do not use all the fields in struct svc_auth_ops.
557  */
558 
559 /*ARGSUSED*/
560 static int
561 authany_wrap(auth, xdrs, xfunc, xwhere)
562 	AUTH		*auth;
563 	XDR		*xdrs;
564 	xdrproc_t	xfunc;
565 	caddr_t		xwhere;
566 {
567 	return (*xfunc)(xdrs, xwhere);
568 }
569 
570 struct svc_auth_ops svc_auth_any_ops = {
571 	authany_wrap,
572 	authany_wrap,
573 };
574 
575 /*
576  * Return pointer to server authentication structure.
577  */
578 SVCAUTH *
579 __svc_get_svcauth(xprt)
580 	SVCXPRT	*xprt;
581 {
582 /* LINTED pointer alignment */
583 	return (&SVC_XP_AUTH(xprt));
584 }
585 
586 /*
587  * A callback routine to cleanup after a procedure is executed.
588  */
589 void (*__proc_cleanup_cb)() = NULL;
590 
591 void *
592 __svc_set_proc_cleanup_cb(cb)
593 	void	*cb;
594 {
595 	void	*tmp = (void *)__proc_cleanup_cb;
596 
597 	__proc_cleanup_cb = (void (*)())cb;
598 	return (tmp);
599 }
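
/*
 * Illustrative sketch (not part of the library): installing a cleanup
 * callback with __svc_set_proc_cleanup_cb().  The guard macro and callback
 * name are hypothetical; svc_getreq_common() below invokes the installed
 * callback with the transport handle after each dispatched procedure.
 */
#ifdef SVC_CLEANUP_CB_EXAMPLE
/*ARGSUSED*/
static void
example_proc_cleanup(SVCXPRT *xprt)
{
	/* release any per-request state tied to this transport */
}

static void
example_install_cleanup(void)
{
	void *prev;

	prev = __svc_set_proc_cleanup_cb((void *)example_proc_cleanup);
	if (prev != NULL)
		syslog(LOG_WARNING, "replaced an existing cleanup callback");
}
#endif	/* SVC_CLEANUP_CB_EXAMPLE */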
600 
601 /* ***************  SVCXPRT related stuff **************** */
602 
603 
604 static int pollfd_shrinking = 1;
605 
606 
607 /*
608  * Add fd to svc_pollfd
609  */
610 static void
611 add_pollfd(int fd, short events)
612 {
613 	if (fd < FD_SETSIZE) {
614 		FD_SET(fd, &svc_fdset);
615 #if !defined(_LP64)
616 		FD_SET(fd, &_new_svc_fdset);
617 #endif
618 		svc_nfds++;
619 		svc_nfds_set++;
620 		if (fd >= svc_max_fd)
621 			svc_max_fd = fd + 1;
622 	}
623 	if (fd >= svc_max_pollfd)
624 		svc_max_pollfd = fd + 1;
625 	if (svc_max_pollfd > svc_pollfd_allocd) {
626 		int i = svc_pollfd_allocd;
627 		pollfd_t *tmp;
628 		do {
629 			svc_pollfd_allocd += POLLFD_EXTEND;
630 		} while (svc_max_pollfd > svc_pollfd_allocd);
631 		tmp = realloc(svc_pollfd,
632 					sizeof (pollfd_t) * svc_pollfd_allocd);
633 		if (tmp != NULL) {
634 			svc_pollfd = tmp;
635 			for (; i < svc_pollfd_allocd; i++)
636 				POLLFD_CLR(i, tmp);
637 		} else {
638 			/*
639 			 * give an error message; undo the fdset setting
640 			 * above; reset the pollfd_shrinking flag.
641 			 * Because of this, poll will not be done
642 			 * on these fds.
643 			 */
644 			if (fd < FD_SETSIZE) {
645 				FD_CLR(fd, &svc_fdset);
646 #if !defined(_LP64)
647 				FD_CLR(fd, &_new_svc_fdset);
648 #endif
649 				svc_nfds--;
650 				svc_nfds_set--;
651 				if (fd == (svc_max_fd - 1))
652 					svc_max_fd--;
653 			}
654 			if (fd == (svc_max_pollfd - 1))
655 				svc_max_pollfd--;
656 			pollfd_shrinking = 0;
657 			syslog(LOG_ERR, "add_pollfd: out of memory");
658 			_exit(1);
659 		}
660 	}
661 	svc_pollfd[fd].fd	= fd;
662 	svc_pollfd[fd].events	= events;
663 	svc_npollfds++;
664 	svc_npollfds_set++;
665 }
666 
667 /*
668  * the fd is still active but only the bit in fdset is cleared.
669  * do not subtract svc_nfds or svc_npollfds
670  */
671 void
672 clear_pollfd(int fd)
673 {
674 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
675 		FD_CLR(fd, &svc_fdset);
676 #if !defined(_LP64)
677 		FD_CLR(fd, &_new_svc_fdset);
678 #endif
679 		svc_nfds_set--;
680 	}
681 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
682 		POLLFD_CLR(fd, svc_pollfd);
683 		svc_npollfds_set--;
684 	}
685 }
686 
687 /*
688  * sets the bit in fdset for an active fd so that poll() is done for that fd
689  */
690 void
691 set_pollfd(int fd, short events)
692 {
693 	if (fd < FD_SETSIZE) {
694 		FD_SET(fd, &svc_fdset);
695 #if !defined(_LP64)
696 		FD_SET(fd, &_new_svc_fdset);
697 #endif
698 		svc_nfds_set++;
699 	}
700 	if (fd < svc_pollfd_allocd) {
701 		svc_pollfd[fd].fd	= fd;
702 		svc_pollfd[fd].events	= events;
703 		svc_npollfds_set++;
704 	}
705 }
706 
707 /*
708  * remove a svc_pollfd entry; it does not shrink the memory
709  */
710 static void
711 remove_pollfd(fd)
712 	int fd;
713 {
714 	clear_pollfd(fd);
715 	if (fd == (svc_max_fd - 1))
716 		svc_max_fd--;
717 	svc_nfds--;
718 	if (fd == (svc_max_pollfd - 1))
719 		svc_max_pollfd--;
720 	svc_npollfds--;
721 }
722 
723 /*
724  * delete a svc_pollfd entry; it shrinks the memory
725  * use remove_pollfd if you do not want to shrink
726  */
727 static void
728 delete_pollfd(int fd)
729 {
730 	remove_pollfd(fd);
731 	if (pollfd_shrinking && svc_max_pollfd <
732 			(svc_pollfd_allocd - POLLFD_SHRINK)) {
733 		do {
734 			svc_pollfd_allocd -= POLLFD_SHRINK;
735 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
736 		svc_pollfd = realloc(svc_pollfd,
737 				sizeof (pollfd_t) * svc_pollfd_allocd);
738 		if (svc_pollfd == NULL) {
739 			syslog(LOG_ERR, "delete_pollfd: out of memory");
740 			_exit(1);
741 		}
742 	}
743 }
744 
745 
746 /*
747  * Activate a transport handle.
748  */
749 void
750 xprt_register(xprt)
751 	const SVCXPRT *xprt;
752 {
753 	int fd = xprt->xp_fd;
754 #ifdef CALLBACK
755 	extern void (*_svc_getreqset_proc)();
756 #endif
757 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
758 
759 	trace1(TR_xprt_register, 0);
760 	rw_wrlock(&svc_fd_lock);
761 	if (svc_xports == NULL) {
762 		/* allocate some small amount first */
763 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
764 		if (svc_xports == NULL) {
765 			syslog(LOG_ERR, "xprt_register: out of memory");
766 			_exit(1);
767 		}
768 		nsvc_xports = FD_INCREMENT;
769 
770 #ifdef CALLBACK
771 		/*
772 		 * XXX: This code does not keep track of the server state.
773 		 *
774 		 * This provides for callback support.	When a client
775 		 * receives a call from another client on the server fds,
776 		 * it calls _svc_getreqset_proc() which would return
777 		 * after serving all the server requests.  Also look under
778 		 * clnt_dg.c and clnt_vc.c  (clnt_call part of it)
779 		 */
780 		_svc_getreqset_proc = svc_getreq_poll;
781 #endif
782 	}
783 
784 	while (fd >= nsvc_xports) {
785 		SVCXPRT **tmp_xprts = svc_xports;
786 
787 		/* time to expand svc_xprts */
788 		tmp_xprts = realloc(svc_xports,
789 			sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
790 		if (tmp_xprts == NULL) {
791 			syslog(LOG_ERR, "xprt_register : out of memory.");
792 			_exit(1);
793 		}
794 
795 		svc_xports = tmp_xprts;
796 		(void) memset(&svc_xports[nsvc_xports], 0,
797 					sizeof (SVCXPRT *) * FD_INCREMENT);
798 		nsvc_xports += FD_INCREMENT;
799 	}
800 
801 	svc_xports[fd] = (SVCXPRT *)xprt;
802 
803 	add_pollfd(fd, MASKVAL);
804 
805 	if (svc_polling) {
806 		char dummy;
807 
808 		/*
809 		 * This happens only in one of the MT modes.
810 		 * Wake up poller.
811 		 */
812 		write(svc_pipe[1], &dummy, sizeof (dummy));
813 	}
814 	/*
815 	 * If already dispatching door based services, start
816 	 * dispatching TLI based services now.
817 	 */
818 	mutex_lock(&svc_door_mutex);
819 	if (svc_ndoorfds > 0)
820 		cond_signal(&svc_door_waitcv);
821 	mutex_unlock(&svc_door_mutex);
822 
823 	if (svc_xdrs == NULL) {
824 		/* allocate initial chunk */
825 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
826 		if (svc_xdrs != NULL)
827 			nsvc_xdrs = FD_INCREMENT;
828 		else {
829 			syslog(LOG_ERR, "xprt_register : out of memory.");
830 			_exit(1);
831 		}
832 	}
833 	rw_unlock(&svc_fd_lock);
834 
835 	trace1(TR_xprt_register, 1);
836 }
837 
838 /*
839  * De-activate a transport handle.
840  */
841 void
842 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
843 {
844 	int fd = xprt->xp_fd;
845 
846 	trace1(TR_xprt_unregister, 0);
847 	if (lock_not_held)
848 		rw_wrlock(&svc_fd_lock);
849 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
850 		svc_xports[fd] = (SVCXPRT *)NULL;
851 		delete_pollfd(fd);
852 	}
853 	if (lock_not_held)
854 		rw_unlock(&svc_fd_lock);
855 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
856 	trace1(TR_xprt_unregister, 1);
857 }
858 
859 void
860 xprt_unregister(xprt)
861 	const SVCXPRT *xprt;
862 {
863 	__xprt_unregister_private(xprt, TRUE);
864 }
865 
866 /* ********************** CALLOUT list related stuff ************* */
867 
868 /*
869  * Add a service program to the callout list.
870  * The dispatch routine will be called when an rpc request for this
871  * program number comes in.
872  */
873 bool_t
874 svc_reg(xprt, prog, vers, dispatch, nconf)
875 	const SVCXPRT *xprt;
876 	rpcprog_t prog;
877 	rpcvers_t vers;
878 	void (*dispatch)();
879 	const struct netconfig *nconf;
880 {
881 	bool_t dummy;
882 	struct svc_callout *prev;
883 	struct svc_callout *s, **s2;
884 	struct netconfig *tnconf;
885 	char *netid = NULL;
886 	int flag = 0;
887 
888 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
889 
890 	trace3(TR_svc_reg, 0, prog, vers);
891 	if (xprt->xp_netid) {
892 		netid = strdup(xprt->xp_netid);
893 		flag = 1;
894 	} else if (nconf && nconf->nc_netid) {
895 		netid = strdup(nconf->nc_netid);
896 		flag = 1;
897 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
898 			!= NULL) {
899 		netid = strdup(tnconf->nc_netid);
900 		flag = 1;
901 		freenetconfigent(tnconf);
902 	} /* must have been created with svc_raw_create */
903 	if ((netid == NULL) && (flag == 1)) {
904 		trace3(TR_svc_reg, 1, prog, vers);
905 		return (FALSE);
906 	}
907 
908 	rw_wrlock(&svc_lock);
909 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
910 		if (netid)
911 			free(netid);
912 		if (s->sc_dispatch == dispatch)
913 			goto rpcb_it; /* it is registering another xprt */
914 		trace3(TR_svc_reg, 1, prog, vers);
915 		rw_unlock(&svc_lock);
916 		return (FALSE);
917 	}
918 	s = (struct svc_callout *)mem_alloc(sizeof (struct svc_callout));
919 	if (s == (struct svc_callout *)NULL) {
920 		if (netid)
921 			free(netid);
922 		trace3(TR_svc_reg, 1, prog, vers);
923 		rw_unlock(&svc_lock);
924 		return (FALSE);
925 	}
926 
927 	s->sc_prog = prog;
928 	s->sc_vers = vers;
929 	s->sc_dispatch = dispatch;
930 	s->sc_netid = netid;
931 	s->sc_next = NULL;
932 
933 	/*
934 	 * The ordering of transports is such that the most frequently used
935 	 * one appears first.  So add the new entry to the end of the list.
936 	 */
937 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
938 		;
939 	*s2 = s;
940 
941 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
942 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
943 			syslog(LOG_ERR, "svc_reg : strdup failed.");
944 			free(netid);
945 			mem_free((char *)s,
946 				(uint_t)sizeof (struct svc_callout));
947 			*s2 = NULL;
948 			rw_unlock(&svc_lock);
949 			trace3(TR_svc_reg, 1, prog, vers);
950 			return (FALSE);
951 		}
952 
953 rpcb_it:
954 	rw_unlock(&svc_lock);
955 	/* now register the information with the local binder service */
956 	if (nconf) {
957 		dummy = rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr);
958 		trace3(TR_svc_reg, 1, prog, vers);
959 		return (dummy);
960 	}
961 	trace3(TR_svc_reg, 1, prog, vers);
962 	return (TRUE);
963 }
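
/*
 * Illustrative sketch (not part of the library): typical use of svc_reg()
 * by a TI-RPC server.  The guard macro, program/version numbers, dispatch
 * routine and use of svc_tli_create() are hypothetical placeholders that
 * only show the calling sequence.
 */
#ifdef SVC_REG_EXAMPLE
#define	EXAMPLE_PROG	((rpcprog_t)0x20000099)
#define	EXAMPLE_VERS	((rpcvers_t)1)

static void example_service_dispatch(struct svc_req *, SVCXPRT *);

static bool_t
example_register_on(const struct netconfig *nconf)
{
	SVCXPRT *xprt;

	xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
	if (xprt == NULL)
		return (FALSE);
	/* with a non-NULL nconf, svc_reg() also registers with rpcbind */
	return (svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
	    example_service_dispatch, nconf));
}
#endif	/* SVC_REG_EXAMPLE */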
964 
965 /*
966  * Remove a service program from the callout list.
967  */
968 void
969 svc_unreg(prog, vers)
970 	rpcprog_t prog;
971 	rpcvers_t vers;
972 {
973 	struct svc_callout *prev;
974 	struct svc_callout *s;
975 
976 	trace3(TR_svc_unreg, 0, prog, vers);
977 	/* unregister the information anyway */
978 	(void) rpcb_unset(prog, vers, NULL);
979 	rw_wrlock(&svc_lock);
980 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
981 		if (prev == NULL_SVC) {
982 			svc_head = s->sc_next;
983 		} else {
984 			prev->sc_next = s->sc_next;
985 		}
986 		s->sc_next = NULL_SVC;
987 		if (s->sc_netid)
988 			mem_free((char *)s->sc_netid,
989 					(uint_t)strlen(s->sc_netid) + 1);
990 		mem_free((char *)s, (uint_t)sizeof (struct svc_callout));
991 	}
992 	rw_unlock(&svc_lock);
993 	trace3(TR_svc_unreg, 1, prog, vers);
994 }
995 
996 #ifdef PORTMAP
997 /*
998  * Add a service program to the callout list.
999  * The dispatch routine will be called when an rpc request for this
1000  * program number comes in.
1001  * For version 2 portmappers.
1002  */
1003 #ifdef KERNEL
1004 /*ARGSUSED*/
1005 #endif
1006 bool_t
1007 svc_register(xprt, prog, vers, dispatch, protocol)
1008 	SVCXPRT *xprt;
1009 	rpcprog_t prog;
1010 	rpcvers_t vers;
1011 	void (*dispatch)();
1012 	int protocol;
1013 {
1014 	bool_t dummy;
1015 	struct svc_callout *prev;
1016 	struct svc_callout *s;
1017 	struct netconfig *nconf;
1018 	char *netid = NULL;
1019 	int flag = 0;
1020 
1021 	trace4(TR_svc_register, 0, prog, vers, protocol);
1022 	if (xprt->xp_netid) {
1023 		netid = strdup(xprt->xp_netid);
1024 		flag = 1;
1025 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
1026 	__rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
1027 		/* fill in missing netid field in SVCXPRT */
1028 		netid = strdup(nconf->nc_netid);
1029 		flag = 1;
1030 		freenetconfigent(nconf);
1031 	} /* must be svc_raw_create */
1032 
1033 	if ((netid == NULL) && (flag == 1)) {
1034 		trace4(TR_svc_register, 1, prog, vers, protocol);
1035 		return (FALSE);
1036 	}
1037 
1038 	rw_wrlock(&svc_lock);
1039 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
1040 		if (netid)
1041 			free(netid);
1042 		if (s->sc_dispatch == dispatch)
1043 			goto pmap_it;  /* it is registering another xprt */
1044 		rw_unlock(&svc_lock);
1045 		trace4(TR_svc_register, 1, prog, vers, protocol);
1046 		return (FALSE);
1047 	}
1048 	s = (struct svc_callout *)mem_alloc(sizeof (struct svc_callout));
1049 #ifndef KERNEL
1050 	if (s == (struct svc_callout *)0) {
1051 		if (netid)
1052 			free(netid);
1053 		trace4(TR_svc_register, 1, prog, vers, protocol);
1054 		rw_unlock(&svc_lock);
1055 		return (FALSE);
1056 	}
1057 #endif
1058 	s->sc_prog = prog;
1059 	s->sc_vers = vers;
1060 	s->sc_dispatch = dispatch;
1061 	s->sc_netid = netid;
1062 	s->sc_next = svc_head;
1063 	svc_head = s;
1064 
1065 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1066 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1067 			syslog(LOG_ERR, "svc_register : strdup failed.");
1068 			free(netid);
1069 			svc_head = s->sc_next;
1070 			mem_free((char *)s,
1071 				(uint_t)sizeof (struct svc_callout));
1072 			rw_unlock(&svc_lock);
1073 			trace4(TR_svc_register, 1, prog, vers, protocol);
1074 			return (FALSE);
1075 		}
1076 
1077 pmap_it:
1078 	rw_unlock(&svc_lock);
1079 #ifndef KERNEL
1080 	/* now register the information with the local binder service */
1081 	if (protocol) {
1082 		dummy = pmap_set(prog, vers, protocol, xprt->xp_port);
1083 		trace4(TR_svc_register, 1, prog, vers, protocol);
1084 		return (dummy);
1085 	}
1086 #endif
1087 	trace4(TR_svc_register, 1, prog, vers, protocol);
1088 	return (TRUE);
1089 }
1090 
1091 /*
1092  * Remove a service program from the callout list.
1093  * For version 2 portmappers.
1094  */
1095 void
1096 svc_unregister(prog, vers)
1097 	rpcprog_t prog;
1098 	rpcvers_t vers;
1099 {
1100 	struct svc_callout *prev;
1101 	struct svc_callout *s;
1102 
1103 	trace3(TR_svc_unregister, 0, prog, vers);
1104 	rw_wrlock(&svc_lock);
1105 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1106 		if (prev == NULL_SVC) {
1107 			svc_head = s->sc_next;
1108 		} else {
1109 			prev->sc_next = s->sc_next;
1110 		}
1111 		s->sc_next = NULL_SVC;
1112 		if (s->sc_netid)
1113 			mem_free((char *)s->sc_netid,
1114 					(uint_t)strlen(s->sc_netid) + 1);
1115 		mem_free((char *)s, (uint_t)sizeof (struct svc_callout));
1116 #ifndef KERNEL
1117 		/* unregister the information with the local binder service */
1118 		(void) pmap_unset(prog, vers);
1119 #endif
1120 	}
1121 	rw_unlock(&svc_lock);
1122 	trace3(TR_svc_unregister, 1, prog, vers);
1123 }
1124 
1125 #endif /* PORTMAP */
1126 /*
1127  * Search the callout list for a program number, return the callout
1128  * struct.
1129  * Check the transport as well.  Many routines such as svc_unreg
1130  * don't give any corresponding transport, so don't check for the transport
1131  * if netid == NULL
1132  */
1133 static struct svc_callout *
1134 svc_find(prog, vers, prev, netid)
1135 	rpcprog_t prog;
1136 	rpcvers_t vers;
1137 	struct svc_callout **prev;
1138 	char *netid;
1139 {
1140 	struct svc_callout *s, *p;
1141 
1142 	trace3(TR_svc_find, 0, prog, vers);
1143 
1144 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1145 
1146 /*	assert(RW_WRITE_HELD(&svc_lock)); */
1147 	p = NULL_SVC;
1148 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1149 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1150 			((netid == NULL) || (s->sc_netid == NULL) ||
1151 			(strcmp(netid, s->sc_netid) == 0)))
1152 				break;
1153 		p = s;
1154 	}
1155 	*prev = p;
1156 	trace3(TR_svc_find, 1, prog, vers);
1157 	return (s);
1158 }
1159 
1160 
1161 /* ******************* REPLY GENERATION ROUTINES  ************ */
1162 
1163 /*
1164  * Send a reply to an rpc request
1165  */
1166 bool_t
1167 svc_sendreply(xprt, xdr_results, xdr_location)
1168 	const SVCXPRT *xprt;
1169 	xdrproc_t xdr_results;
1170 	caddr_t xdr_location;
1171 {
1172 	bool_t dummy;
1173 	struct rpc_msg rply;
1174 
1175 	trace1(TR_svc_sendreply, 0);
1176 	rply.rm_direction = REPLY;
1177 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1178 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1179 	rply.acpted_rply.ar_stat = SUCCESS;
1180 	rply.acpted_rply.ar_results.where = xdr_location;
1181 	rply.acpted_rply.ar_results.proc = xdr_results;
1182 	dummy = SVC_REPLY((SVCXPRT *)xprt, &rply);
1183 	trace1(TR_svc_sendreply, 1);
1184 	return (dummy);
1185 }
1186 
1187 /*
1188  * No procedure error reply
1189  */
1190 void
1191 svcerr_noproc(xprt)
1192 	const SVCXPRT *xprt;
1193 {
1194 	struct rpc_msg rply;
1195 
1196 	trace1(TR_svcerr_noproc, 0);
1197 	rply.rm_direction = REPLY;
1198 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1199 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1200 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1201 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1202 	trace1(TR_svcerr_noproc, 1);
1203 }
1204 
1205 /*
1206  * Can't decode args error reply
1207  */
1208 void
1209 svcerr_decode(xprt)
1210 	const SVCXPRT *xprt;
1211 {
1212 	struct rpc_msg rply;
1213 
1214 	trace1(TR_svcerr_decode, 0);
1215 	rply.rm_direction = REPLY;
1216 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1217 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1218 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1219 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1220 	trace1(TR_svcerr_decode, 1);
1221 }
1222 
1223 /*
1224  * Some system error
1225  */
1226 void
1227 svcerr_systemerr(xprt)
1228 	const SVCXPRT *xprt;
1229 {
1230 	struct rpc_msg rply;
1231 
1232 	trace1(TR_svcerr_systemerr, 0);
1233 	rply.rm_direction = REPLY;
1234 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1235 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1236 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1237 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1238 	trace1(TR_svcerr_systemerr, 1);
1239 }
1240 
1241 /*
1242  * Tell RPC package to not complain about version errors to the client.	 This
1243  * is useful when revving broadcast protocols that sit on a fixed address.
1244  * There is really one (or should be only one) example of this kind of
1245  * protocol: the portmapper (or rpc binder).
1246  */
1247 void
1248 __svc_versquiet_on(xprt)
1249 	SVCXPRT *xprt;
1250 {
1251 	trace1(TR___svc_versquiet_on, 0);
1252 /* LINTED pointer alignment */
1253 	svc_flags(xprt) |= SVC_VERSQUIET;
1254 	trace1(TR___svc_versquiet_on, 1);
1255 }
1256 
1257 void
1258 __svc_versquiet_off(xprt)
1259 	SVCXPRT *xprt;
1260 {
1261 	trace1(TR___svc_versquiet_off, 0);
1262 /* LINTED pointer alignment */
1263 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1264 	trace1(TR___svc_versquiet_off, 1);
1265 }
1266 
1267 void
1268 svc_versquiet(xprt)
1269 	SVCXPRT *xprt;
1270 {
1271 	trace1(TR_svc_versquiet, 0);
1272 	__svc_versquiet_on(xprt);
1273 	trace1(TR_svc_versquiet, 1);
1274 }
1275 
1276 int
1277 __svc_versquiet_get(xprt)
1278 	SVCXPRT *xprt;
1279 {
1280 	trace1(TR___svc_versquiet_get, 0);
1281 	trace2(TR___svc_versquiet_get, 1, svc_flags(xprt) & SVC_VERSQUIET);
1282 /* LINTED pointer alignment */
1283 	return (svc_flags(xprt) & SVC_VERSQUIET);
1284 }
1285 
1286 /*
1287  * Authentication error reply
1288  */
1289 void
1290 svcerr_auth(xprt, why)
1291 	const SVCXPRT *xprt;
1292 	enum auth_stat why;
1293 {
1294 	struct rpc_msg rply;
1295 
1296 	trace1(TR_svcerr_auth, 0);
1297 	rply.rm_direction = REPLY;
1298 	rply.rm_reply.rp_stat = MSG_DENIED;
1299 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1300 	rply.rjcted_rply.rj_why = why;
1301 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1302 	trace1(TR_svcerr_auth, 1);
1303 }
1304 
1305 /*
1306  * Auth too weak error reply
1307  */
1308 void
1309 svcerr_weakauth(xprt)
1310 	const SVCXPRT *xprt;
1311 {
1312 	trace1(TR_svcerr_weakauth, 0);
1313 	svcerr_auth(xprt, AUTH_TOOWEAK);
1314 	trace1(TR_svcerr_weakauth, 1);
1315 }
1316 
1317 /*
1318  * Program unavailable error reply
1319  */
1320 void
1321 svcerr_noprog(xprt)
1322 	const SVCXPRT *xprt;
1323 {
1324 	struct rpc_msg rply;
1325 
1326 	trace1(TR_svcerr_noprog, 0);
1327 	rply.rm_direction = REPLY;
1328 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1329 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1330 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1331 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1332 	trace1(TR_svcerr_noprog, 1);
1333 }
1334 
1335 /*
1336  * Program version mismatch error reply
1337  */
1338 void
1339 svcerr_progvers(xprt, low_vers, high_vers)
1340 	const SVCXPRT *xprt;
1341 	rpcvers_t low_vers;
1342 	rpcvers_t high_vers;
1343 {
1344 	struct rpc_msg rply;
1345 
1346 	trace3(TR_svcerr_progvers, 0, low_vers, high_vers);
1347 	rply.rm_direction = REPLY;
1348 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1349 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1350 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1351 	rply.acpted_rply.ar_vers.low = low_vers;
1352 	rply.acpted_rply.ar_vers.high = high_vers;
1353 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1354 	trace3(TR_svcerr_progvers, 1, low_vers, high_vers);
1355 }
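
/*
 * Illustrative sketch (not part of the library): how a dispatch routine
 * typically uses svc_sendreply() and the svcerr_*() routines above.  The
 * guard macro, procedure number, argument type and handler logic are
 * hypothetical; the reply-generation calls are the point of the example.
 */
#ifdef SVC_REPLY_EXAMPLE
#define	EXAMPLE_PROC_ADD	((rpcproc_t)1)

static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
{
	int arg, res;

	switch (rqstp->rq_proc) {
	case NULLPROC:
		(void) svc_sendreply(xprt, (xdrproc_t)xdr_void, (caddr_t)NULL);
		return;
	case EXAMPLE_PROC_ADD:
		if (!svc_getargs(xprt, (xdrproc_t)xdr_int, (caddr_t)&arg)) {
			svcerr_decode(xprt);
			return;
		}
		res = arg + 1;
		if (!svc_sendreply(xprt, (xdrproc_t)xdr_int, (caddr_t)&res))
			svcerr_systemerr(xprt);
		(void) svc_freeargs(xprt, (xdrproc_t)xdr_int, (caddr_t)&arg);
		return;
	default:
		svcerr_noproc(xprt);
		return;
	}
}
#endif	/* SVC_REPLY_EXAMPLE */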
1356 
1357 /* ******************* SERVER INPUT STUFF ******************* */
1358 
1359 /*
1360  * Get server side input from some transport.
1361  *
1362  * Statement of authentication parameters management:
1363  * This function owns and manages all authentication parameters, specifically
1364  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1365  * the "cooked" credentials (rqst->rq_clntcred).
1366  * However, this function does not know the structure of the cooked
1367  * credentials, so it makes the following assumptions:
1368  *   a) the structure is contiguous (no pointers), and
1369  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1370  * In all events, all three parameters are freed upon exit from this routine.
1371  * The storage is trivially managed on the call stack in user land, but
1372  * is allocated with malloc in kernel land.
1373  */
1374 
1375 void
1376 svc_getreq(rdfds)
1377 	int rdfds;
1378 {
1379 	fd_set readfds;
1380 
1381 	trace2(TR_svc_getreq, 0, rdfds);
1382 	FD_ZERO(&readfds);
1383 	readfds.fds_bits[0] = rdfds;
1384 	svc_getreqset(&readfds);
1385 	trace2(TR_svc_getreq, 1, rdfds);
1386 }
1387 
1388 void
1389 svc_getreqset(readfds)
1390 	fd_set *readfds;
1391 {
1392 	int i;
1393 
1394 	trace1(TR_svc_getreqset, 0);
1395 	for (i = 0; i < svc_max_fd; i++) {
1396 		/* fd has input waiting */
1397 		if (FD_ISSET(i, readfds))
1398 			svc_getreq_common(i);
1399 	}
1400 	trace1(TR_svc_getreqset, 1);
1401 }
1402 
1403 void
1404 svc_getreq_poll(pfdp, pollretval)
1405 	struct pollfd	*pfdp;
1406 	int	pollretval;
1407 {
1408 	int i;
1409 	int fds_found;
1410 
1411 	trace2(TR_svc_getreq_poll, 0, pollretval);
1412 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1413 		struct pollfd *p = &pfdp[i];
1414 
1415 		if (p->revents) {
1416 			/* fd has input waiting */
1417 			fds_found++;
1418 			/*
1419 			 *	We assume that this function is only called
1420 			 *	via someone select()ing from svc_fdset or
1421 			 *	poll()ing from svc_pollset[].  Thus it's safe
1422 			 *	to handle the POLLNVAL event by simply turning
1423 			 *	the corresponding bit off in svc_fdset.  The
1424 			 *	svc_pollset[] array is derived from svc_fdset
1425 			 *	and so will also be updated eventually.
1426 			 *
1427 			 *	XXX Should we do an xprt_unregister() instead?
1428 			 */
1429 			/* Handle user callback */
1430 			if (__is_a_userfd(p->fd) == TRUE) {
1431 				rw_rdlock(&svc_fd_lock);
1432 				__svc_getreq_user(p);
1433 				rw_unlock(&svc_fd_lock);
1434 			} else {
1435 				if (p->revents & POLLNVAL) {
1436 					rw_wrlock(&svc_fd_lock);
1437 					remove_pollfd(p->fd);	/* XXX */
1438 					rw_unlock(&svc_fd_lock);
1439 				} else
1440 					svc_getreq_common(p->fd);
1441 			}
1442 		}
1443 	}
1444 	trace2(TR_svc_getreq_poll, 1, pollretval);
1445 }
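
/*
 * Illustrative sketch (not part of the library): a server that drives its
 * own event loop instead of calling svc_run() would typically feed poll()
 * results to svc_getreq_poll() as shown below.  The guard macro, local
 * copy of the pollfd array and loop structure are hypothetical; svc_pollfd
 * and svc_max_pollfd are the globals maintained by add_pollfd() above.
 */
#ifdef SVC_POLL_LOOP_EXAMPLE
static void
example_poll_loop(void)
{
	struct pollfd *pfds = NULL;
	int npfds = 0;
	int ret;

	for (;;) {
		/* svc_max_pollfd can grow as transports are registered */
		if (npfds != svc_max_pollfd) {
			pfds = realloc(pfds,
			    sizeof (struct pollfd) * svc_max_pollfd);
			if (pfds == NULL)
				break;
			npfds = svc_max_pollfd;
		}
		(void) memcpy(pfds, svc_pollfd,
		    sizeof (struct pollfd) * npfds);
		ret = poll(pfds, npfds, -1);
		if (ret > 0)
			svc_getreq_poll(pfds, ret);
		else if (ret == -1 && errno != EINTR)
			break;
	}
	free(pfds);
}
#endif	/* SVC_POLL_LOOP_EXAMPLE */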
1446 
1447 void
1448 svc_getreq_common(fd)
1449 	int fd;
1450 {
1451 	SVCXPRT *xprt;
1452 	enum xprt_stat stat;
1453 	struct rpc_msg *msg;
1454 	struct svc_req *r;
1455 	char *cred_area;
1456 
1457 	trace2(TR_svc_getreq_common, 0, fd);
1458 
1459 	rw_rdlock(&svc_fd_lock);
1460 
1461 	/* HANDLE USER CALLBACK */
1462 	if (__is_a_userfd(fd) == TRUE) {
1463 		struct pollfd virtual_fd;
1464 
1465 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1466 		virtual_fd.fd = fd;
1467 		__svc_getreq_user(&virtual_fd);
1468 		rw_unlock(&svc_fd_lock);
1469 		return;
1470 	}
1471 
1472 	/*
1473 	 * The transport associated with this fd could have been
1474 	 * removed from svc_timeout_nonblock_xprt_and_LRU, for instance.
1475 	 * This can happen if two or more fds get read events and are
1476 	 * passed to svc_getreq_poll/set, the first fd is serviced by
1477 	 * the dispatch routine and cleans up any dead transports.  If
1478 	 * one of the dead transports removed is the other fd that
1479 	 * had a read event then svc_getreq_common() will be called with no
1480 	 * xprt associated with the fd that had the original read event.
1481 	 */
1482 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1483 		rw_unlock(&svc_fd_lock);
1484 		trace2(TR_svc_getreq_common, 1, fd);
1485 		return;
1486 	}
1487 	rw_unlock(&svc_fd_lock);
1488 /* LINTED pointer alignment */
1489 	msg = SVCEXT(xprt)->msg;
1490 /* LINTED pointer alignment */
1491 	r = SVCEXT(xprt)->req;
1492 /* LINTED pointer alignment */
1493 	cred_area = SVCEXT(xprt)->cred_area;
1494 	msg->rm_call.cb_cred.oa_base = cred_area;
1495 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1496 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1497 
1498 	/* receive msgs from xprt (support batch calls) */
1499 	do {
1500 		bool_t dispatch;
1501 
1502 		if (dispatch = SVC_RECV(xprt, msg))
1503 			(void) _svc_prog_dispatch(xprt, msg, r);
1504 		/*
1505 		 * Check if the xprt has been disconnected in a recursive call
1506 		 * in the service dispatch routine. If so, then break
1507 		 */
1508 		rw_rdlock(&svc_fd_lock);
1509 		if (xprt != svc_xports[fd]) {
1510 			rw_unlock(&svc_fd_lock);
1511 			break;
1512 		}
1513 		rw_unlock(&svc_fd_lock);
1514 
1515 		/*
1516 		 * Call cleanup procedure if set.
1517 		 */
1518 		if (__proc_cleanup_cb != NULL && dispatch)
1519 			(*__proc_cleanup_cb)(xprt);
1520 
1521 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1522 			SVC_DESTROY(xprt);
1523 			break;
1524 		}
1525 	} while (stat == XPRT_MOREREQS);
1526 	trace2(TR_svc_getreq_common, 1, fd);
1527 }
1528 
1529 int
1530 _svc_prog_dispatch(xprt, msg, r)
1531 	SVCXPRT *xprt;
1532 	struct rpc_msg *msg;
1533 	struct svc_req *r;
1534 {
1535 	struct svc_callout *s;
1536 	enum auth_stat why;
1537 	int prog_found;
1538 	rpcvers_t low_vers;
1539 	rpcvers_t high_vers;
1540 	void (*disp_fn)();
1541 
1542 	trace1(TR_prog_dispatch, 0);
1543 	r->rq_xprt = xprt;
1544 	r->rq_prog = msg->rm_call.cb_prog;
1545 	r->rq_vers = msg->rm_call.cb_vers;
1546 	r->rq_proc = msg->rm_call.cb_proc;
1547 	r->rq_cred = msg->rm_call.cb_cred;
1548 /* LINTED pointer alignment */
1549 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1550 /* LINTED pointer alignment */
1551 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1552 
1553 	/* first authenticate the message */
1554 	/* Check for null flavor and bypass these calls if possible */
1555 
1556 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1557 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1558 		r->rq_xprt->xp_verf.oa_length = 0;
1559 	} else {
1560 		bool_t no_dispatch;
1561 
1562 		if ((why = __gss_authenticate(r, msg,
1563 			&no_dispatch)) != AUTH_OK) {
1564 			svcerr_auth(xprt, why);
1565 			trace1(TR_prog_dispatch, 1);
1566 			return (0);
1567 		}
1568 		if (no_dispatch)
1569 			return (0);
1570 	}
1571 	/* match message with a registered service */
1572 	prog_found = FALSE;
1573 	low_vers = (rpcvers_t)(0 - 1);
1574 	high_vers = 0;
1575 	rw_rdlock(&svc_lock);
1576 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1577 		if (s->sc_prog == r->rq_prog) {
1578 			prog_found = TRUE;
1579 			if (s->sc_vers == r->rq_vers) {
1580 				if ((xprt->xp_netid == NULL) ||
1581 				    (s->sc_netid == NULL) ||
1582 				    (strcmp(xprt->xp_netid,
1583 					    s->sc_netid) == 0)) {
1584 					disp_fn = (*s->sc_dispatch);
1585 					rw_unlock(&svc_lock);
1586 					disp_fn(r, xprt);
1587 					trace1(TR_prog_dispatch, 1);
1588 					return (1);
1589 				} else {
1590 					prog_found = FALSE;
1591 				}
1592 			}
1593 			if (s->sc_vers < low_vers)
1594 				low_vers = s->sc_vers;
1595 			if (s->sc_vers > high_vers)
1596 				high_vers = s->sc_vers;
1597 		}		/* found correct program */
1598 	}
1599 	rw_unlock(&svc_lock);
1600 
1601 	/*
1602 	 * if we got here, the program or version
1603 	 * is not served ...
1604 	 */
1605 	if (prog_found) {
1606 /* LINTED pointer alignment */
1607 		if (!version_keepquiet(xprt))
1608 			svcerr_progvers(xprt, low_vers, high_vers);
1609 	} else {
1610 		svcerr_noprog(xprt);
1611 	}
1612 	trace1(TR_prog_dispatch, 1);
1613 	return (0);
1614 }
1615 
1616 /* ******************* SVCXPRT allocation and deallocation ***************** */
1617 
1618 /*
1619  * svc_xprt_alloc() - allocate a service transport handle
1620  */
1621 SVCXPRT *
1622 svc_xprt_alloc()
1623 {
1624 	SVCXPRT		*xprt = NULL;
1625 	SVCXPRT_EXT	*xt = NULL;
1626 	SVCXPRT_LIST	*xlist = NULL;
1627 	struct rpc_msg	*msg = NULL;
1628 	struct svc_req	*req = NULL;
1629 	char		*cred_area = NULL;
1630 
1631 	if ((xprt = (SVCXPRT *)calloc(1, sizeof (SVCXPRT))) == NULL)
1632 		goto err_exit;
1633 
1634 	if ((xt = (SVCXPRT_EXT *)calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1635 		goto err_exit;
1636 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1637 
1638 	if ((xlist = (SVCXPRT_LIST *)calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1639 		goto err_exit;
1640 	xt->my_xlist = xlist;
1641 	xlist->xprt = xprt;
1642 
1643 	if ((msg = (struct rpc_msg *)malloc(sizeof (struct rpc_msg))) == NULL)
1644 		goto err_exit;
1645 	xt->msg = msg;
1646 
1647 	if ((req = (struct svc_req *)malloc(sizeof (struct svc_req))) == NULL)
1648 		goto err_exit;
1649 	xt->req = req;
1650 
1651 	if ((cred_area = (char *)malloc(2*MAX_AUTH_BYTES +
1652 							RQCRED_SIZE)) == NULL)
1653 		goto err_exit;
1654 	xt->cred_area = cred_area;
1655 
1656 /* LINTED pointer alignment */
1657 	mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1658 	return (xprt);
1659 
1660 err_exit:
1661 	svc_xprt_free(xprt);
1662 	return (NULL);
1663 }
1664 
1665 
1666 /*
1667  * svc_xprt_free() - free a service handle
1668  */
1669 void
1670 svc_xprt_free(xprt)
1671 	SVCXPRT	*xprt;
1672 {
1673 /* LINTED pointer alignment */
1674 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1675 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist: NULL;
1676 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1677 	struct svc_req	*req = xt ? xt->req : NULL;
1678 	char		*cred_area = xt ? xt->cred_area : NULL;
1679 
1680 	if (xprt)
1681 		free((char *)xprt);
1682 	if (xt)
1683 		free((char *)xt);
1684 	if (my_xlist)
1685 		free((char *)my_xlist);
1686 	if (msg)
1687 		free((char *)msg);
1688 	if (req)
1689 		free((char *)req);
1690 	if (cred_area)
1691 		free((char *)cred_area);
1692 }
1693 
1694 
1695 /*
1696  * svc_xprt_destroy() - free parent and child xprt list
1697  */
1698 void
1699 svc_xprt_destroy(xprt)
1700 	SVCXPRT		*xprt;
1701 {
1702 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1703 	int		type;
1704 
1705 /* LINTED pointer alignment */
1706 	if (SVCEXT(xprt)->parent)
1707 /* LINTED pointer alignment */
1708 		xprt = SVCEXT(xprt)->parent;
1709 /* LINTED pointer alignment */
1710 	type = svc_type(xprt);
1711 /* LINTED pointer alignment */
1712 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1713 		xnext = xlist->next;
1714 		xprt = xlist->xprt;
1715 		switch (type) {
1716 		case SVC_DGRAM:
1717 			svc_dg_xprtfree(xprt);
1718 			break;
1719 		case SVC_RENDEZVOUS:
1720 			svc_vc_xprtfree(xprt);
1721 			break;
1722 		case SVC_CONNECTION:
1723 			svc_fd_xprtfree(xprt);
1724 			break;
1725 		case SVC_DOOR:
1726 			svc_door_xprtfree(xprt);
1727 			break;
1728 		}
1729 	}
1730 }
1731 
1732 
1733 /*
1734  * svc_copy() - make a copy of parent
1735  */
1736 SVCXPRT *
1737 svc_copy(xprt)
1738 	SVCXPRT *xprt;
1739 {
1740 /* LINTED pointer alignment */
1741 	switch (svc_type(xprt)) {
1742 	case SVC_DGRAM:
1743 		return (svc_dg_xprtcopy(xprt));
1744 	case SVC_RENDEZVOUS:
1745 		return (svc_vc_xprtcopy(xprt));
1746 	case SVC_CONNECTION:
1747 		return (svc_fd_xprtcopy(xprt));
1748 	}
1749 	return ((SVCXPRT *)NULL);
1750 }
1751 
1752 
1753 /*
1754  * _svc_destroy_private() - private SVC_DESTROY interface
1755  */
1756 void
1757 _svc_destroy_private(xprt)
1758 	SVCXPRT *xprt;
1759 {
1760 /* LINTED pointer alignment */
1761 	switch (svc_type(xprt)) {
1762 	case SVC_DGRAM:
1763 		_svc_dg_destroy_private(xprt);
1764 		break;
1765 	case SVC_RENDEZVOUS:
1766 	case SVC_CONNECTION:
1767 		_svc_vc_destroy_private(xprt, TRUE);
1768 		break;
1769 	}
1770 }
1771 
1772 /*
1773  * svc_get_local_cred() - fetch local user credentials.  This always
1774  * works over doors based transports.  For local transports, this
1775  * does not yield correct results unless the __rpc_negotiate_uid()
1776  * call has been invoked to enable this feature.
1777  */
1778 bool_t
1779 svc_get_local_cred(xprt, lcred)
1780 	SVCXPRT			*xprt;
1781 	svc_local_cred_t	*lcred;
1782 {
1783 	/* LINTED pointer alignment */
1784 	if (svc_type(xprt) == SVC_DOOR)
1785 		return (__svc_get_door_cred(xprt, lcred));
1786 	return (__rpc_get_local_cred(xprt, lcred));
1787 }
1788 
1789 
1790 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1791 
1792 /*
1793  * The dup caching routines below provide a cache of received
1794  * transactions. RPC service routines can use this to detect
1795  * retransmissions and re-send a non-failure response. An lru
1796  * scheme is used to decide which entries to evict from the
1797  * cache, though only DUP_DONE entries are placed on the lru
1798  * list.
1799  * The routines were written towards development of a generic
1800  * SVC_DUP() interface, which can be expanded to encompass the
1801  * svc_dg_enablecache() routines as well. The cache is currently
1802  * private to the automounter.
1803 
1804 
1805 /* dupcache header contains xprt specific information */
1806 struct dupcache
1807 {
1808 	rwlock_t	dc_lock;
1809 	time_t		dc_time;
1810 	int		dc_buckets;
1811 	int		dc_maxsz;
1812 	int		dc_basis;
1813 	struct dupreq 	*dc_mru;
1814 	struct dupreq	**dc_hashtbl;
1815 };
1816 
1817 /*
1818  * private duplicate cache request routines
1819  */
1820 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1821 		struct dupcache *, uint32_t, uint32_t);
1822 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1823 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1824 		struct dupcache *, uint32_t, uint32_t, time_t);
1825 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1826 		struct dupcache *, uint32_t, uint32_t);
1827 #ifdef DUP_DEBUG
1828 static void __svc_dupcache_debug(struct dupcache *);
1829 #endif /* DUP_DEBUG */
1830 
1831 /* default parameters for the dupcache */
1832 #define	DUPCACHE_BUCKETS	257
1833 #define	DUPCACHE_TIME		900
1834 #define	DUPCACHE_MAXSZ		INT_MAX
1835 
1836 /*
1837  * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
1838  * initialize the duprequest cache and assign it to the xprt_cache
1839  * Use default values depending on the cache condition and basis.
1840  * return TRUE on success and FALSE on failure
1841  */
1842 bool_t
1843 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1844 {
1845 	static mutex_t initdc_lock = DEFAULTMUTEX;
1846 	int i;
1847 	struct dupcache *dc;
1848 
1849 	mutex_lock(&initdc_lock);
1850 	if (*xprt_cache != NULL) { /* do only once per xprt */
1851 		mutex_unlock(&initdc_lock);
1852 		syslog(LOG_ERR,
1853 		"__svc_dupcache_init: multiply defined dup cache");
1854 		return (FALSE);
1855 	}
1856 
1857 	switch (basis) {
1858 	case DUPCACHE_FIXEDTIME:
1859 		dc = (struct dupcache *)mem_alloc(sizeof (struct dupcache));
1860 		if (dc == NULL) {
1861 			mutex_unlock(&initdc_lock);
1862 			syslog(LOG_ERR,
1863 				"__svc_dupcache_init: memory alloc failed");
1864 			return (FALSE);
1865 		}
1866 		rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1867 		if (condition != NULL)
1868 			dc->dc_time = *((time_t *)condition);
1869 		else
1870 			dc->dc_time = DUPCACHE_TIME;
1871 		dc->dc_buckets = DUPCACHE_BUCKETS;
1872 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1873 		dc->dc_basis = basis;
1874 		dc->dc_mru = NULL;
1875 		dc->dc_hashtbl = (struct dupreq **)mem_alloc(dc->dc_buckets *
1876 						sizeof (struct dupreq *));
1877 		if (dc->dc_hashtbl == NULL) {
1878 			mem_free(dc, sizeof (struct dupcache));
1879 			mutex_unlock(&initdc_lock);
1880 			syslog(LOG_ERR,
1881 				"__svc_dupcache_init: memory alloc failed");
1882 			return (FALSE);
1883 		}
1884 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1885 			dc->dc_hashtbl[i] = NULL;
1886 		*xprt_cache = (char *)dc;
1887 		break;
1888 	default:
1889 		mutex_unlock(&initdc_lock);
1890 		syslog(LOG_ERR,
1891 		"__svc_dupcache_init: undefined dup cache basis");
1892 		return (FALSE);
1893 	}
1894 
1895 	mutex_unlock(&initdc_lock);
1896 
1897 	return (TRUE);
1898 }
1899 
1900 /*
1901  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1902  *	char *xprt_cache)
1903  * searches the request cache. Creates an entry and returns DUP_NEW if
1904  * the request is not found in the cache.  If it is found, then it
1905  * returns the state of the request (in progress, drop, or done) and
1906  * also allocates, and passes back results to the user (if any) in
1907  * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1908  */
1909 int
1910 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1911 	char *xprt_cache)
1912 {
1913 	uint32_t drxid, drhash;
1914 	int rc;
1915 	struct dupreq *dr = NULL;
1916 	time_t timenow = time(NULL);
1917 
1918 	/* LINTED pointer alignment */
1919 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1920 
1921 	if (dc == NULL) {
1922 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1923 		return (DUP_ERROR);
1924 	}
1925 
1926 	/* get the xid of the request */
1927 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1928 		syslog(LOG_ERR, "__svc_dup: xid error");
1929 		return (DUP_ERROR);
1930 	}
1931 	drhash = drxid % dc->dc_buckets;
1932 
1933 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1934 			drhash)) != DUP_NEW)
1935 		return (rc);
1936 
1937 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1938 		return (DUP_ERROR);
1939 
1940 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1941 			== DUP_ERROR)
1942 		return (rc);
1943 
1944 	return (DUP_NEW);
1945 }
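
/*
 * Illustrative sketch (not part of the library): how a service that wants
 * duplicate-request detection might use __svc_dupcache_init() and
 * __svc_dup() above.  The guard macro, cache pointer, time-out value and
 * dispatch fragment are hypothetical; recording the final result of the
 * request (via the corresponding dupdone routine) is omitted here.
 */
#ifdef SVC_DUPCACHE_EXAMPLE
static char *example_dupcache = NULL;	/* one cache per transport */

static void
example_dup_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
{
	time_t timeout = 20;		/* seconds an entry stays cached */
	caddr_t resp = NULL;
	uint_t resp_sz = 0;

	if (example_dupcache == NULL &&
	    !__svc_dupcache_init((void *)&timeout, DUPCACHE_FIXEDTIME,
	    &example_dupcache))
		return;

	switch (__svc_dup(rqstp, &resp, &resp_sz, example_dupcache)) {
	case DUP_NEW:
		/* first time this xid is seen: do the real work here */
		break;
	case DUP_ERROR:
		svcerr_systemerr(xprt);
		return;
	default:
		/*
		 * Duplicate: in progress, dropped, or already done.  For a
		 * completed request, resp/resp_sz hold the cached reply.
		 */
		return;
	}
}
#endif	/* SVC_DUPCACHE_EXAMPLE */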
1946 
1947 
1948 
1949 /*
1950  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1951  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1952  * 		uint32_t drhash)
1953  * Checks to see whether an entry already exists in the cache. If it does,
1954  * copy the results back into resp_buf, if appropriate. Return the status of
1955  * the request, or DUP_NEW if the entry is not in the cache
1956  */
1957 static int
1958 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1959 		struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1960 {
1961 	struct dupreq *dr = NULL;
1962 
1963 	rw_rdlock(&(dc->dc_lock));
1964 	dr = dc->dc_hashtbl[drhash];
1965 	while (dr != NULL) {
1966 		if (dr->dr_xid == drxid &&
1967 		    dr->dr_proc == req->rq_proc &&
1968 		    dr->dr_prog == req->rq_prog &&
1969 		    dr->dr_vers == req->rq_vers &&
1970 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1971 		    memcmp((caddr_t)dr->dr_addr.buf,
1972 				(caddr_t)req->rq_xprt->xp_rtaddr.buf,
1973 				dr->dr_addr.len) == 0) { /* entry found */
1974 			if (dr->dr_hash != drhash) {
1975 				/* sanity check */
1976 				rw_unlock((&dc->dc_lock));
1977 				syslog(LOG_ERR,
1978 					"__svc_dupcache_check: hashing error");
1979 				return (DUP_ERROR);
1980 			}
1981 
1982 			/*
1983 			 * Return results for requests on the lru list, if
1984 			 * appropriate; requests must be DUP_DONE or DUP_DROP
1985 			 * to have a result.  A NULL buffer in the cache
1986 			 * implies no results were sent during dupdone.
1987 			 * A NULL buffer in the call implies the caller is
1988 			 * not interested in results.
1989 			 */
1990 			if (((dr->dr_status == DUP_DONE) ||
1991 				(dr->dr_status == DUP_DROP)) &&
1992 				resp_buf != NULL &&
1993 				dr->dr_resp.buf != NULL) {
1994 				*resp_buf = (caddr_t)mem_alloc
1995 					(dr->dr_resp.len);
1996 				if (*resp_buf == NULL) {
1997 					syslog(LOG_ERR,
1998 					"__svc_dupcache_check: malloc failed");
1999 					rw_unlock(&(dc->dc_lock));
2000 					return (DUP_ERROR);
2001 				}
2002 				memset((caddr_t)*resp_buf, 0,
2003 					dr->dr_resp.len);
2004 				memcpy(*resp_buf, (caddr_t)dr->dr_resp.buf,
2005 					dr->dr_resp.len);
2006 				*resp_bufsz = dr->dr_resp.len;
2007 			} else {
2008 				/* no result */
2009 				if (resp_buf)
2010 					*resp_buf = NULL;
2011 				if (resp_bufsz)
2012 					*resp_bufsz = 0;
2013 			}
2014 			rw_unlock(&(dc->dc_lock));
2015 			return (dr->dr_status);
2016 		}
2017 		dr = dr->dr_chain;
2018 	}
2019 	rw_unlock(&(dc->dc_lock));
2020 	return (DUP_NEW);
2021 }
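/*
 * Editorial note: on a DUP_DONE or DUP_DROP hit the buffer returned in
 * *resp_buf is a fresh copy obtained with mem_alloc(); the caller is
 * presumably expected to release it with mem_free() once the cached reply
 * has been retransmitted.
 */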
2022 
2023 /*
2024  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
2025  * Return a victim dupreq entry to the caller, depending on cache policy.
2026  */
2027 static struct dupreq *
2028 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
2029 {
2030 	struct dupreq *dr = NULL;
2031 
2032 	switch (dc->dc_basis) {
2033 	case DUPCACHE_FIXEDTIME:
2034 		/*
2035 		 * The policy is to reclaim expired entries from the lru
2036 		 * list (and unhash them) before allocating a new entry as
2037 		 * the victim.  Doing a little cleanup on each allocation
2038 		 * spreads the cost of keeping the cache clean among threads.
2039 		 * Note that only DUP_DONE or DUP_DROP entries are on the lru
2040 		 * list, but we do a sanity check anyway.
2041 		 */
2042 		rw_wrlock(&(dc->dc_lock));
2043 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
2044 				((timenow - dr->dr_time) > dc->dc_time)) {
2045 			/* clean and then free the entry */
2046 			if (dr->dr_status != DUP_DONE &&
2047 				dr->dr_status != DUP_DROP) {
2048 				/*
2049 				 * The LRU list can't contain an
2050 				 * entry where the status is other than
2051 				 * DUP_DONE or DUP_DROP.
2052 				 */
2053 				syslog(LOG_ERR,
2054 				"__svc_dupcache_victim: bad victim");
2055 #ifdef DUP_DEBUG
2056 				/*
2057 				 * __svc_dupcache_debug() must be called with
2058 				 * the reader or writer lock held; since we
2059 				 * already hold the writer lock, we can call
2060 				 * it directly here.
2061 				 */
2062 				__svc_dupcache_debug(dc);
2063 #endif /* DUP_DEBUG */
2064 				rw_unlock(&(dc->dc_lock));
2065 				return (NULL);
2066 			}
2067 			/* free buffers */
2068 			if (dr->dr_resp.buf) {
2069 				mem_free(dr->dr_resp.buf, dr->dr_resp.len);
2070 				dr->dr_resp.buf = NULL;
2071 			}
2072 			if (dr->dr_addr.buf) {
2073 				mem_free(dr->dr_addr.buf, dr->dr_addr.len);
2074 				dr->dr_addr.buf = NULL;
2075 			}
2076 
2077 			/* unhash the entry */
2078 			if (dr->dr_chain)
2079 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
2080 			if (dr->dr_prevchain)
2081 				dr->dr_prevchain->dr_chain = dr->dr_chain;
2082 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
2083 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
2084 
2085 			/* modify the lru pointers */
2086 			if (dc->dc_mru == dr)
2087 				dc->dc_mru = NULL;
2088 			else {
2089 				dc->dc_mru->dr_next = dr->dr_next;
2090 				dr->dr_next->dr_prev = dc->dc_mru;
2091 			}
2092 			mem_free(dr, sizeof (struct dupreq));
2093 			dr = NULL;
2094 		}
2095 		rw_unlock(&(dc->dc_lock));
2096 
2097 		/*
2098 		 * Allocate and return new clean entry as victim
2099 		 */
2100 		if ((dr = (struct dupreq *)mem_alloc(sizeof (*dr))) == NULL) {
2101 			syslog(LOG_ERR,
2102 				"__svc_dupcache_victim: mem_alloc failed");
2103 			return (NULL);
2104 		}
2105 		memset((caddr_t)dr, 0, sizeof (*dr));
2106 		return (dr);
2107 	default:
2108 		syslog(LOG_ERR,
2109 		"__svc_dupcache_victim: undefined dup cache basis");
2110 		return (NULL);
2111 	}
2112 }
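/*
 * Editorial note: the completed entries form a circular, doubly linked lru
 * list anchored at dc_mru.  dc_mru is the most recently completed entry and
 * dc_mru->dr_next the least recently used one, so victim selection above
 * only has to keep examining dc_mru->dr_next until it finds an entry
 * younger than dc_time seconds.
 */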
2113 
2114 /*
2115  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2116  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2117  * build new duprequest entry and then insert into the cache
2118  */
2119 static int
2120 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2121 	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2122 {
2123 	dr->dr_xid = drxid;
2124 	dr->dr_prog = req->rq_prog;
2125 	dr->dr_vers = req->rq_vers;
2126 	dr->dr_proc = req->rq_proc;
2127 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
2128 	dr->dr_addr.len = dr->dr_addr.maxlen;
2129 	if ((dr->dr_addr.buf = (caddr_t)mem_alloc(dr->dr_addr.maxlen))
2130 				== NULL) {
2131 		syslog(LOG_ERR, "__svc_dupcache_enter: mem_alloc failed");
2132 		mem_free(dr, sizeof (struct dupreq));
2133 		return (DUP_ERROR);
2134 	}
2135 	memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
2136 	memcpy((caddr_t)dr->dr_addr.buf,
2137 		(caddr_t)req->rq_xprt->xp_rtaddr.buf, dr->dr_addr.len);
2138 	dr->dr_resp.buf = NULL;
2139 	dr->dr_resp.maxlen = 0;
2140 	dr->dr_resp.len = 0;
2141 	dr->dr_status = DUP_INPROGRESS;
2142 	dr->dr_time = timenow;
2143 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
2144 
2145 	/* place entry at head of hash table */
2146 	rw_wrlock(&(dc->dc_lock));
2147 	dr->dr_chain = dc->dc_hashtbl[drhash];
2148 	dr->dr_prevchain = NULL;
2149 	if (dc->dc_hashtbl[drhash] != NULL)
2150 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2151 	dc->dc_hashtbl[drhash] = dr;
2152 	rw_unlock(&(dc->dc_lock));
2153 	return (DUP_NEW);
2154 }
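/*
 * Editorial note: entries are linked two ways.  dc_hashtbl[drxid %
 * dc_buckets] heads a doubly linked chain (dr_chain/dr_prevchain) used for
 * lookups, with new entries placed at the head; the lru links
 * (dr_next/dr_prev) are only established once the request completes, in
 * __svc_dupcache_update().
 */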
2155 
2156 /*
2157  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2158  *		int status, char *xprt_cache)
2159  * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2160  * Only DUP_DONE and DUP_DROP are accepted as status.  Sets the lru pointers
2161  * to make the entry the most recently used. Returns DUP_ERROR or status.
2162  */
2163 int
2164 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2165 		int status, char *xprt_cache)
2166 {
2167 	uint32_t drxid, drhash;
2168 	int rc;
2169 
2170 	/* LINTED pointer alignment */
2171 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2172 
2173 	if (dc == NULL) {
2174 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2175 		return (DUP_ERROR);
2176 	}
2177 
2178 	if (status != DUP_DONE && status != DUP_DROP) {
2179 		syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2180 		syslog(LOG_ERR, "	 must be DUP_DONE or DUP_DROP");
2181 		return (DUP_ERROR);
2182 	}
2183 
2184 	/* find the xid of the entry in the cache */
2185 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2186 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2187 		return (DUP_ERROR);
2188 	}
2189 	drhash = drxid % dc->dc_buckets;
2190 
2191 	/* update the status of the entry and result buffers, if required */
2192 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2193 			dc, drxid, drhash)) == DUP_ERROR) {
2194 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2195 		return (DUP_ERROR);
2196 	}
2197 
2198 	return (rc);
2199 }
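/*
 * Illustrative sketch (editorial addition, hypothetical caller): once a
 * reply has been sent, the dispatch code records the serialized results so
 * that a later retransmission can be answered from the cache:
 *
 *	if (__svc_dupdone(req, reply_buf, reply_len, DUP_DONE,
 *	    my_dupcache) == DUP_ERROR)
 *		syslog(LOG_WARNING, "could not cache reply");
 *
 * Passing a NULL reply_buf marks the entry done without storing any
 * results (see __svc_dupcache_update() below).
 */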
2200 
2201 /*
2202  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2203  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2204  * 	uint32_t drhash)
2205  * Check if the entry exists in the dupcache. If it does, update its status
2206  * and time and also its buffer, if appropriate. It is possible, but unlikely,
2207  * for DONE requests not to exist in the cache. Return DUP_ERROR or status.
2208  */
2209 static int
2210 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2211 	int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2212 {
2213 	struct dupreq *dr = NULL;
2214 	time_t timenow = time(NULL);
2215 
2216 	rw_wrlock(&(dc->dc_lock));
2217 	dr = dc->dc_hashtbl[drhash];
2218 	while (dr != NULL) {
2219 		if (dr->dr_xid == drxid &&
2220 		    dr->dr_proc == req->rq_proc &&
2221 		    dr->dr_prog == req->rq_prog &&
2222 		    dr->dr_vers == req->rq_vers &&
2223 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2224 		    memcmp((caddr_t)dr->dr_addr.buf,
2225 				(caddr_t)req->rq_xprt->xp_rtaddr.buf,
2226 				dr->dr_addr.len) == 0) { /* entry found */
2227 			if (dr->dr_hash != drhash) {
2228 				/* sanity check */
2229 				rw_unlock(&(dc->dc_lock));
2230 				syslog(LOG_ERR,
2231 				"__svc_dupdone: hashing error");
2232 				return (DUP_ERROR);
2233 			}
2234 
2235 			/* store the results if buffer is not NULL */
2236 			if (resp_buf != NULL) {
2237 				if ((dr->dr_resp.buf = (caddr_t)
2238 					mem_alloc(resp_bufsz)) == NULL) {
2239 					rw_unlock(&(dc->dc_lock));
2240 					syslog(LOG_ERR,
2241 					"__svc_dupdone: mem_alloc failed");
2242 					return (DUP_ERROR);
2243 				}
2244 				memset(dr->dr_resp.buf, 0, resp_bufsz);
2245 				memcpy((caddr_t)dr->dr_resp.buf, resp_buf,
2246 					(uint_t)resp_bufsz);
2247 				dr->dr_resp.len = resp_bufsz;
2248 			}
2249 
2250 			/* update status and done time */
2251 			dr->dr_status = status;
2252 			dr->dr_time = timenow;
2253 
2254 			/* move the entry to the mru position */
2255 			if (dc->dc_mru == NULL) {
2256 				dr->dr_next = dr;
2257 				dr->dr_prev = dr;
2258 			} else {
2259 				dr->dr_next = dc->dc_mru->dr_next;
2260 				dc->dc_mru->dr_next->dr_prev = dr;
2261 				dr->dr_prev = dc->dc_mru;
2262 				dc->dc_mru->dr_next = dr;
2263 			}
2264 			dc->dc_mru = dr;
2265 
2266 			rw_unlock(&(dc->dc_lock));
2267 			return (status);
2268 		}
2269 		dr = dr->dr_chain;
2270 	}
2271 	rw_unlock(&(dc->dc_lock));
2272 	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2273 	return (DUP_ERROR);
2274 }
2275 
2276 #ifdef DUP_DEBUG
2277 /*
2278  * __svc_dupcache_debug(struct dupcache *dc)
2279  * print out the hash table stuff
2280  *
2281  * This function requires the caller to hold the reader
2282  * or writer version of the duplicate request cache lock (dc_lock).
2283  */
2284 static void
2285 __svc_dupcache_debug(struct dupcache *dc)
2286 {
2287 	struct dupreq *dr = NULL;
2288 	int i;
2289 	bool_t bval;
2290 
2291 	fprintf(stderr, "   HASHTABLE\n");
2292 	for (i = 0; i < dc->dc_buckets; i++) {
2293 		bval = FALSE;
2294 		dr = dc->dc_hashtbl[i];
2295 		while (dr != NULL) {
2296 			if (!bval) {	/* ensures bucket printed only once */
2297 				fprintf(stderr, "    bucket : %d\n", i);
2298 				bval = TRUE;
2299 			}
2300 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2301 				dr->dr_xid, dr->dr_status, dr->dr_time);
2302 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2303 				(void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2304 			dr = dr->dr_chain;
2305 		}
2306 	}
2307 
2308 	fprintf(stderr, "   LRU\n");
2309 	if (dc->dc_mru) {
2310 		dr = dc->dc_mru->dr_next;	/* lru */
2311 		while (dr != dc->dc_mru) {
2312 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2313 				dr->dr_xid, dr->dr_status, dr->dr_time);
2314 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2315 				(void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2316 			dr = dr->dr_next;
2317 		}
2318 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2319 			dr->dr_xid, dr->dr_status, dr->dr_time);
2320 		fprintf(stderr, " dr: %p next: %p prev: %p\n", (void *)dr,
2321 			(void *)dr->dr_next, (void *)dr->dr_prev);
2322 	}
2323 }
2324 #endif /* DUP_DEBUG */
2325