xref: /titanic_50/usr/src/lib/libnsl/rpc/svc.c (revision 587032cf0967234b39ccb50adca936a367841063)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 /*
28  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
29  */
30 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
32 /*
33  * Portions of this source code were derived from Berkeley
34  * 4.3 BSD under license from the Regents of the University of
35  * California.
36  */
37 
38 #pragma ident	"%Z%%M%	%I%	%E% SMI"
39 
40 /*
41  * svc.c, Server-side remote procedure call interface.
42  *
43  * There are two sets of procedures here.  The xprt routines are
44  * for handling transport handles.  The svc routines handle the
45  * list of service routines.
46  *
47  */
48 
49 
50 #include "mt.h"
51 #include "rpc_mt.h"
52 #include <assert.h>
53 #include <errno.h>
54 #include <sys/types.h>
55 #include <stropts.h>
56 #include <sys/conf.h>
57 #include <rpc/rpc.h>
58 #ifdef PORTMAP
59 #include <rpc/pmap_clnt.h>
60 #endif
61 #include <sys/poll.h>
62 #include <netconfig.h>
63 #include <syslog.h>
64 #include <stdlib.h>
65 #include <unistd.h>
66 #include <string.h>
67 #include <limits.h>
68 
69 extern bool_t __svc_get_door_cred();
70 extern bool_t __rpc_get_local_cred();
71 
72 SVCXPRT **svc_xports;
73 static int nsvc_xports; 	/* total number of svc_xports allocated */
74 
75 XDR **svc_xdrs;		/* common XDR receive area */
76 int nsvc_xdrs;		/* total number of svc_xdrs allocated */
77 
78 int __rpc_use_pollfd_done;	/* set once the limit on connections is lifted */
79 
80 #define	NULL_SVC ((struct svc_callout *)0)
81 #define	RQCRED_SIZE	400		/* this size is excessive */
82 
83 /*
84  * The services list
85  * Each entry represents a set of procedures (an rpc program).
86  * The dispatch routine takes request structs and runs the
87  * appropriate procedure.
88  */
89 static struct svc_callout {
90 	struct svc_callout *sc_next;
91 	rpcprog_t	    sc_prog;
92 	rpcvers_t	    sc_vers;
93 	char		   *sc_netid;
94 	void		    (*sc_dispatch)();
95 } *svc_head;
96 extern rwlock_t	svc_lock;
97 
98 static struct svc_callout *svc_find();
99 int _svc_prog_dispatch();
100 void svc_getreq_common();
101 char *strdup();
102 
103 extern mutex_t	svc_door_mutex;
104 extern cond_t	svc_door_waitcv;
105 extern int	svc_ndoorfds;
106 extern SVCXPRT_LIST *_svc_xprtlist;
107 extern mutex_t xprtlist_lock;
108 extern void __svc_rm_from_xlist();
109 
110 extern fd_set _new_svc_fdset;
111 
112 /*
113  * If the allocated array of reactors is too small, it is grown by this
114  * margin, which reduces the number of reallocations.
115  */
116 #define	USER_FD_INCREMENT 5
117 
118 static void add_pollfd(int fd, short events);
119 static void remove_pollfd(int fd);
120 static void __svc_remove_input_of_fd(int fd);
121 
122 
123 /*
124  * Data used to handle a reactor:
125  *	- one file descriptor we listen to,
126  *	- one callback we call when the fd becomes ready,
127  *	- and a cookie passed as a parameter to the callback.
128  *
129  * The structure is an array indexed by file descriptor. Each entry points
130  * to the first element of a doubly-linked list of callbacks.
131  * Only one callback may be associated with a given (fd, event) pair.
132  */
133 
134 struct _svc_user_fd_head;
135 
136 typedef struct {
137 	struct _svc_user_fd_node *next;
138 	struct _svc_user_fd_node *previous;
139 } _svc_user_link;
140 
141 typedef struct _svc_user_fd_node {
142 	/* The lnk field must be the first field. */
143 	_svc_user_link lnk;
144 	svc_input_id_t id;
145 	int	    fd;
146 	unsigned int   events;
147 	svc_callback_t callback;
148 	void*	  cookie;
149 } _svc_user_fd_node;
150 
151 typedef struct _svc_user_fd_head {
152 	/* The lnk field must be the first field. */
153 	_svc_user_link lnk;
154 	unsigned int mask;    /* logical OR of all sub-masks */
155 } _svc_user_fd_head;
156 
157 
158 /* Define some macros to manage the linked list. */
159 #define	LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
160 #define	LIST_CLR(l) \
161 	(l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
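/*
 * The list head doubles as a sentinel node: LIST_CLR() points both links
 * back at the head itself, so an empty list is one whose first "node" is
 * the head.  A minimal sketch of the idiom (h is hypothetical):
 *
 *	_svc_user_fd_head h;
 *
 *	LIST_CLR(h);
 *	assert(LIST_ISEMPTY(h));	// next == previous == (node *)&h.lnk
 */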
162 
163 /* Array of defined reactors - indexed by file descriptor */
164 static _svc_user_fd_head *svc_userfds  = NULL;
165 
166 /* current size of the svc_userfds array */
167 static int svc_nuserfds = 0;
168 
169 /* Mutex to ensure MT safe operations for user fds callbacks. */
170 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
171 
172 
173 /*
174  * This structure is used to obtain constant-time algorithms. There is an
175  * array of these structures as large as svc_nuserfds. When the user
176  * registers a new callback, the address of the created structure is stored
177  * in a cell of this array; the index of that cell is the unique
178  * identifier returned to the user.
179  *
180  * On removal, the id is given by the user, and the free flag tells us
181  * whether the cell is in use. If it is free, we return an error.
182  * Otherwise, we can free the structure pointed to by fd_node.
183  *
184  * On insertion, we use the free list threaded through (first_free,
185  * next_free), so a correct index is handed to the user in constant time.
186  */
187 
188 typedef struct _svc_management_user_fd {
189 	bool_t free;
190 	union {
191 		svc_input_id_t next_free;
192 		_svc_user_fd_node *fd_node;
193 	} data;
194 } _svc_management_user_fd;
195 
196 /* index of the first free element */
197 static svc_input_id_t first_free = (svc_input_id_t)-1;
198 /* the size of this array is the same as svc_nuserfds */
199 static _svc_management_user_fd* user_fd_mgt_array = NULL;
200 
201 /* current size of user_fd_mgt_array */
202 static int svc_nmgtuserfds = 0;
203 
204 
205 /* Define some macros to access data associated to registration ids. */
206 #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
207 #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
208 
209 #ifndef POLLSTANDARD
210 #define	POLLSTANDARD \
211 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
212 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
213 #endif
214 
215 /*
216  * To free an id, we mark the cell free and insert its index at the head of
217  * the free list.
218  */
219 
220 static void
221 _svc_free_id(const svc_input_id_t id)
222 {
223 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
224 	user_fd_mgt_array[(int)id].free = TRUE;
225 	user_fd_mgt_array[(int)id].data.next_free = first_free;
226 	first_free = id;
227 }
228 
229 /*
230  * To get a free cell, we simply take it from the free list and clear its
231  * free flag. This function also allocates new memory if necessary, going
232  * through a temporary pointer so the existing array survives a failure.
233  */
234 static svc_input_id_t
235 _svc_attribute_new_id(_svc_user_fd_node *node)
236 {
237 	int selected_index = (int)first_free;
238 	assert(node != NULL);
239 
240 	if (selected_index == -1) {
241 		/* Allocate new entries */
242 		int L_inOldSize = svc_nmgtuserfds;
243 		int i;
244 		_svc_management_user_fd *tmp;
245 
246 		svc_nmgtuserfds += USER_FD_INCREMENT;
247 		tmp = realloc(user_fd_mgt_array, svc_nmgtuserfds *
248 		    sizeof (_svc_management_user_fd));
249 		if (tmp == NULL) {
250 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
251 			errno = ENOMEM;
252 			svc_nmgtuserfds = L_inOldSize;
253 			return ((svc_input_id_t)-1);
254 		}
255 		user_fd_mgt_array = tmp;
256 
257 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
258 			_svc_free_id((svc_input_id_t)i);
259 		selected_index = (int)first_free;
260 	}
261 
262 	node->id = (svc_input_id_t)selected_index;
263 	first_free = user_fd_mgt_array[selected_index].data.next_free;
264 
265 	user_fd_mgt_array[selected_index].data.fd_node = node;
266 	user_fd_mgt_array[selected_index].free = FALSE;
267 
268 	return ((svc_input_id_t)selected_index);
269 }
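/*
 * Illustrative trace of the free-list behavior (node0..node2 are
 * hypothetical): starting from an empty table with USER_FD_INCREMENT 5,
 * the realloc above creates cells 0..4 and frees them from the top down,
 * so the free list reads 0 -> 1 -> 2 -> 3 -> 4 and first_free == 0.
 * Ids are then handed out and recycled in constant time:
 *
 *	id = _svc_attribute_new_id(node0);	// id == 0
 *	id = _svc_attribute_new_id(node1);	// id == 1
 *	_svc_free_id((svc_input_id_t)0);	// 0 back at the list head
 *	id = _svc_attribute_new_id(node2);	// id == 0 again
 */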
270 
271 /*
272  * Process a pollfd event. Scan all the associated callbacks whose mask
273  * shares at least one bit with the received events.
274  *
275  * If POLLNVAL is received, we check whether some callback processes it;
276  * if not, we remove the file descriptor from the poll set. If there is
277  * one, we let the user do the work.
278  */
279 void
280 __svc_getreq_user(struct pollfd *pfd)
281 {
282 	int fd = pfd->fd;
283 	short revents = pfd->revents;
284 	bool_t invalHandled = FALSE;
285 	_svc_user_fd_node *node;
286 
287 	(void) mutex_lock(&svc_userfds_lock);
288 
289 	if ((fd < 0) || (fd >= svc_nuserfds)) {
290 		(void) mutex_unlock(&svc_userfds_lock);
291 		return;
292 	}
293 
294 	node = svc_userfds[fd].lnk.next;
295 
296 	/* check whether at least one registered event was received */
297 	if (0 == (revents & svc_userfds[fd].mask)) {
298 		(void) mutex_unlock(&svc_userfds_lock);
299 		return;
300 	}
301 
302 	while ((svc_userfds[fd].mask != 0) &&
303 	    ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
304 		/*
305 		 * Invoke the callback below if one of the received events
306 		 * matches one the node listens to.
307 		 */
308 		_svc_user_fd_node *next = node->lnk.next;
309 
310 		if (node->callback != NULL) {
311 			if (node->events & revents) {
312 				if (revents & POLLNVAL) {
313 					invalHandled = TRUE;
314 				}
315 
316 				/*
317 				 * The lock must be released before calling the
318 				 * user function, as this function can call
319 				 * svc_remove_input() for example.
320 				 */
321 				(void) mutex_unlock(&svc_userfds_lock);
322 				node->callback(node->id, node->fd,
323 				    node->events & revents, node->cookie);
324 				/*
325 				 * Do not use the node structure anymore, as it
326 				 * could have been deallocated by the previous
327 				 * callback.
328 				 */
329 				(void) mutex_lock(&svc_userfds_lock);
330 			}
331 		}
332 		node = next;
333 	}
334 
335 	if ((revents & POLLNVAL) && !invalHandled)
336 		__svc_remove_input_of_fd(fd);
337 	(void) mutex_unlock(&svc_userfds_lock);
338 }
339 
340 
341 /*
342  * Check if a file descriptor is associated with a user reactor.
343  * To do this, just check that the entry indexed by fd has a non-empty
344  * callback list (i.e., a non-zero event mask).
345  */
346 bool_t
347 __is_a_userfd(int fd)
348 {
349 	/* Checks argument */
350 	if ((fd < 0) || (fd >= svc_nuserfds))
351 		return (FALSE);
352 	return ((svc_userfds[fd].mask == 0x0000) ? FALSE : TRUE);
353 }
354 
355 /* Free everything concerning user fds. */
356 /* Used in svc_run.c, hence not static. */
357 
358 void
359 __destroy_userfd(void)
360 {
361 	int one_fd;
362 	/* Clean user fd */
363 	if (svc_userfds != NULL) {
364 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
365 			_svc_user_fd_node *node;
366 
367 			node = svc_userfds[one_fd].lnk.next;
368 			while ((_svc_user_link *) node
369 			    != (_svc_user_link *) &(svc_userfds[one_fd])) {
370 				_svc_free_id(node->id);
371 				node = node->lnk.next;
372 				free(node->lnk.previous);
373 			}
374 		}
375 
376 		free(user_fd_mgt_array);
377 		user_fd_mgt_array = NULL;
378 		first_free = (svc_input_id_t)-1;
379 
380 		free(svc_userfds);
381 		svc_userfds = NULL;
382 		svc_nuserfds = 0;
383 	}
384 }
385 
386 /*
387  * Remove all the callbacks associated with an fd; useful when the fd is
388  * closed, for instance.
389  */
390 static void
391 __svc_remove_input_of_fd(int fd)
392 {
393 	_svc_user_fd_node *one_node;
394 
395 	if ((fd < 0) || (fd >= svc_nuserfds))
396 		return;
397 
398 	one_node = svc_userfds[fd].lnk.next;
399 	while ((_svc_user_link *) one_node
400 	    != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
401 		_svc_free_id(one_node->id);
402 		one_node = one_node->lnk.next;
403 		free(one_node->lnk.previous);
404 	}
405 
406 	LIST_CLR(svc_userfds[fd]);
407 	svc_userfds[fd].mask = 0;
408 }
409 
410 /*
411  * Allow the user to add an fd to the poll list. If it does not succeed,
412  * return -1. Otherwise, return the registration id.
413  */
414 
415 svc_input_id_t
416 svc_add_input(int user_fd, unsigned int events,
417     svc_callback_t user_callback, void *cookie)
418 {
419 	_svc_user_fd_node *new_node;
420 
421 	if (user_fd < 0) {
422 		errno = EINVAL;
423 		return ((svc_input_id_t)-1);
424 	}
425 
426 	if ((events == 0x0000) ||
427 	    (events & ~POLLSTANDARD)) {
429 		errno = EINVAL;
430 		return ((svc_input_id_t)-1);
431 	}
432 
433 	(void) mutex_lock(&svc_userfds_lock);
434 
435 	if ((user_fd < svc_nuserfds) &&
436 	    (svc_userfds[user_fd].mask & events) != 0) {
437 		/* Callback already registered */
438 		errno = EEXIST;
439 		(void) mutex_unlock(&svc_userfds_lock);
440 		return ((svc_input_id_t)-1);
441 	}
442 
443 	/* Handle memory allocation. */
444 	if (user_fd >= svc_nuserfds) {
445 		int oldSize = svc_nuserfds;
446 		int i;
447 		_svc_user_fd_head *tmp;
448 
449 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
450 		tmp = realloc(svc_userfds,
451 		    svc_nuserfds * sizeof (_svc_user_fd_head));
452 		if (tmp == NULL) {
453 			syslog(LOG_ERR, "svc_add_input: out of memory");
454 			errno = ENOMEM;
455 			svc_nuserfds = oldSize;
456 			(void) mutex_unlock(&svc_userfds_lock);
457 			return ((svc_input_id_t)-1);
458 		}
459 		svc_userfds = tmp;
460 
461 		for (i = oldSize; i < svc_nuserfds; i++) {
462 			LIST_CLR(svc_userfds[i]);
463 			svc_userfds[i].mask = 0;
464 		}
465 	}
466 
467 	new_node = malloc(sizeof (_svc_user_fd_node));
468 	if (new_node == NULL) {
469 		syslog(LOG_ERR, "svc_add_input: out of memory");
470 		errno = ENOMEM;
471 		(void) mutex_unlock(&svc_userfds_lock);
472 		return ((svc_input_id_t)-1);
473 	}
474 
475 	/* create a new node */
476 	new_node->fd		= user_fd;
477 	new_node->events	= events;
478 	new_node->callback	= user_callback;
479 	new_node->cookie	= cookie;
480 
481 	if (_svc_attribute_new_id(new_node) == (svc_input_id_t)-1) {
482 		free(new_node);
483 		(void) mutex_unlock(&svc_userfds_lock);
484 		return ((svc_input_id_t)-1);
485 	}
482 
483 	/* Add the new element at the beginning of the list. */
484 	if (LIST_ISEMPTY(svc_userfds[user_fd])) {
485 		svc_userfds[user_fd].lnk.previous = new_node;
486 	}
487 	new_node->lnk.next = svc_userfds[user_fd].lnk.next;
488 	new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
489 
490 	svc_userfds[user_fd].lnk.next = new_node;
491 
492 	/* refresh global mask for this file descriptor */
493 	svc_userfds[user_fd].mask |= events;
494 
495 	/* refresh mask for the poll */
496 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
497 
498 	(void) mutex_unlock(&svc_userfds_lock);
499 	return (new_node->id);
500 }
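/*
 * Hedged usage sketch (echo_ready and my_fd are hypothetical; the callback
 * signature mirrors the invocation in __svc_getreq_user() above): let the
 * RPC poll loop watch a private descriptor alongside the RPC transports:
 *
 *	static void
 *	echo_ready(svc_input_id_t id, int fd, unsigned int events,
 *	    void *cookie)
 *	{
 *		char buf[512];
 *
 *		if (read(fd, buf, sizeof (buf)) <= 0 ||
 *		    (events & (POLLHUP|POLLERR|POLLNVAL)))
 *			(void) svc_remove_input(id);
 *	}
 *
 *	svc_input_id_t id = svc_add_input(my_fd, POLLIN, echo_ready, NULL);
 *	if ((int)id == -1)
 *		syslog(LOG_ERR, "svc_add_input failed: %m");
 */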
501 
502 
503 int
504 svc_remove_input(svc_input_id_t id)
505 {
506 	_svc_user_fd_node* node;
507 	_svc_user_fd_node* next;
508 	_svc_user_fd_node* previous;
509 	int fd;		/* cached copy of node->fd */
510 
511 	(void) mutex_lock(&svc_userfds_lock);
512 
513 	/* Immediately update data for id management */
514 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
515 	    is_free_id(id)) {
516 		errno = EINVAL;
517 		(void) mutex_unlock(&svc_userfds_lock);
518 		return (-1);
519 	}
520 
521 	node = node_from_id(id);
522 	assert(node != NULL);
523 
524 	_svc_free_id(id);
525 	next		= node->lnk.next;
526 	previous	= node->lnk.previous;
527 	fd		= node->fd; /* cache fd before node is freed */
528 
529 	/* Remove this node from the list. */
530 	previous->lnk.next = next;
531 	next->lnk.previous = previous;
532 
533 	/* Remove the node's event bits from the global mask */
534 	svc_userfds[fd].mask ^= node->events;
535 
536 	free(node);
537 	if (svc_userfds[fd].mask == 0) {
538 		LIST_CLR(svc_userfds[fd]);
539 		assert(LIST_ISEMPTY(svc_userfds[fd]));
540 		remove_pollfd(fd);
541 	}
542 	/* A cleanup pass here could shrink the id management array. */
543 
544 	(void) mutex_unlock(&svc_userfds_lock);
545 	return (0);
546 }
547 
548 
549 /*
550  * Provides default service-side functions for authentication flavors
551  * that do not use all the fields in struct svc_auth_ops.
552  */
553 
554 /*ARGSUSED*/
555 static int
556 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
557 {
558 	return (*xfunc)(xdrs, xwhere);
559 }
560 
561 struct svc_auth_ops svc_auth_any_ops = {
562 	authany_wrap,
563 	authany_wrap,
564 };
565 
566 /*
567  * Return pointer to server authentication structure.
568  */
569 SVCAUTH *
570 __svc_get_svcauth(SVCXPRT *xprt)
571 {
572 /* LINTED pointer alignment */
573 	return (&SVC_XP_AUTH(xprt));
574 }
575 
576 /*
577  * A callback routine to cleanup after a procedure is executed.
578  */
579 void (*__proc_cleanup_cb)() = NULL;
580 
581 void *
582 __svc_set_proc_cleanup_cb(void *cb)
583 {
584 	void	*tmp = (void *)__proc_cleanup_cb;
585 
586 	__proc_cleanup_cb = (void (*)())cb;
587 	return (tmp);
588 }
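/*
 * Hedged usage sketch (trace_cleanup is hypothetical): the callback
 * receives the transport handle after each dispatched procedure, and the
 * previous callback is returned so it can be restored later:
 *
 *	static void
 *	trace_cleanup(SVCXPRT *xprt)
 *	{
 *		syslog(LOG_DEBUG, "request done on fd %d", xprt->xp_fd);
 *	}
 *
 *	void *prev = __svc_set_proc_cleanup_cb((void *)trace_cleanup);
 *	...
 *	(void) __svc_set_proc_cleanup_cb(prev);		// restore
 */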
589 
590 /* ***************  SVCXPRT related stuff **************** */
591 
592 
593 static int pollfd_shrinking = 1;
594 
595 
596 /*
597  * Add fd to svc_pollfd
598  */
599 static void
600 add_pollfd(int fd, short events)
601 {
602 	if (fd < FD_SETSIZE) {
603 		FD_SET(fd, &svc_fdset);
604 #if !defined(_LP64)
605 		FD_SET(fd, &_new_svc_fdset);
606 #endif
607 		svc_nfds++;
608 		svc_nfds_set++;
609 		if (fd >= svc_max_fd)
610 			svc_max_fd = fd + 1;
611 	}
612 	if (fd >= svc_max_pollfd)
613 		svc_max_pollfd = fd + 1;
614 	if (svc_max_pollfd > svc_pollfd_allocd) {
615 		int i = svc_pollfd_allocd;
616 		pollfd_t *tmp;
617 		do {
618 			svc_pollfd_allocd += POLLFD_EXTEND;
619 		} while (svc_max_pollfd > svc_pollfd_allocd);
620 		tmp = realloc(svc_pollfd,
621 					sizeof (pollfd_t) * svc_pollfd_allocd);
622 		if (tmp != NULL) {
623 			svc_pollfd = tmp;
624 			for (; i < svc_pollfd_allocd; i++)
625 				POLLFD_CLR(i, tmp);
626 		} else {
627 			/*
628 			 * Give an error message; undo the fdset settings
629 			 * made above; and clear the pollfd_shrinking flag.
630 			 * Because of this, poll will not be done on these
631 			 * fds.
632 			 */
633 			if (fd < FD_SETSIZE) {
634 				FD_CLR(fd, &svc_fdset);
635 #if !defined(_LP64)
636 				FD_CLR(fd, &_new_svc_fdset);
637 #endif
638 				svc_nfds--;
639 				svc_nfds_set--;
640 				if (fd == (svc_max_fd - 1))
641 					svc_max_fd--;
642 			}
643 			if (fd == (svc_max_pollfd - 1))
644 				svc_max_pollfd--;
645 			pollfd_shrinking = 0;
646 			syslog(LOG_ERR, "add_pollfd: out of memory");
647 			_exit(1);
648 		}
649 	}
650 	svc_pollfd[fd].fd	= fd;
651 	svc_pollfd[fd].events	= events;
652 	svc_npollfds++;
653 	svc_npollfds_set++;
654 }
655 
656 /*
657  * The fd is still active; only its bit in the fdset is cleared.
658  * Do not decrement svc_nfds or svc_npollfds.
659  */
660 void
661 clear_pollfd(int fd)
662 {
663 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
664 		FD_CLR(fd, &svc_fdset);
665 #if !defined(_LP64)
666 		FD_CLR(fd, &_new_svc_fdset);
667 #endif
668 		svc_nfds_set--;
669 	}
670 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
671 		POLLFD_CLR(fd, svc_pollfd);
672 		svc_npollfds_set--;
673 	}
674 }
675 
676 /*
677  * Set the bit in the fdset for an active fd so that poll() is done for it.
678  */
679 void
680 set_pollfd(int fd, short events)
681 {
682 	if (fd < FD_SETSIZE) {
683 		FD_SET(fd, &svc_fdset);
684 #if !defined(_LP64)
685 		FD_SET(fd, &_new_svc_fdset);
686 #endif
687 		svc_nfds_set++;
688 	}
689 	if (fd < svc_pollfd_allocd) {
690 		svc_pollfd[fd].fd	= fd;
691 		svc_pollfd[fd].events	= events;
692 		svc_npollfds_set++;
693 	}
694 }
695 
696 /*
697  * remove a svc_pollfd entry; it does not shrink the memory
698  */
699 static void
700 remove_pollfd(int fd)
701 {
702 	clear_pollfd(fd);
703 	if (fd == (svc_max_fd - 1))
704 		svc_max_fd--;
705 	svc_nfds--;
706 	if (fd == (svc_max_pollfd - 1))
707 		svc_max_pollfd--;
708 	svc_npollfds--;
709 }
710 
711 /*
712  * delete a svc_pollfd entry; it shrinks the memory
713  * use remove_pollfd if you do not want to shrink
714  */
715 static void
716 delete_pollfd(int fd)
717 {
718 	remove_pollfd(fd);
719 	if (pollfd_shrinking && svc_max_pollfd <
720 			(svc_pollfd_allocd - POLLFD_SHRINK)) {
721 		do {
722 			svc_pollfd_allocd -= POLLFD_SHRINK;
723 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
724 		svc_pollfd = realloc(svc_pollfd,
725 				sizeof (pollfd_t) * svc_pollfd_allocd);
726 		if (svc_pollfd == NULL) {
727 			syslog(LOG_ERR, "delete_pollfd: out of memory");
728 			_exit(1);
729 		}
730 	}
731 }
732 
733 
734 /*
735  * Activate a transport handle.
736  */
737 void
738 xprt_register(const SVCXPRT *xprt)
739 {
740 	int fd = xprt->xp_fd;
741 #ifdef CALLBACK
742 	extern void (*_svc_getreqset_proc)();
743 #endif
744 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
745 
746 	(void) rw_wrlock(&svc_fd_lock);
747 	if (svc_xports == NULL) {
748 		/* allocate some small amount first */
749 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
750 		if (svc_xports == NULL) {
751 			syslog(LOG_ERR, "xprt_register: out of memory");
752 			_exit(1);
753 		}
754 		nsvc_xports = FD_INCREMENT;
755 
756 #ifdef CALLBACK
757 		/*
758 		 * XXX: This code does not keep track of the server state.
759 		 *
760 		 * This provides for callback support.  When a client
761 		 * receives a call from another client on the server fds,
762 		 * it calls _svc_getreqset_proc(), which returns after
763 		 * serving all the server requests.  See also clnt_dg.c
764 		 * and clnt_vc.c (the clnt_call part of each).
765 		 */
766 		_svc_getreqset_proc = svc_getreq_poll;
767 #endif
768 	}
769 
770 	while (fd >= nsvc_xports) {
771 		SVCXPRT **tmp_xprts = svc_xports;
772 
773 		/* time to expand svc_xports */
774 		tmp_xprts = realloc(svc_xports,
775 			sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
776 		if (tmp_xprts == NULL) {
777 			syslog(LOG_ERR, "xprt_register : out of memory.");
778 			_exit(1);
779 		}
780 
781 		svc_xports = tmp_xprts;
782 		(void) memset(&svc_xports[nsvc_xports], 0,
783 					sizeof (SVCXPRT *) * FD_INCREMENT);
784 		nsvc_xports += FD_INCREMENT;
785 	}
786 
787 	svc_xports[fd] = (SVCXPRT *)xprt;
788 
789 	add_pollfd(fd, MASKVAL);
790 
791 	if (svc_polling) {
792 		char dummy;
793 
794 		/*
795 		 * This happens only in one of the MT modes.
796 		 * Wake up poller.
797 		 */
798 		(void) write(svc_pipe[1], &dummy, sizeof (dummy));
799 	}
800 	/*
801 	 * If already dispatching door based services, start
802 	 * dispatching TLI based services now.
803 	 */
804 	(void) mutex_lock(&svc_door_mutex);
805 	if (svc_ndoorfds > 0)
806 		(void) cond_signal(&svc_door_waitcv);
807 	(void) mutex_unlock(&svc_door_mutex);
808 
809 	if (svc_xdrs == NULL) {
810 		/* allocate initial chunk */
811 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
812 		if (svc_xdrs != NULL)
813 			nsvc_xdrs = FD_INCREMENT;
814 		else {
815 			syslog(LOG_ERR, "xprt_register : out of memory.");
816 			_exit(1);
817 		}
818 	}
819 	(void) rw_unlock(&svc_fd_lock);
820 }
821 
822 /*
823  * De-activate a transport handle.
824  */
825 void
826 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
827 {
828 	int fd = xprt->xp_fd;
829 
830 	if (lock_not_held)
831 		(void) rw_wrlock(&svc_fd_lock);
832 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
833 		svc_xports[fd] = NULL;
834 		delete_pollfd(fd);
835 	}
836 	if (lock_not_held)
837 		(void) rw_unlock(&svc_fd_lock);
838 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
839 }
840 
841 void
842 xprt_unregister(const SVCXPRT *xprt)
843 {
844 	__xprt_unregister_private(xprt, TRUE);
845 }
846 
847 /* ********************** CALLOUT list related stuff ************* */
848 
849 /*
850  * Add a service program to the callout list.
851  * The dispatch routine will be called when an RPC request for this
852  * program number comes in.
853  */
854 bool_t
855 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
856 			void (*dispatch)(), const struct netconfig *nconf)
857 {
858 	struct svc_callout *prev;
859 	struct svc_callout *s, **s2;
860 	struct netconfig *tnconf;
861 	char *netid = NULL;
862 	int flag = 0;
863 
864 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
865 
866 	if (xprt->xp_netid) {
867 		netid = strdup(xprt->xp_netid);
868 		flag = 1;
869 	} else if (nconf && nconf->nc_netid) {
870 		netid = strdup(nconf->nc_netid);
871 		flag = 1;
872 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
873 			!= NULL) {
874 		netid = strdup(tnconf->nc_netid);
875 		flag = 1;
876 		freenetconfigent(tnconf);
877 	} /* must have been created with svc_raw_create */
878 	if ((netid == NULL) && (flag == 1))
879 		return (FALSE);
880 
881 	(void) rw_wrlock(&svc_lock);
882 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
883 		if (netid)
884 			free(netid);
885 		if (s->sc_dispatch == dispatch)
886 			goto rpcb_it; /* the caller is registering another xprt */
887 		(void) rw_unlock(&svc_lock);
888 		return (FALSE);
889 	}
890 	s = malloc(sizeof (struct svc_callout));
891 	if (s == NULL) {
892 		if (netid)
893 			free(netid);
894 		(void) rw_unlock(&svc_lock);
895 		return (FALSE);
896 	}
897 
898 	s->sc_prog = prog;
899 	s->sc_vers = vers;
900 	s->sc_dispatch = dispatch;
901 	s->sc_netid = netid;
902 	s->sc_next = NULL;
903 
904 	/*
905 	 * The ordering of transports is such that the most frequently used
906 	 * one appears first.  So add the new entry to the end of the list.
907 	 */
908 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
909 		;
910 	*s2 = s;
911 
912 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
913 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
914 			syslog(LOG_ERR, "svc_reg : strdup failed.");
915 			free(netid);
916 			free(s);
917 			*s2 = NULL;
918 			(void) rw_unlock(&svc_lock);
919 			return (FALSE);
920 		}
921 
922 rpcb_it:
923 	(void) rw_unlock(&svc_lock);
924 	/* now register the information with the local binder service */
925 	if (nconf)
926 		return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
927 	return (TRUE);
928 }
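/*
 * Hedged usage sketch (EXAMPLE_PROG, EXAMPLE_VERS and example_dispatch
 * are hypothetical): create a transport for one netconfig entry and
 * register a dispatch routine on it:
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *
 *	if (xprt == NULL ||
 *	    !svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS, example_dispatch,
 *	    nconf))
 *		syslog(LOG_ERR, "unable to register EXAMPLE_PROG");
 *	freenetconfigent(nconf);
 *	svc_run();
 */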
929 
930 /*
931  * Remove a service program from the callout list.
932  */
933 void
934 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
935 {
936 	struct svc_callout *prev;
937 	struct svc_callout *s;
938 
939 	/* unregister the information anyway */
940 	(void) rpcb_unset(prog, vers, NULL);
941 	(void) rw_wrlock(&svc_lock);
942 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
943 		if (prev == NULL_SVC) {
944 			svc_head = s->sc_next;
945 		} else {
946 			prev->sc_next = s->sc_next;
947 		}
948 		s->sc_next = NULL_SVC;
949 		if (s->sc_netid)
950 			free(s->sc_netid);
951 		free(s);
952 	}
953 	(void) rw_unlock(&svc_lock);
954 }
955 
956 #ifdef PORTMAP
957 /*
958  * Add a service program to the callout list.
959  * The dispatch routine will be called when an RPC request for this
960  * program number comes in.
961  * For version 2 portmappers.
962  */
963 bool_t
964 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
965 					void (*dispatch)(), int protocol)
966 {
967 	struct svc_callout *prev;
968 	struct svc_callout *s;
969 	struct netconfig *nconf;
970 	char *netid = NULL;
971 	int flag = 0;
972 
973 	if (xprt->xp_netid) {
974 		netid = strdup(xprt->xp_netid);
975 		flag = 1;
976 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
977 	    __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
978 		/* fill in missing netid field in SVCXPRT */
979 		netid = strdup(nconf->nc_netid);
980 		flag = 1;
981 		freenetconfigent(nconf);
982 	} /* must be svc_raw_create */
983 
984 	if ((netid == NULL) && (flag == 1))
985 		return (FALSE);
986 
987 	(void) rw_wrlock(&svc_lock);
988 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
989 		if (netid)
990 			free(netid);
991 		if (s->sc_dispatch == dispatch)
992 			goto pmap_it;  /* the caller is registering another xprt */
993 		(void) rw_unlock(&svc_lock);
994 		return (FALSE);
995 	}
996 	s = malloc(sizeof (struct svc_callout));
997 	if (s == (struct svc_callout *)0) {
998 		if (netid)
999 			free(netid);
1000 		(void) rw_unlock(&svc_lock);
1001 		return (FALSE);
1002 	}
1003 	s->sc_prog = prog;
1004 	s->sc_vers = vers;
1005 	s->sc_dispatch = dispatch;
1006 	s->sc_netid = netid;
1007 	s->sc_next = svc_head;
1008 	svc_head = s;
1009 
1010 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1011 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1012 			syslog(LOG_ERR, "svc_register : strdup failed.");
1013 			free(netid);
1014 			svc_head = s->sc_next;
1015 			free(s);
1016 			(void) rw_unlock(&svc_lock);
1017 			return (FALSE);
1018 		}
1019 
1020 pmap_it:
1021 	(void) rw_unlock(&svc_lock);
1022 	/* now register the information with the local binder service */
1023 	if (protocol)
1024 		return (pmap_set(prog, vers, protocol, xprt->xp_port));
1025 	return (TRUE);
1026 }
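/*
 * Hedged legacy usage sketch (EXAMPLE_PROG, EXAMPLE_VERS and
 * example_dispatch are hypothetical): the portmap-era flow pairs this
 * routine with a socket transport and a protocol number:
 *
 *	SVCXPRT *xprt = svcudp_create(RPC_ANYSOCK);
 *
 *	(void) pmap_unset(EXAMPLE_PROG, EXAMPLE_VERS);
 *	if (xprt == NULL ||
 *	    !svc_register(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
 *	    example_dispatch, IPPROTO_UDP))
 *		syslog(LOG_ERR, "unable to register EXAMPLE_PROG");
 */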
1027 
1028 /*
1029  * Remove a service program from the callout list.
1030  * For version 2 portmappers.
1031  */
1032 void
1033 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1034 {
1035 	struct svc_callout *prev;
1036 	struct svc_callout *s;
1037 
1038 	(void) rw_wrlock(&svc_lock);
1039 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1040 		if (prev == NULL_SVC) {
1041 			svc_head = s->sc_next;
1042 		} else {
1043 			prev->sc_next = s->sc_next;
1044 		}
1045 		s->sc_next = NULL_SVC;
1046 		if (s->sc_netid)
1047 			free(s->sc_netid);
1048 		free(s);
1049 		/* unregister the information with the local binder service */
1050 		(void) pmap_unset(prog, vers);
1051 	}
1052 	(void) rw_unlock(&svc_lock);
1053 }
1054 #endif /* PORTMAP */
1055 
1056 /*
1057  * Search the callout list for a program number, return the callout
1058  * struct.
1059  * Also check the transport.  Many routines such as svc_unreg
1060  * don't give any corresponding transport, so don't check the transport
1061  * if netid == NULL.
1062  */
1063 static struct svc_callout *
1064 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1065 {
1066 	struct svc_callout *s, *p;
1067 
1068 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1069 
1070 /*	assert(RW_WRITE_HELD(&svc_lock)); */
1071 	p = NULL_SVC;
1072 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1073 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1074 			((netid == NULL) || (s->sc_netid == NULL) ||
1075 			(strcmp(netid, s->sc_netid) == 0)))
1076 				break;
1077 		p = s;
1078 	}
1079 	*prev = p;
1080 	return (s);
1081 }
1082 
1083 
1084 /* ******************* REPLY GENERATION ROUTINES  ************ */
1085 
1086 /*
1087  * Send a reply to an rpc request
1088  */
1089 bool_t
1090 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1091 						const caddr_t xdr_location)
1092 {
1093 	struct rpc_msg rply;
1094 
1095 	rply.rm_direction = REPLY;
1096 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1097 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1098 	rply.acpted_rply.ar_stat = SUCCESS;
1099 	rply.acpted_rply.ar_results.where = xdr_location;
1100 	rply.acpted_rply.ar_results.proc = xdr_results;
1101 	return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1102 }
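/*
 * Hedged dispatch sketch (example_args, example_res, xdr_example_args,
 * xdr_example_res and example_proc_1 are hypothetical): a typical
 * dispatch routine decodes the arguments, runs the procedure, and answers
 * through svc_sendreply() or one of the svcerr_*() routines below:
 *
 *	example_args args;
 *	example_res res;
 *
 *	(void) memset(&args, 0, sizeof (args));
 *	if (!svc_getargs(xprt, xdr_example_args, (caddr_t)&args)) {
 *		svcerr_decode(xprt);
 *		return;
 *	}
 *	res = example_proc_1(&args);
 *	if (!svc_sendreply(xprt, xdr_example_res, (caddr_t)&res))
 *		svcerr_systemerr(xprt);
 *	(void) svc_freeargs(xprt, xdr_example_args, (caddr_t)&args);
 */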
1103 
1104 /*
1105  * No procedure error reply
1106  */
1107 void
1108 svcerr_noproc(const SVCXPRT *xprt)
1109 {
1110 	struct rpc_msg rply;
1111 
1112 	rply.rm_direction = REPLY;
1113 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1114 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1115 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1116 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1117 }
1118 
1119 /*
1120  * Can't decode args error reply
1121  */
1122 void
1123 svcerr_decode(const SVCXPRT *xprt)
1124 {
1125 	struct rpc_msg rply;
1126 
1127 	rply.rm_direction = REPLY;
1128 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1129 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1130 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1131 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1132 }
1133 
1134 /*
1135  * Some system error
1136  */
1137 void
1138 svcerr_systemerr(const SVCXPRT *xprt)
1139 {
1140 	struct rpc_msg rply;
1141 
1142 	rply.rm_direction = REPLY;
1143 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1144 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1145 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1146 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1147 }
1148 
1149 /*
1150  * Tell the RPC package not to complain about version errors to the
1151  * client.  This is useful when revving broadcast protocols that sit on a
1152  * fixed address.  There is really only one (or there should be only one)
1153  * example of this kind of protocol: the portmapper (or rpc binder).
1154  */
1155 void
1156 __svc_versquiet_on(const SVCXPRT *xprt)
1157 {
1158 /* LINTED pointer alignment */
1159 	svc_flags(xprt) |= SVC_VERSQUIET;
1160 }
1161 
1162 void
1163 __svc_versquiet_off(const SVCXPRT *xprt)
1164 {
1165 /* LINTED pointer alignment */
1166 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1167 }
1168 
1169 void
1170 svc_versquiet(const SVCXPRT *xprt)
1171 {
1172 	__svc_versquiet_on(xprt);
1173 }
1174 
1175 int
1176 __svc_versquiet_get(const SVCXPRT *xprt)
1177 {
1178 /* LINTED pointer alignment */
1179 	return (svc_flags(xprt) & SVC_VERSQUIET);
1180 }
1181 
1182 /*
1183  * Authentication error reply
1184  */
1185 void
1186 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1187 {
1188 	struct rpc_msg rply;
1189 
1190 	rply.rm_direction = REPLY;
1191 	rply.rm_reply.rp_stat = MSG_DENIED;
1192 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1193 	rply.rjcted_rply.rj_why = why;
1194 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1195 }
1196 
1197 /*
1198  * Auth too weak error reply
1199  */
1200 void
1201 svcerr_weakauth(const SVCXPRT *xprt)
1202 {
1203 	svcerr_auth(xprt, AUTH_TOOWEAK);
1204 }
1205 
1206 /*
1207  * Program unavailable error reply
1208  */
1209 void
1210 svcerr_noprog(const SVCXPRT *xprt)
1211 {
1212 	struct rpc_msg rply;
1213 
1214 	rply.rm_direction = REPLY;
1215 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1216 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1217 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1218 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1219 }
1220 
1221 /*
1222  * Program version mismatch error reply
1223  */
1224 void
1225 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1226 						const rpcvers_t high_vers)
1227 {
1228 	struct rpc_msg rply;
1229 
1230 	rply.rm_direction = REPLY;
1231 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1232 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1233 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1234 	rply.acpted_rply.ar_vers.low = low_vers;
1235 	rply.acpted_rply.ar_vers.high = high_vers;
1236 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1237 }
1238 
1239 /* ******************* SERVER INPUT STUFF ******************* */
1240 
1241 /*
1242  * Get server side input from some transport.
1243  *
1244  * Statement of authentication parameters management:
1245  * This function owns and manages all authentication parameters, specifically
1246  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1247  * the "cooked" credentials (rqst->rq_clntcred).
1248  * However, this function does not know the structure of the cooked
1249  * credentials, so it makes the following assumptions:
1250  *   a) the structure is contiguous (no pointers), and
1251  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1252  * In all events, all three parameters are freed upon exit from this routine.
1253  * The storage is trivially managed on the call stack in user land, but
1254  * is allocated with malloc in kernel land.
1255  */
1256 
1257 void
1258 svc_getreq(int rdfds)
1259 {
1260 	fd_set readfds;
1261 
1262 	FD_ZERO(&readfds);
1263 	readfds.fds_bits[0] = rdfds;	/* this old interface covers fds 0..31 only */
1264 	svc_getreqset(&readfds);
1265 }
1266 
1267 void
1268 svc_getreqset(fd_set *readfds)
1269 {
1270 	int i;
1271 
1272 	for (i = 0; i < svc_max_fd; i++) {
1273 		/* fd has input waiting */
1274 		if (FD_ISSET(i, readfds))
1275 			svc_getreq_common(i);
1276 	}
1277 }
1278 
1279 void
1280 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1281 {
1282 	int i;
1283 	int fds_found;
1284 
1285 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1286 		struct pollfd *p = &pfdp[i];
1287 
1288 		if (p->revents) {
1289 			/* fd has input waiting */
1290 			fds_found++;
1291 			/*
1292 			 *	We assume that this function is only called
1293 			 *	via someone select()ing from svc_fdset or
1294 			 *	poll()ing from svc_pollset[].  Thus it's safe
1295 			 *	to handle the POLLNVAL event by simply turning
1296 			 *	the corresponding bit off in svc_fdset.  The
1297 			 *	svc_pollset[] array is derived from svc_fdset
1298 			 *	and so will also be updated eventually.
1299 			 *
1300 			 *	XXX Should we do an xprt_unregister() instead?
1301 			 */
1302 			/* Handle user callback */
1303 			if (__is_a_userfd(p->fd) == TRUE) {
1304 				(void) rw_rdlock(&svc_fd_lock);
1305 				__svc_getreq_user(p);
1306 				(void) rw_unlock(&svc_fd_lock);
1307 			} else {
1308 				if (p->revents & POLLNVAL) {
1309 					(void) rw_wrlock(&svc_fd_lock);
1310 					remove_pollfd(p->fd);	/* XXX */
1311 					(void) rw_unlock(&svc_fd_lock);
1312 				} else {
1313 					svc_getreq_common(p->fd);
1314 				}
1315 			}
1316 		}
1317 	}
1318 }
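/*
 * Hedged sketch of a hand-rolled service loop built on this routine
 * (assumes the single-threaded mode, where the library polls the globally
 * maintained svc_pollfd array itself; cleared entries carry fd == -1,
 * which poll() skips):
 *
 *	for (;;) {
 *		int n = poll(svc_pollfd, svc_max_pollfd, -1);
 *
 *		if (n > 0)
 *			svc_getreq_poll(svc_pollfd, n);
 *		else if (n < 0 && errno != EINTR)
 *			break;
 *	}
 */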
1319 
1320 void
1321 svc_getreq_common(const int fd)
1322 {
1323 	SVCXPRT *xprt;
1324 	enum xprt_stat stat;
1325 	struct rpc_msg *msg;
1326 	struct svc_req *r;
1327 	char *cred_area;
1328 
1329 	(void) rw_rdlock(&svc_fd_lock);
1330 
1331 	/* HANDLE USER CALLBACK */
1332 	if (__is_a_userfd(fd) == TRUE) {
1333 		struct pollfd virtual_fd;
1334 
1335 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1336 		virtual_fd.fd = fd;
1337 		__svc_getreq_user(&virtual_fd);
1338 		(void) rw_unlock(&svc_fd_lock);
1339 		return;
1340 	}
1341 
1342 	/*
1343 	 * The transport associated with this fd could have been
1344 	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
1345 	 * This can happen if two or more fds get read events and are
1346 	 * passed to svc_getreq_poll/set: the first fd is serviced by
1347 	 * the dispatch routine, which cleans up any dead transports.  If
1348 	 * one of the dead transports removed is the other fd that
1349 	 * had a read event then svc_getreq_common() will be called with no
1350 	 * xprt associated with the fd that had the original read event.
1351 	 */
1352 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1353 		(void) rw_unlock(&svc_fd_lock);
1354 		return;
1355 	}
1356 	(void) rw_unlock(&svc_fd_lock);
1357 /* LINTED pointer alignment */
1358 	msg = SVCEXT(xprt)->msg;
1359 /* LINTED pointer alignment */
1360 	r = SVCEXT(xprt)->req;
1361 /* LINTED pointer alignment */
1362 	cred_area = SVCEXT(xprt)->cred_area;
1363 	msg->rm_call.cb_cred.oa_base = cred_area;
1364 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1365 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1366 
1367 	/* receive msgs from xprt (support batch calls) */
1368 	do {
1369 		bool_t dispatch;
1370 
1371 		if ((dispatch = SVC_RECV(xprt, msg)))
1372 			(void) _svc_prog_dispatch(xprt, msg, r);
1373 		/*
1374 		 * Check if the xprt has been disconnected in a recursive call
1375 		 * in the service dispatch routine. If so, then break
1376 		 */
1377 		(void) rw_rdlock(&svc_fd_lock);
1378 		if (xprt != svc_xports[fd]) {
1379 			(void) rw_unlock(&svc_fd_lock);
1380 			break;
1381 		}
1382 		(void) rw_unlock(&svc_fd_lock);
1383 
1384 		/*
1385 		 * Call cleanup procedure if set.
1386 		 */
1387 		if (__proc_cleanup_cb != NULL && dispatch)
1388 			(*__proc_cleanup_cb)(xprt);
1389 
1390 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1391 			SVC_DESTROY(xprt);
1392 			break;
1393 		}
1394 	} while (stat == XPRT_MOREREQS);
1395 }
1396 
1397 int
1398 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1399 {
1400 	struct svc_callout *s;
1401 	enum auth_stat why;
1402 	int prog_found;
1403 	rpcvers_t low_vers;
1404 	rpcvers_t high_vers;
1405 	void (*disp_fn)();
1406 
1407 	r->rq_xprt = xprt;
1408 	r->rq_prog = msg->rm_call.cb_prog;
1409 	r->rq_vers = msg->rm_call.cb_vers;
1410 	r->rq_proc = msg->rm_call.cb_proc;
1411 	r->rq_cred = msg->rm_call.cb_cred;
1412 /* LINTED pointer alignment */
1413 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1414 /* LINTED pointer alignment */
1415 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1416 
1417 	/* first authenticate the message */
1418 	/* Check for null flavor and bypass these calls if possible */
1419 
1420 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1421 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1422 		r->rq_xprt->xp_verf.oa_length = 0;
1423 	} else {
1424 		bool_t no_dispatch;
1425 
1426 		if ((why = __gss_authenticate(r, msg,
1427 			&no_dispatch)) != AUTH_OK) {
1428 			svcerr_auth(xprt, why);
1429 			return (0);
1430 		}
1431 		if (no_dispatch)
1432 			return (0);
1433 	}
1434 	/* match message with a registered service */
1435 	prog_found = FALSE;
1436 	low_vers = (rpcvers_t)(0 - 1);
1437 	high_vers = 0;
1438 	(void) rw_rdlock(&svc_lock);
1439 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1440 		if (s->sc_prog == r->rq_prog) {
1441 			prog_found = TRUE;
1442 			if (s->sc_vers == r->rq_vers) {
1443 				if ((xprt->xp_netid == NULL) ||
1444 				    (s->sc_netid == NULL) ||
1445 				    (strcmp(xprt->xp_netid,
1446 					    s->sc_netid) == 0)) {
1447 					disp_fn = (*s->sc_dispatch);
1448 					(void) rw_unlock(&svc_lock);
1449 					disp_fn(r, xprt);
1450 					return (1);
1451 				}
1452 				prog_found = FALSE;
1453 			}
1454 			if (s->sc_vers < low_vers)
1455 				low_vers = s->sc_vers;
1456 			if (s->sc_vers > high_vers)
1457 				high_vers = s->sc_vers;
1458 		}		/* found correct program */
1459 	}
1460 	(void) rw_unlock(&svc_lock);
1461 
1462 	/*
1463 	 * if we got here, the program or version
1464 	 * is not served ...
1465 	 */
1466 	if (prog_found) {
1467 /* LINTED pointer alignment */
1468 		if (!version_keepquiet(xprt))
1469 			svcerr_progvers(xprt, low_vers, high_vers);
1470 	} else {
1471 		svcerr_noprog(xprt);
1472 	}
1473 	return (0);
1474 }
1475 
1476 /* ******************* SVCXPRT allocation and deallocation ***************** */
1477 
1478 /*
1479  * svc_xprt_alloc() - allocate a service transport handle
1480  */
1481 SVCXPRT *
1482 svc_xprt_alloc(void)
1483 {
1484 	SVCXPRT		*xprt = NULL;
1485 	SVCXPRT_EXT	*xt = NULL;
1486 	SVCXPRT_LIST	*xlist = NULL;
1487 	struct rpc_msg	*msg = NULL;
1488 	struct svc_req	*req = NULL;
1489 	char		*cred_area = NULL;
1490 
1491 	if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1492 		goto err_exit;
1493 
1494 	if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1495 		goto err_exit;
1496 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1497 
1498 	if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1499 		goto err_exit;
1500 	xt->my_xlist = xlist;
1501 	xlist->xprt = xprt;
1502 
1503 	if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1504 		goto err_exit;
1505 	xt->msg = msg;
1506 
1507 	if ((req = malloc(sizeof (struct svc_req))) == NULL)
1508 		goto err_exit;
1509 	xt->req = req;
1510 
1511 	if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1512 		goto err_exit;
1513 	xt->cred_area = cred_area;
1514 
1515 /* LINTED pointer alignment */
1516 	(void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1517 	return (xprt);
1518 
1519 err_exit:
1520 	svc_xprt_free(xprt);
1521 	return (NULL);
1522 }
1523 
1524 
1525 /*
1526  * svc_xprt_free() - free a service handle
1527  */
1528 void
1529 svc_xprt_free(SVCXPRT *xprt)
1530 {
1531 /* LINTED pointer alignment */
1532 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1533 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist : NULL;
1534 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1535 	struct svc_req	*req = xt ? xt->req : NULL;
1536 	char		*cred_area = xt ? xt->cred_area : NULL;
1537 
1538 	if (xprt)
1539 		free(xprt);
1540 	if (xt)
1541 		free(xt);
1542 	if (my_xlist)
1543 		free(my_xlist);
1544 	if (msg)
1545 		free(msg);
1546 	if (req)
1547 		free(req);
1548 	if (cred_area)
1549 		free(cred_area);
1550 }
1551 
1552 
1553 /*
1554  * svc_xprt_destroy() - free parent and child xprt list
1555  */
1556 void
1557 svc_xprt_destroy(SVCXPRT *xprt)
1558 {
1559 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1560 	int		type;
1561 
1562 /* LINTED pointer alignment */
1563 	if (SVCEXT(xprt)->parent)
1564 /* LINTED pointer alignment */
1565 		xprt = SVCEXT(xprt)->parent;
1566 /* LINTED pointer alignment */
1567 	type = svc_type(xprt);
1568 /* LINTED pointer alignment */
1569 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1570 		xnext = xlist->next;
1571 		xprt = xlist->xprt;
1572 		switch (type) {
1573 		case SVC_DGRAM:
1574 			svc_dg_xprtfree(xprt);
1575 			break;
1576 		case SVC_RENDEZVOUS:
1577 			svc_vc_xprtfree(xprt);
1578 			break;
1579 		case SVC_CONNECTION:
1580 			svc_fd_xprtfree(xprt);
1581 			break;
1582 		case SVC_DOOR:
1583 			svc_door_xprtfree(xprt);
1584 			break;
1585 		}
1586 	}
1587 }
1588 
1589 
1590 /*
1591  * svc_copy() - make a copy of parent
1592  */
1593 SVCXPRT *
1594 svc_copy(SVCXPRT *xprt)
1595 {
1596 /* LINTED pointer alignment */
1597 	switch (svc_type(xprt)) {
1598 	case SVC_DGRAM:
1599 		return (svc_dg_xprtcopy(xprt));
1600 	case SVC_RENDEZVOUS:
1601 		return (svc_vc_xprtcopy(xprt));
1602 	case SVC_CONNECTION:
1603 		return (svc_fd_xprtcopy(xprt));
1604 	}
1605 	return (NULL);
1606 }
1607 
1608 
1609 /*
1610  * _svc_destroy_private() - private SVC_DESTROY interface
1611  */
1612 void
1613 _svc_destroy_private(SVCXPRT *xprt)
1614 {
1615 /* LINTED pointer alignment */
1616 	switch (svc_type(xprt)) {
1617 	case SVC_DGRAM:
1618 		_svc_dg_destroy_private(xprt);
1619 		break;
1620 	case SVC_RENDEZVOUS:
1621 	case SVC_CONNECTION:
1622 		_svc_vc_destroy_private(xprt, TRUE);
1623 		break;
1624 	}
1625 }
1626 
1627 /*
1628  * svc_get_local_cred() - fetch local user credentials.  This always
1629  * works over doors based transports.  For local transports, this
1630  * does not yield correct results unless the __rpc_negotiate_uid()
1631  * call has been invoked to enable this feature.
1632  */
1633 bool_t
1634 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1635 {
1636 	/* LINTED pointer alignment */
1637 	if (svc_type(xprt) == SVC_DOOR)
1638 		return (__svc_get_door_cred(xprt, lcred));
1639 	return (__rpc_get_local_cred(xprt, lcred));
1640 }
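/*
 * Hedged usage sketch inside a service procedure (the svc_local_cred_t
 * members are omitted here; only the call shape is shown):
 *
 *	svc_local_cred_t lcred;
 *
 *	if (!svc_get_local_cred(req->rq_xprt, &lcred)) {
 *		svcerr_weakauth(req->rq_xprt);
 *		return;
 *	}
 *	... inspect lcred (credentials of the local caller) ...
 */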
1641 
1642 
1643 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1644 
1645 /*
1646  * The dup caching routines below provide a cache of received
1647  * transactions. RPC service routines can use this to detect
1648  * retransmissions and re-send a non-failure response. An LRU
1649  * scheme is used to find entries to evict from the cache,
1650  * though only DUP_DONE entries are placed on the lru list.
1651  * The routines were written towards development of a generic
1652  * SVC_DUP() interface, which can be expanded to encompass the
1653  * svc_dg_enablecache() routines as well. The cache is currently
1654  * private to the automounter.
1655  */
1656 
1657 
1658 /* dupcache header contains xprt specific information */
1659 struct dupcache {
1660 	rwlock_t	dc_lock;
1661 	time_t		dc_time;
1662 	int		dc_buckets;
1663 	int		dc_maxsz;
1664 	int		dc_basis;
1665 	struct dupreq 	*dc_mru;
1666 	struct dupreq	**dc_hashtbl;
1667 };
1668 
1669 /*
1670  * private duplicate cache request routines
1671  */
1672 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1673 		struct dupcache *, uint32_t, uint32_t);
1674 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1675 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1676 		struct dupcache *, uint32_t, uint32_t, time_t);
1677 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1678 		struct dupcache *, uint32_t, uint32_t);
1679 #ifdef DUP_DEBUG
1680 static void __svc_dupcache_debug(struct dupcache *);
1681 #endif /* DUP_DEBUG */
1682 
1683 /* default parameters for the dupcache */
1684 #define	DUPCACHE_BUCKETS	257
1685 #define	DUPCACHE_TIME		900
1686 #define	DUPCACHE_MAXSZ		INT_MAX
1687 
1688 /*
1689  * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
1690  * initialize the duprequest cache and assign it to the xprt_cache
1691  * Use default values depending on the cache condition and basis.
1692  * return TRUE on success and FALSE on failure
1693  */
1694 bool_t
1695 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1696 {
1697 	static mutex_t initdc_lock = DEFAULTMUTEX;
1698 	int i;
1699 	struct dupcache *dc;
1700 
1701 	(void) mutex_lock(&initdc_lock);
1702 	if (*xprt_cache != NULL) { /* do only once per xprt */
1703 		(void) mutex_unlock(&initdc_lock);
1704 		syslog(LOG_ERR,
1705 			"__svc_dupcache_init: multiply defined dup cache");
1706 		return (FALSE);
1707 	}
1708 
1709 	switch (basis) {
1710 	case DUPCACHE_FIXEDTIME:
1711 		dc = malloc(sizeof (struct dupcache));
1712 		if (dc == NULL) {
1713 			(void) mutex_unlock(&initdc_lock);
1714 			syslog(LOG_ERR,
1715 				"__svc_dupcache_init: memory alloc failed");
1716 			return (FALSE);
1717 		}
1718 		(void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1719 		if (condition != NULL)
1720 			dc->dc_time = *((time_t *)condition);
1721 		else
1722 			dc->dc_time = DUPCACHE_TIME;
1723 		dc->dc_buckets = DUPCACHE_BUCKETS;
1724 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1725 		dc->dc_basis = basis;
1726 		dc->dc_mru = NULL;
1727 		dc->dc_hashtbl = malloc(dc->dc_buckets *
1728 						sizeof (struct dupreq *));
1729 		if (dc->dc_hashtbl == NULL) {
1730 			free(dc);
1731 			(void) mutex_unlock(&initdc_lock);
1732 			syslog(LOG_ERR,
1733 				"__svc_dupcache_init: memory alloc failed");
1734 			return (FALSE);
1735 		}
1736 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1737 			dc->dc_hashtbl[i] = NULL;
1738 		*xprt_cache = (char *)dc;
1739 		break;
1740 	default:
1741 		(void) mutex_unlock(&initdc_lock);
1742 		syslog(LOG_ERR,
1743 			"__svc_dupcache_init: undefined dup cache basis");
1744 		return (FALSE);
1745 	}
1746 
1747 	(void) mutex_unlock(&initdc_lock);
1748 
1749 	return (TRUE);
1750 }
1751 
1752 /*
1753  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1754  *	char *xprt_cache)
1755  * searches the request cache. Creates an entry and returns DUP_NEW if
1756  * the request is not found in the cache.  If it is found, then it
1757  * returns the state of the request (in progress, drop, or done) and
1758  * also allocates, and passes back results to the user (if any) in
1759  * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1760  */
1761 int
1762 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1763 	char *xprt_cache)
1764 {
1765 	uint32_t drxid, drhash;
1766 	int rc;
1767 	struct dupreq *dr = NULL;
1768 	time_t timenow = time(NULL);
1769 
1770 	/* LINTED pointer alignment */
1771 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1772 
1773 	if (dc == NULL) {
1774 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1775 		return (DUP_ERROR);
1776 	}
1777 
1778 	/* get the xid of the request */
1779 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1780 		syslog(LOG_ERR, "__svc_dup: xid error");
1781 		return (DUP_ERROR);
1782 	}
1783 	drhash = drxid % dc->dc_buckets;
1784 
1785 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1786 			drhash)) != DUP_NEW)
1787 		return (rc);
1788 
1789 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1790 		return (DUP_ERROR);
1791 
1792 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1793 			== DUP_ERROR)
1794 		return (rc);
1795 
1796 	return (DUP_NEW);
1797 }
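/*
 * Hedged usage sketch (the cache handle and reply buffers are
 * hypothetical): a service wires the dup-cache entry points together per
 * transport, calling __svc_dup() before doing the work and
 * __svc_dupdone() after replying (or after deciding to drop):
 *
 *	static char *dupcache;			// one per transport
 *	time_t window = DUPCACHE_TIME;
 *	caddr_t resp;
 *	uint_t respsz;
 *
 *	if (dupcache == NULL &&
 *	    !__svc_dupcache_init(&window, DUPCACHE_FIXEDTIME, &dupcache))
 *		return;				// no cache available
 *
 *	switch (__svc_dup(req, &resp, &respsz, dupcache)) {
 *	case DUP_NEW:		// first sighting: serve, then mark done
 *		... dispatch and build the reply buffer ...
 *		(void) __svc_dupdone(req, resp, respsz, DUP_DONE, dupcache);
 *		break;
 *	case DUP_DONE:		// retransmission: resend the cached reply
 *		... resend resp/respsz, then free(resp) ...
 *		break;
 *	case DUP_INPROGRESS:	// original still being served: drop it
 *	case DUP_DROP:
 *	case DUP_ERROR:
 *		break;
 *	}
 */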
1798 
1799 
1800 
1801 /*
1802  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1803  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1804  * 		uint32_t drhash)
1805  * Checks to see whether an entry already exists in the cache. If it does
1806  * copy back into the resp_buf, if appropriate. Return the status of
1807  * the request, or DUP_NEW if the entry is not in the cache
1808  */
1809 static int
1810 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1811 		struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1812 {
1813 	struct dupreq *dr = NULL;
1814 
1815 	(void) rw_rdlock(&(dc->dc_lock));
1816 	dr = dc->dc_hashtbl[drhash];
1817 	while (dr != NULL) {
1818 		if (dr->dr_xid == drxid &&
1819 		    dr->dr_proc == req->rq_proc &&
1820 		    dr->dr_prog == req->rq_prog &&
1821 		    dr->dr_vers == req->rq_vers &&
1822 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1823 		    memcmp(dr->dr_addr.buf,
1824 				req->rq_xprt->xp_rtaddr.buf,
1825 				dr->dr_addr.len) == 0) { /* entry found */
1826 			if (dr->dr_hash != drhash) {
1827 				/* sanity check */
1828 				(void) rw_unlock((&dc->dc_lock));
1829 				syslog(LOG_ERR,
1830 					"__svc_dupcache_check: hashing error");
1831 				return (DUP_ERROR);
1832 			}
1833 
1834 			/*
1835 			 * Return results for requests on the lru list, if
1836 			 * appropriate. Requests must be DUP_DROP or DUP_DONE
1837 			 * to have a result. A NULL buffer in the cache
1838 			 * implies no results were sent during dupdone.
1839 			 * A NULL buffer in the call implies the caller is
1840 			 * not interested in results.
1841 			 */
1842 			if (((dr->dr_status == DUP_DONE) ||
1843 				(dr->dr_status == DUP_DROP)) &&
1844 				resp_buf != NULL &&
1845 				dr->dr_resp.buf != NULL) {
1846 				*resp_buf = malloc(dr->dr_resp.len);
1847 				if (*resp_buf == NULL) {
1848 					syslog(LOG_ERR,
1849 					"__svc_dupcache_check: malloc failed");
1850 					(void) rw_unlock(&(dc->dc_lock));
1851 					return (DUP_ERROR);
1852 				}
1853 				(void) memset(*resp_buf, 0, dr->dr_resp.len);
1854 				(void) memcpy(*resp_buf, dr->dr_resp.buf,
1855 					dr->dr_resp.len);
1856 				*resp_bufsz = dr->dr_resp.len;
1857 			} else {
1858 				/* no result */
1859 				if (resp_buf)
1860 					*resp_buf = NULL;
1861 				if (resp_bufsz)
1862 					*resp_bufsz = 0;
1863 			}
1864 			(void) rw_unlock(&(dc->dc_lock));
1865 			return (dr->dr_status);
1866 		}
1867 		dr = dr->dr_chain;
1868 	}
1869 	(void) rw_unlock(&(dc->dc_lock));
1870 	return (DUP_NEW);
1871 }
1872 
1873 /*
1874  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1875  * Return a victim dupreq entry to the caller, depending on cache policy.
1876  */
1877 static struct dupreq *
1878 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1879 {
1880 	struct dupreq *dr = NULL;
1881 
1882 	switch (dc->dc_basis) {
1883 	case DUPCACHE_FIXEDTIME:
1884 		/*
1885 		 * The hash policy is to free up a bit of the hash
1886 		 * table before allocating a new entry as the victim.
1887 		 * Freeing up the hash table each time should split
1888 		 * the cost of keeping the hash table clean among threads.
1889 		 * Note that only DONE or DROPPED entries are on the lru
1890 		 * list but we do a sanity check anyway.
1891 		 */
1892 		(void) rw_wrlock(&(dc->dc_lock));
1893 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1894 				((timenow - dr->dr_time) > dc->dc_time)) {
1895 			/* clean and then free the entry */
1896 			if (dr->dr_status != DUP_DONE &&
1897 				dr->dr_status != DUP_DROP) {
1898 				/*
1899 				 * The LRU list can't contain an
1900 				 * entry where the status is other than
1901 				 * DUP_DONE or DUP_DROP.
1902 				 */
1903 				syslog(LOG_ERR,
1904 					"__svc_dupcache_victim: bad victim");
1905 #ifdef DUP_DEBUG
1906 				/*
1907 				 * Need to hold the reader/writers lock to
1908 				 * print the cache info, since we already
1909 				 * hold the writers lock, we shall continue
1910 				 * calling __svc_dupcache_debug()
1911 				 */
1912 				__svc_dupcache_debug(dc);
1913 #endif /* DUP_DEBUG */
1914 				(void) rw_unlock(&(dc->dc_lock));
1915 				return (NULL);
1916 			}
1917 			/* free buffers */
1918 			if (dr->dr_resp.buf) {
1919 				free(dr->dr_resp.buf);
1920 				dr->dr_resp.buf = NULL;
1921 			}
1922 			if (dr->dr_addr.buf) {
1923 				free(dr->dr_addr.buf);
1924 				dr->dr_addr.buf = NULL;
1925 			}
1926 
1927 			/* unhash the entry */
1928 			if (dr->dr_chain)
1929 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1930 			if (dr->dr_prevchain)
1931 				dr->dr_prevchain->dr_chain = dr->dr_chain;
1932 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
1933 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1934 
1935 			/* modify the lru pointers */
1936 			if (dc->dc_mru == dr) {
1937 				dc->dc_mru = NULL;
1938 			} else {
1939 				dc->dc_mru->dr_next = dr->dr_next;
1940 				dr->dr_next->dr_prev = dc->dc_mru;
1941 			}
1942 			free(dr);
1943 			dr = NULL;
1944 		}
1945 		(void) rw_unlock(&(dc->dc_lock));
1946 
1947 		/*
1948 		 * Allocate and return new clean entry as victim
1949 		 */
1950 		if ((dr = malloc(sizeof (*dr))) == NULL) {
1951 			syslog(LOG_ERR,
1952 				"__svc_dupcache_victim: malloc failed");
1953 			return (NULL);
1954 		}
1955 		(void) memset(dr, 0, sizeof (*dr));
1956 		return (dr);
1957 	default:
1958 		syslog(LOG_ERR,
1959 			"__svc_dupcache_victim: undefined dupcache basis");
1960 		return (NULL);
1961 	}
1962 }
1963 
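/*
 * Worked example of the DUPCACHE_FIXEDTIME policy above (times are
 * illustrative): with dc_time set to 15 seconds, an entry marked done at
 * t = 100 becomes eligible for reaping by any victim call made at
 * t > 115.  Because the reap loop starts at dc_mru->dr_next (the least
 * recently used entry) and stops at the first entry that is still fresh,
 * each call frees only the expired tail of the lru list.
 */
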
1964 /*
1965  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1966  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1967  * build new duprequest entry and then insert into the cache
1968  */
1969 static int
1970 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1971 	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1972 {
1973 	dr->dr_xid = drxid;
1974 	dr->dr_prog = req->rq_prog;
1975 	dr->dr_vers = req->rq_vers;
1976 	dr->dr_proc = req->rq_proc;
1977 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1978 	dr->dr_addr.len = dr->dr_addr.maxlen;
1979 	if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1980 		syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1981 		free(dr);
1982 		return (DUP_ERROR);
1983 	}
1984 	(void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1985 	(void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1986 							dr->dr_addr.len);
1987 	dr->dr_resp.buf = NULL;
1988 	dr->dr_resp.maxlen = 0;
1989 	dr->dr_resp.len = 0;
1990 	dr->dr_status = DUP_INPROGRESS;
1991 	dr->dr_time = timenow;
1992 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
1993 
1994 	/* place entry at head of hash table */
1995 	(void) rw_wrlock(&(dc->dc_lock));
1996 	dr->dr_chain = dc->dc_hashtbl[drhash];
1997 	dr->dr_prevchain = NULL;
1998 	if (dc->dc_hashtbl[drhash] != NULL)
1999 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2000 	dc->dc_hashtbl[drhash] = dr;
2001 	(void) rw_unlock(&(dc->dc_lock));
2002 	return (DUP_NEW);
2003 }
2004 
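/*
 * Illustrative picture of the state left by __svc_dupcache_enter(),
 * with hypothetical xids and drhash = drxid % dc_buckets:
 *
 *	dc_hashtbl[drhash] --> new dr --dr_chain--> older dr --> NULL
 *	(older dr->dr_prevchain == new dr; new dr->dr_prevchain == NULL)
 *
 * The new entry is hashed but not yet on the lru list; it is linked into
 * the lru list by __svc_dupcache_update() once the request is marked
 * DUP_DONE or DUP_DROP.
 */
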
2005 /*
2006  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2007  *		int status, char *xprt_cache)
2008  * Marks the request done (status must be DUP_DONE or DUP_DROP) and
2009  * stores the response.  Sets the lru pointers to make the entry the
2010  * most recently used.  Returns DUP_ERROR or status.
2011  */
2012 int
2013 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2014 		int status, char *xprt_cache)
2015 {
2016 	uint32_t drxid, drhash;
2017 	int rc;
2018 
2019 	/* LINTED pointer alignment */
2020 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2021 
2022 	if (dc == NULL) {
2023 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2024 		return (DUP_ERROR);
2025 	}
2026 
2027 	if (status != DUP_DONE && status != DUP_DROP) {
2028 		syslog(LOG_ERR, "__svc_dupdone: invalid status:"
2029 			" must be DUP_DONE or DUP_DROP");
2030 		return (DUP_ERROR);
2031 	}
2032 
2033 	/* find the xid of the entry in the cache */
2034 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2035 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2036 		return (DUP_ERROR);
2037 	}
2038 	drhash = drxid % dc->dc_buckets;
2039 
2040 	/* update the status of the entry and result buffers, if required */
2041 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2042 			dc, drxid, drhash)) == DUP_ERROR) {
2043 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2044 		return (DUP_ERROR);
2045 	}
2046 
2047 	return (rc);
2048 }
2049 
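/*
 * Non-compiled sketch of a typical __svc_dupdone() call once the service
 * procedure has built its reply; "reply" and "reply_sz" are hypothetical
 * names for the marshalled response buffer and its length.
 */
#if 0
	if (__svc_dupdone(req, reply, reply_sz, DUP_DONE, dupcache)
	    == DUP_ERROR)
		syslog(LOG_ERR, "example: could not cache reply");
#endif
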
2050 /*
2051  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2052  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2053  * 	uint32_t drhash)
2054  * Check if the entry exists in the dupcache.  If so, update its status,
2055  * time, and buffer, if appropriate.  It is possible, but unlikely, for
2056  * DONE requests not to exist in the cache.  Return DUP_ERROR or status.
2057  */
2058 static int
2059 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2060 	int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2061 {
2062 	struct dupreq *dr = NULL;
2063 	time_t timenow = time(NULL);
2064 
2065 	(void) rw_wrlock(&(dc->dc_lock));
2066 	dr = dc->dc_hashtbl[drhash];
2067 	while (dr != NULL) {
2068 		if (dr->dr_xid == drxid &&
2069 		    dr->dr_proc == req->rq_proc &&
2070 		    dr->dr_prog == req->rq_prog &&
2071 		    dr->dr_vers == req->rq_vers &&
2072 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2073 		    memcmp(dr->dr_addr.buf,
2074 				req->rq_xprt->xp_rtaddr.buf,
2075 				dr->dr_addr.len) == 0) { /* entry found */
2076 			if (dr->dr_hash != drhash) {
2077 				/* sanity check */
2078 				(void) rw_unlock(&(dc->dc_lock));
2079 				syslog(LOG_ERR,
2080 				"__svc_dupdone: hashing error");
2081 				return (DUP_ERROR);
2082 			}
2083 
2084 			/* store the results if the buffer is not NULL */
2085 			if (resp_buf != NULL) {
2086 				if ((dr->dr_resp.buf =
2087 						malloc(resp_bufsz)) == NULL) {
2088 					(void) rw_unlock(&(dc->dc_lock));
2089 					syslog(LOG_ERR,
2090 						"__svc_dupdone: malloc failed");
2091 					return (DUP_ERROR);
2092 				}
2093 				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2094 				(void) memcpy(dr->dr_resp.buf, resp_buf,
2095 					(uint_t)resp_bufsz);
2096 				dr->dr_resp.len = resp_bufsz;
2097 			}
2098 
2099 			/* update status and done time */
2100 			dr->dr_status = status;
2101 			dr->dr_time = timenow;
2102 
2103 			/* link the entry in at the mru position */
2104 			if (dc->dc_mru == NULL) {
2105 				dr->dr_next = dr;
2106 				dr->dr_prev = dr;
2107 			} else {
2108 				dr->dr_next = dc->dc_mru->dr_next;
2109 				dc->dc_mru->dr_next->dr_prev = dr;
2110 				dr->dr_prev = dc->dc_mru;
2111 				dc->dc_mru->dr_next = dr;
2112 			}
2113 			dc->dc_mru = dr;
2114 
2115 			(void) rw_unlock(&(dc->dc_lock));
2116 			return (status);
2117 		}
2118 		dr = dr->dr_chain;
2119 	}
2120 	(void) rw_unlock(&(dc->dc_lock));
2121 	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2122 	return (DUP_ERROR);
2123 }
2124 
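/*
 * Shape of the circular lru list maintained above, assuming three done
 * entries A, B and C with C the most recently used:
 *
 *	dc_mru --> C --dr_next--> A --dr_next--> B --dr_next--> C ...
 *	(dr_prev links run in the opposite direction)
 *
 * dc_mru->dr_next is therefore always the least recently used entry,
 * which is why __svc_dupcache_victim() starts its reap loop there.
 */
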
2125 #ifdef DUP_DEBUG
2126 /*
2127  * __svc_dupcache_debug(struct dupcache *dc)
2128  * Print the contents of the hash table and the lru list.
2129  *
2130  * This function requires the caller to hold the reader
2131  * or writer version of the duplicate request cache lock (dc_lock).
2132  */
2133 static void
2134 __svc_dupcache_debug(struct dupcache *dc)
2135 {
2136 	struct dupreq *dr = NULL;
2137 	int i;
2138 	bool_t bval;
2139 
2140 	fprintf(stderr, "   HASHTABLE\n");
2141 	for (i = 0; i < dc->dc_buckets; i++) {
2142 		bval = FALSE;
2143 		dr = dc->dc_hashtbl[i];
2144 		while (dr != NULL) {
2145 			if (!bval) {	/* ensures bucket printed only once */
2146 				fprintf(stderr, "    bucket : %d\n", i);
2147 				bval = TRUE;
2148 			}
2149 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2150 				dr->dr_xid, dr->dr_status, dr->dr_time);
2151 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2152 				(void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2153 			dr = dr->dr_chain;
2154 		}
2155 	}
2156 
2157 	fprintf(stderr, "   LRU\n");
2158 	if (dc->dc_mru) {
2159 		dr = dc->dc_mru->dr_next;	/* lru */
2160 		while (dr != dc->dc_mru) {
2161 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2162 				dr->dr_xid, dr->dr_status, dr->dr_time);
2163 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2164 				(void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2165 			dr = dr->dr_next;
2166 		}
2167 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2168 			dr->dr_xid, dr->dr_status, dr->dr_time);
2169 		fprintf(stderr, " dr: %p next: %p prev: %p\n", (void *)dr,
2170 			(void *)dr->dr_next, (void *)dr->dr_prev);
2171 	}
2172 }
2173 #endif /* DUP_DEBUG */
2174