1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28  */
29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31 /*
32  * Portions of this source code were derived from Berkeley
33  * 4.3 BSD under license from the Regents of the University of
34  * California.
35  */
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /*
40  * svc.c, Server-side remote procedure call interface.
41  *
42  * There are two sets of procedures here.  The xprt routines are
43  * for handling transport handles.  The svc routines handle the
44  * list of service routines.
45  *
46  */
47 
48 
49 #include "mt.h"
50 #include "rpc_mt.h"
51 #include <assert.h>
52 #include <errno.h>
53 #include <sys/types.h>
54 #include <stropts.h>
55 #include <sys/conf.h>
56 #include <rpc/rpc.h>
57 #ifdef PORTMAP
58 #include <rpc/pmap_clnt.h>
59 #endif
60 #include <sys/poll.h>
61 #include <netconfig.h>
62 #include <syslog.h>
63 #include <stdlib.h>
64 #include <unistd.h>
65 #include <string.h>
66 #include <limits.h>
67 
68 extern bool_t __svc_get_door_cred();
69 extern bool_t __rpc_get_local_cred();
70 
71 extern int use_portmapper;
72 extern bool_t __pmap_set(const rpcprog_t, const rpcvers_t,
73     const struct netconfig *, const struct netbuf *);
74 extern bool_t __pmap_unset(const rpcprog_t, const rpcvers_t);
75 
76 SVCXPRT **svc_xports;
77 static int nsvc_xports; 	/* total number of svc_xports allocated */
78 
79 XDR **svc_xdrs;		/* common XDR receive area */
80 int nsvc_xdrs;		/* total number of svc_xdrs allocated */
81 
82 int __rpc_use_pollfd_done;	/* to remove the limit on the number of connections */
83 
84 #define	NULL_SVC ((struct svc_callout *)0)
85 #define	RQCRED_SIZE	400		/* this size is excessive */
86 
87 /*
88  * The services list
89  * Each entry represents a set of procedures (an rpc program).
90  * The dispatch routine takes request structs and runs the
91  * appropriate procedure.
92  */
93 static struct svc_callout {
94 	struct svc_callout *sc_next;
95 	rpcprog_t	    sc_prog;
96 	rpcvers_t	    sc_vers;
97 	char		   *sc_netid;
98 	void		    (*sc_dispatch)();
99 } *svc_head;
100 extern rwlock_t	svc_lock;
101 
102 static struct svc_callout *svc_find();
103 int _svc_prog_dispatch();
104 void svc_getreq_common();
105 char *strdup();
106 
107 extern mutex_t	svc_door_mutex;
108 extern cond_t	svc_door_waitcv;
109 extern int	svc_ndoorfds;
110 extern SVCXPRT_LIST *_svc_xprtlist;
111 extern mutex_t xprtlist_lock;
112 extern void __svc_rm_from_xlist();
113 
114 extern fd_set _new_svc_fdset;
115 
116 /*
117  * If the allocated array of reactors is too small, it is grown by this
118  * margin, which reduces the number of reallocations.
119  */
120 #define	USER_FD_INCREMENT 5
121 
122 static void add_pollfd(int fd, short events);
123 static void remove_pollfd(int fd);
124 static void __svc_remove_input_of_fd(int fd);
125 
126 
127 /*
128  * Data used to handle a reactor:
129  * 	- one file descriptor we listen to,
130  *	- one callback we call if the fd fires,
131  *	- and a cookie passed as a parameter to the callback.
132  *
133  * The structure is an array indexed by file descriptor.  Each entry
134  * points to the first element of a doubly-linked list of callbacks.
135  * Only one callback may be associated with a given (fd, event) pair.
136  */
137 
138 struct _svc_user_fd_head;
139 
140 typedef struct {
141 	struct _svc_user_fd_node *next;
142 	struct _svc_user_fd_node *previous;
143 } _svc_user_link;
144 
145 typedef struct _svc_user_fd_node {
146 	/* The lnk field must be the first field. */
147 	_svc_user_link lnk;
148 	svc_input_id_t id;
149 	int	    fd;
150 	unsigned int   events;
151 	svc_callback_t callback;
152 	void*	  cookie;
153 } _svc_user_fd_node;
154 
155 typedef struct _svc_user_fd_head {
156 	/* The lnk field must be the first field. */
157 	_svc_user_link lnk;
158 	unsigned int mask;    /* logical OR of all sub-masks */
159 } _svc_user_fd_head;
160 
161 
162 /* Define some macros to manage the linked list. */
163 #define	LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
164 #define	LIST_CLR(l) \
165 	(l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
166 
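/*
 * Because the lnk field is first in both the head and node structures,
 * the list head can be cast to a node and used as its own end-of-list
 * sentinel.  A traversal therefore looks like the hedged sketch below
 * (illustrative only, not compiled into the library):
 */
#if 0
	_svc_user_fd_node *cur = svc_userfds[fd].lnk.next;

	while ((_svc_user_link *)cur != &(svc_userfds[fd].lnk)) {
		/* visit cur here */
		cur = cur->lnk.next;
	}
#endif
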
167 /* Array of defined reactors, indexed by file descriptor */
168 static _svc_user_fd_head *svc_userfds  = NULL;
169 
170 /* current size of the svc_userfds array */
171 static int svc_nuserfds = 0;
172 
173 /* Mutex to ensure MT-safe operations on the user fd callbacks. */
174 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
175 
176 
177 /*
178  * This structure supports constant-time algorithms.  An array of these
179  * structures (user_fd_mgt_array) grows as callbacks are registered.
180  * When the user registers a new callback, the address of the created
181  * node is stored in a cell of this array; the index of that cell is
182  * the unique identifier returned to the user.
183  *
184  * On removal, the id is given by the user; the free flag tells us
185  * whether the cell is in use.  If it is free, we return an error.
186  * Otherwise, we can free the structure pointed to by fd_node.
187  *
188  * On insertion, we take the next cell from the free list threaded
189  * through (first_free, next_free), giving a correct index in constant time.
190  */
191 
192 typedef struct _svc_management_user_fd {
193 	bool_t free;
194 	union {
195 		svc_input_id_t next_free;
196 		_svc_user_fd_node *fd_node;
197 	} data;
198 } _svc_management_user_fd;
199 
200 /* index of the first free element */
201 static svc_input_id_t first_free = (svc_input_id_t)-1;
202 /* its current size is svc_nmgtuserfds, defined below */
203 static _svc_management_user_fd* user_fd_mgt_array = NULL;
204 
205 /* current size of user_fd_mgt_array */
206 static int svc_nmgtuserfds = 0;
207 
208 
209 /* Define some macros to access data associated with registration ids. */
210 #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
211 #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
212 
213 #ifndef POLLSTANDARD
214 #define	POLLSTANDARD \
215 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
216 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
217 #endif
218 
219 /*
220  * To free an id, we mark its cell as free and push the cell onto the
221  * free list.
222  */
223 
224 static void
225 _svc_free_id(const svc_input_id_t id)
226 {
227 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
228 	user_fd_mgt_array[(int)id].free = TRUE;
229 	user_fd_mgt_array[(int)id].data.next_free = first_free;
230 	first_free = id;
231 }
232 
233 /*
234  * To get a free cell, we just have to take it from the free linked list and
235  * set the flag to "not free". This function also allocates new memory if
236  * necessary.
237  */
238 static svc_input_id_t
239 _svc_attribute_new_id(_svc_user_fd_node *node)
240 {
241 	int selected_index = (int)first_free;
242 	assert(node != NULL);
243 
244 	if (selected_index == -1) {
245 		/* Allocate new entries */
246 		int L_inOldSize = svc_nmgtuserfds;
247 		int i;
248 		_svc_management_user_fd *tmp;
249 
250 		tmp = realloc(user_fd_mgt_array, (svc_nmgtuserfds +
251 		    USER_FD_INCREMENT) * sizeof (_svc_management_user_fd));
252 
253 		if (tmp == NULL) {
254 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
255 			errno = ENOMEM;
256 			return ((svc_input_id_t)-1);
257 		}
258 		user_fd_mgt_array = tmp;
259 		svc_nmgtuserfds += USER_FD_INCREMENT;
260 
261 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
262 			_svc_free_id((svc_input_id_t)i);
263 		selected_index = (int)first_free;
264 	}
265 
266 	node->id = (svc_input_id_t)selected_index;
267 	first_free = user_fd_mgt_array[selected_index].data.next_free;
268 
269 	user_fd_mgt_array[selected_index].data.fd_node = node;
270 	user_fd_mgt_array[selected_index].free = FALSE;
271 
272 	return ((svc_input_id_t)selected_index);
273 }
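
/*
 * A hedged sketch of the id lifecycle under this scheme (illustrative
 * only; the assert shows that a freed cell is the next one handed out):
 */
#if 0
	_svc_user_fd_node a, b;
	svc_input_id_t ida, idb;

	ida = _svc_attribute_new_id(&a);	/* pops the free list */
	idb = _svc_attribute_new_id(&b);
	_svc_free_id(ida);			/* pushes the cell back */
	assert((int)_svc_attribute_new_id(&a) == (int)ida);
#endif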
274 
275 /*
276  * Process a pollfd event.  Scan all the associated callbacks whose mask
277  * shares at least one bit with the received events, and invoke them.
278  *
279  * If a POLLNVAL event is received, check whether at least one callback
280  * processes it; if not, remove the file descriptor from the poll set.
281  * If there is one, let the user do the work.
282  */
283 void
284 __svc_getreq_user(struct pollfd *pfd)
285 {
286 	int fd = pfd->fd;
287 	short revents = pfd->revents;
288 	bool_t invalHandled = FALSE;
289 	_svc_user_fd_node *node;
290 
291 	(void) mutex_lock(&svc_userfds_lock);
292 
293 	if ((fd < 0) || (fd >= svc_nuserfds)) {
294 		(void) mutex_unlock(&svc_userfds_lock);
295 		return;
296 	}
297 
298 	node = svc_userfds[fd].lnk.next;
299 
300 	/* check if at least one mask fits */
301 	if (0 == (revents & svc_userfds[fd].mask)) {
302 		(void) mutex_unlock(&svc_userfds_lock);
303 		return;
304 	}
305 
306 	while ((svc_userfds[fd].mask != 0) &&
307 	    ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
308 		_svc_user_fd_node *next = node->lnk.next;
309 
310 		/*
311 		 * If one of the received events matches an event this
312 		 * node listens for, invoke its callback.
313 		 */
314 		if (node->callback != NULL) {
315 			if (node->events & revents) {
316 				if (revents & POLLNVAL) {
317 					invalHandled = TRUE;
318 				}
319 
320 				/*
321 				 * The lock must be released before calling the
322 				 * user function, as this function can call
323 				 * svc_remove_input() for example.
324 				 */
325 				(void) mutex_unlock(&svc_userfds_lock);
326 				node->callback(node->id, node->fd,
327 				    node->events & revents, node->cookie);
328 				/*
329 				 * Do not use the node structure anymore, as it
330 				 * could have been deallocated by the previous
331 				 * callback.
332 				 */
333 				(void) mutex_lock(&svc_userfds_lock);
334 			}
335 		}
336 		node = next;
337 	}
338 
339 	if ((revents & POLLNVAL) && !invalHandled)
340 		__svc_remove_input_of_fd(fd);
341 	(void) mutex_unlock(&svc_userfds_lock);
342 }
343 
344 
345 /*
346  * Check if a file descriptor is associated with a user reactor.
347  * To do this, just check that the entry indexed by fd has a non-empty
348  * callback list (i.e., its event mask is non-zero).
349  */
350 bool_t
351 __is_a_userfd(int fd)
352 {
353 	/* Checks argument */
354 	if ((fd < 0) || (fd >= svc_nuserfds))
355 		return (FALSE);
356 	return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
357 }
358 
359 /* Free everything concerning user fds. */
360 /* Used in svc_run.c, hence not static. */
361 
362 void
363 __destroy_userfd(void)
364 {
365 	int one_fd;
366 	/* Clean user fd */
367 	if (svc_userfds != NULL) {
368 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
369 			_svc_user_fd_node *node;
370 
371 			node = svc_userfds[one_fd].lnk.next;
372 			while ((_svc_user_link *) node
373 			    != (_svc_user_link *) &(svc_userfds[one_fd])) {
374 				_svc_free_id(node->id);
375 				node = node->lnk.next;
376 				free(node->lnk.previous);
377 			}
378 		}
379 
380 		free(user_fd_mgt_array);
381 		user_fd_mgt_array = NULL;
382 		first_free = (svc_input_id_t)-1;
383 
384 		free(svc_userfds);
385 		svc_userfds = NULL;
386 		svc_nuserfds = 0;
387 	}
388 }
389 
390 /*
391  * Remove all the callbacks associated with an fd; useful when the fd
392  * is closed, for instance.
393  */
394 static void
395 __svc_remove_input_of_fd(int fd)
396 {
397 	_svc_user_fd_node *one_node;
398 
399 	if ((fd < 0) || (fd >= svc_nuserfds))
400 		return;
401 
402 	one_node = svc_userfds[fd].lnk.next;
403 	while ((_svc_user_link *) one_node
404 	    != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
405 		_svc_free_id(one_node->id);
406 		one_node = one_node->lnk.next;
407 		free(one_node->lnk.previous);
408 	}
409 
410 	LIST_CLR(svc_userfds[fd]);
411 	svc_userfds[fd].mask = 0;
412 }
413 
414 /*
415  * Allow the user to add an fd to the poll list.  On failure, return
416  * (svc_input_id_t)-1; otherwise, return the registration id.
417  */
418 
419 svc_input_id_t
420 svc_add_input(int user_fd, unsigned int events,
421     svc_callback_t user_callback, void *cookie)
422 {
423 	_svc_user_fd_node *new_node;
424 
425 	if (user_fd < 0) {
426 		errno = EINVAL;
427 		return ((svc_input_id_t)-1);
428 	}
429 
430 	if ((events == 0x0000) ||
431 	    (events & ~POLLSTANDARD)) {
433 		errno = EINVAL;
434 		return ((svc_input_id_t)-1);
435 	}
436 
437 	(void) mutex_lock(&svc_userfds_lock);
438 
439 	if ((user_fd < svc_nuserfds) &&
440 	    (svc_userfds[user_fd].mask & events) != 0) {
441 		/* Callback already registered */
442 		errno = EEXIST;
443 		(void) mutex_unlock(&svc_userfds_lock);
444 		return ((svc_input_id_t)-1);
445 	}
446 
447 	/* Handle memory allocation. */
448 	if (user_fd >= svc_nuserfds) {
449 		int oldSize = svc_nuserfds;
450 		int i;
451 		_svc_user_fd_head *tmp;
452 
453 		tmp = realloc(svc_userfds, ((user_fd + 1) +
454 		    USER_FD_INCREMENT) * sizeof (_svc_user_fd_head));
455 
456 		if (tmp == NULL) {
457 			syslog(LOG_ERR, "svc_add_input: out of memory");
458 			errno = ENOMEM;
459 			(void) mutex_unlock(&svc_userfds_lock);
460 			return ((svc_input_id_t)-1);
461 		}
462 		svc_userfds = tmp;
463 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
464 
465 		for (i = oldSize; i < svc_nuserfds; i++) {
466 			LIST_CLR(svc_userfds[i]);
467 			svc_userfds[i].mask = 0;
468 		}
469 	}
470 
471 	new_node = malloc(sizeof (_svc_user_fd_node));
472 	if (new_node == NULL) {
473 		syslog(LOG_ERR, "svc_add_input: out of memory");
474 		errno = ENOMEM;
475 		(void) mutex_unlock(&svc_userfds_lock);
476 		return ((svc_input_id_t)-1);
477 	}
478 
479 	/* create a new node */
480 	new_node->fd		= user_fd;
481 	new_node->events	= events;
482 	new_node->callback	= user_callback;
483 	new_node->cookie	= cookie;
484 
	if (_svc_attribute_new_id(new_node) == (svc_input_id_t)-1) {
		free(new_node);
		(void) mutex_unlock(&svc_userfds_lock);
		return ((svc_input_id_t)-1);
	}
486 
487 	/* Add the new element at the beginning of the list. */
488 	if (LIST_ISEMPTY(svc_userfds[user_fd])) {
489 		svc_userfds[user_fd].lnk.previous = new_node;
490 	}
491 	new_node->lnk.next = svc_userfds[user_fd].lnk.next;
492 	new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
493 
494 	svc_userfds[user_fd].lnk.next = new_node;
495 
496 	/* refresh global mask for this file descriptor */
497 	svc_userfds[user_fd].mask |= events;
498 
499 	/* refresh mask for the poll */
500 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
501 
502 	(void) mutex_unlock(&svc_userfds_lock);
503 	return (new_node->id);
504 }
505 
506 
507 int
508 svc_remove_input(svc_input_id_t id)
509 {
510 	_svc_user_fd_node* node;
511 	_svc_user_fd_node* next;
512 	_svc_user_fd_node* previous;
513 	int fd;		/* caching optim */
514 
515 	(void) mutex_lock(&svc_userfds_lock);
516 
517 	/* Immediately update data for id management */
518 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
519 	    is_free_id(id)) {
520 		errno = EINVAL;
521 		(void) mutex_unlock(&svc_userfds_lock);
522 		return (-1);
523 	}
524 
525 	node = node_from_id(id);
526 	assert(node != NULL);
527 
528 	_svc_free_id(id);
529 	next		= node->lnk.next;
530 	previous	= node->lnk.previous;
531 	fd		= node->fd; /* caching optim */
532 
533 	/* Remove this node from the list. */
534 	previous->lnk.next = next;
535 	next->lnk.previous = previous;
536 
537 	/* Remove the node flags from the global mask */
538 	svc_userfds[fd].mask ^= node->events;
539 
540 	free(node);
541 	if (svc_userfds[fd].mask == 0) {
542 		LIST_CLR(svc_userfds[fd]);
543 		assert(LIST_ISEMPTY(svc_userfds[fd]));
544 		remove_pollfd(fd);
545 	}
546 	/* A cleanup pass could shrink the arrays here to reclaim memory. */
547 
548 	(void) mutex_unlock(&svc_userfds_lock);
549 	return (0);
550 }
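
/*
 * Usage sketch for svc_add_input()/svc_remove_input() above (illustrative
 * only; "sock" is an application file descriptor and my_input is a
 * placeholder callback):
 */
#if 0
static void
my_input(svc_input_id_t id, int fd, unsigned int revents, void *cookie)
{
	if (revents & POLLIN) {
		/* consume the data waiting on fd */
	}
	if (revents & (POLLHUP|POLLERR))
		(void) svc_remove_input(id);	/* legal inside a callback */
}

	/* ... and in the server's setup code: */
	svc_input_id_t id = svc_add_input(sock, POLLIN|POLLHUP|POLLERR,
	    my_input, NULL);
	if ((int)id == -1)
		syslog(LOG_ERR, "svc_add_input: %m");
#endif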
551 
552 
553 /*
554  * Provides default service-side functions for authentication flavors
555  * that do not use all the fields in struct svc_auth_ops.
556  */
557 
558 /*ARGSUSED*/
559 static int
560 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
561 {
562 	return (*xfunc)(xdrs, xwhere);
563 }
564 
565 struct svc_auth_ops svc_auth_any_ops = {
566 	authany_wrap,
567 	authany_wrap,
568 };
569 
570 /*
571  * Return pointer to server authentication structure.
572  */
573 SVCAUTH *
574 __svc_get_svcauth(SVCXPRT *xprt)
575 {
576 /* LINTED pointer alignment */
577 	return (&SVC_XP_AUTH(xprt));
578 }
579 
580 /*
581  * A callback routine to cleanup after a procedure is executed.
582  */
583 void (*__proc_cleanup_cb)() = NULL;
584 
585 void *
586 __svc_set_proc_cleanup_cb(void *cb)
587 {
588 	void	*tmp = (void *)__proc_cleanup_cb;
589 
590 	__proc_cleanup_cb = (void (*)())cb;
591 	return (tmp);
592 }
593 
594 /* ***************  SVCXPRT related stuff **************** */
595 
596 
597 static int pollfd_shrinking = 1;
598 
599 
600 /*
601  * Add fd to svc_pollfd
602  */
603 static void
604 add_pollfd(int fd, short events)
605 {
606 	if (fd < FD_SETSIZE) {
607 		FD_SET(fd, &svc_fdset);
608 #if !defined(_LP64)
609 		FD_SET(fd, &_new_svc_fdset);
610 #endif
611 		svc_nfds++;
612 		svc_nfds_set++;
613 		if (fd >= svc_max_fd)
614 			svc_max_fd = fd + 1;
615 	}
616 	if (fd >= svc_max_pollfd)
617 		svc_max_pollfd = fd + 1;
618 	if (svc_max_pollfd > svc_pollfd_allocd) {
619 		int i = svc_pollfd_allocd;
620 		pollfd_t *tmp;
621 		do {
622 			svc_pollfd_allocd += POLLFD_EXTEND;
623 		} while (svc_max_pollfd > svc_pollfd_allocd);
624 		tmp = realloc(svc_pollfd,
625 					sizeof (pollfd_t) * svc_pollfd_allocd);
626 		if (tmp != NULL) {
627 			svc_pollfd = tmp;
628 			for (; i < svc_pollfd_allocd; i++)
629 				POLLFD_CLR(i, tmp);
630 		} else {
631 			/*
632 			 * Give an error message; undo the fdset settings
633 			 * made above; reset the pollfd_shrinking flag.
634 			 * Because of this, poll will not be done
635 			 * on these fds.
636 			 */
637 			if (fd < FD_SETSIZE) {
638 				FD_CLR(fd, &svc_fdset);
639 #if !defined(_LP64)
640 				FD_CLR(fd, &_new_svc_fdset);
641 #endif
642 				svc_nfds--;
643 				svc_nfds_set--;
644 				if (fd == (svc_max_fd - 1))
645 					svc_max_fd--;
646 			}
647 			if (fd == (svc_max_pollfd - 1))
648 				svc_max_pollfd--;
649 			pollfd_shrinking = 0;
650 			syslog(LOG_ERR, "add_pollfd: out of memory");
651 			_exit(1);
652 		}
653 	}
654 	svc_pollfd[fd].fd	= fd;
655 	svc_pollfd[fd].events	= events;
656 	svc_npollfds++;
657 	svc_npollfds_set++;
658 }
659 
660 /*
661  * The fd is still active, but only its bit in the fdset is cleared.
662  * Do not decrement svc_nfds or svc_npollfds.
663  */
664 void
665 clear_pollfd(int fd)
666 {
667 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
668 		FD_CLR(fd, &svc_fdset);
669 #if !defined(_LP64)
670 		FD_CLR(fd, &_new_svc_fdset);
671 #endif
672 		svc_nfds_set--;
673 	}
674 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
675 		POLLFD_CLR(fd, svc_pollfd);
676 		svc_npollfds_set--;
677 	}
678 }
679 
680 /*
681  * Set the bit in the fdset for an active fd so that poll() is done for it.
682  */
683 void
684 set_pollfd(int fd, short events)
685 {
686 	if (fd < FD_SETSIZE) {
687 		FD_SET(fd, &svc_fdset);
688 #if !defined(_LP64)
689 		FD_SET(fd, &_new_svc_fdset);
690 #endif
691 		svc_nfds_set++;
692 	}
693 	if (fd < svc_pollfd_allocd) {
694 		svc_pollfd[fd].fd	= fd;
695 		svc_pollfd[fd].events	= events;
696 		svc_npollfds_set++;
697 	}
698 }
699 
700 /*
701  * Remove an svc_pollfd entry; this does not shrink the memory.
702  */
703 static void
704 remove_pollfd(int fd)
705 {
706 	clear_pollfd(fd);
707 	if (fd == (svc_max_fd - 1))
708 		svc_max_fd--;
709 	svc_nfds--;
710 	if (fd == (svc_max_pollfd - 1))
711 		svc_max_pollfd--;
712 	svc_npollfds--;
713 }
714 
715 /*
716  * Delete an svc_pollfd entry and shrink the memory.
717  * Use remove_pollfd if you do not want to shrink.
718  */
719 static void
720 delete_pollfd(int fd)
721 {
722 	remove_pollfd(fd);
723 	if (pollfd_shrinking && svc_max_pollfd <
724 			(svc_pollfd_allocd - POLLFD_SHRINK)) {
725 		do {
726 			svc_pollfd_allocd -= POLLFD_SHRINK;
727 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
728 		svc_pollfd = realloc(svc_pollfd,
729 				sizeof (pollfd_t) * svc_pollfd_allocd);
730 		if (svc_pollfd == NULL) {
731 			syslog(LOG_ERR, "delete_pollfd: out of memory");
732 			_exit(1);
733 		}
734 	}
735 }
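
/*
 * POLLFD_EXTEND and POLLFD_SHRINK (defined in the private headers included
 * above) give the array simple hysteresis.  For illustration, assume both
 * are 64: an fd of 130 grows svc_pollfd_allocd from 128 to 192, and the
 * array is not shrunk back until svc_max_pollfd falls below 128, so a
 * connection count that oscillates around an allocation boundary does not
 * cause realloc() thrash.
 */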
736 
737 
738 /*
739  * Activate a transport handle.
740  */
741 void
742 xprt_register(const SVCXPRT *xprt)
743 {
744 	int fd = xprt->xp_fd;
745 #ifdef CALLBACK
746 	extern void (*_svc_getreqset_proc)();
747 #endif
748 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
749 
750 	(void) rw_wrlock(&svc_fd_lock);
751 	if (svc_xports == NULL) {
752 		/* allocate some small amount first */
753 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
754 		if (svc_xports == NULL) {
755 			syslog(LOG_ERR, "xprt_register: out of memory");
756 			_exit(1);
757 		}
758 		nsvc_xports = FD_INCREMENT;
759 
760 #ifdef CALLBACK
761 		/*
762 		 * XXX: This code does not keep track of the server state.
763 		 *
764 		 * This provides for callback support.  When a client
765 		 * receives a call from another client on the server fds,
766 		 * it calls _svc_getreqset_proc(), which returns
767 		 * after serving all the server requests.  Also look under
768 		 * clnt_dg.c and clnt_vc.c (the clnt_call part of them).
769 		 */
770 		_svc_getreqset_proc = svc_getreq_poll;
771 #endif
772 	}
773 
774 	while (fd >= nsvc_xports) {
775 		SVCXPRT **tmp_xprts = svc_xports;
776 
777 		/* time to expand svc_xports */
778 		tmp_xprts = realloc(svc_xports,
779 			sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
780 		if (tmp_xprts == NULL) {
781 			syslog(LOG_ERR, "xprt_register: out of memory");
782 			_exit(1);
783 		}
784 
785 		svc_xports = tmp_xprts;
786 		(void) memset(&svc_xports[nsvc_xports], 0,
787 					sizeof (SVCXPRT *) * FD_INCREMENT);
788 		nsvc_xports += FD_INCREMENT;
789 	}
790 
791 	svc_xports[fd] = (SVCXPRT *)xprt;
792 
793 	add_pollfd(fd, MASKVAL);
794 
795 	if (svc_polling) {
796 		char dummy;
797 
798 		/*
799 		 * This happens only in one of the MT modes.
800 		 * Wake up poller.
801 		 */
802 		(void) write(svc_pipe[1], &dummy, sizeof (dummy));
803 	}
804 	/*
805 	 * If already dispatching door based services, start
806 	 * dispatching TLI based services now.
807 	 */
808 	(void) mutex_lock(&svc_door_mutex);
809 	if (svc_ndoorfds > 0)
810 		(void) cond_signal(&svc_door_waitcv);
811 	(void) mutex_unlock(&svc_door_mutex);
812 
813 	if (svc_xdrs == NULL) {
814 		/* allocate initial chunk */
815 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
816 		if (svc_xdrs != NULL)
817 			nsvc_xdrs = FD_INCREMENT;
818 		else {
819 			syslog(LOG_ERR, "xprt_register: out of memory");
820 			_exit(1);
821 		}
822 	}
823 	(void) rw_unlock(&svc_fd_lock);
824 }
825 
826 /*
827  * De-activate a transport handle.
828  */
829 void
830 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
831 {
832 	int fd = xprt->xp_fd;
833 
834 	if (lock_not_held)
835 		(void) rw_wrlock(&svc_fd_lock);
836 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
837 		svc_xports[fd] = NULL;
838 		delete_pollfd(fd);
839 	}
840 	if (lock_not_held)
841 		(void) rw_unlock(&svc_fd_lock);
842 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
843 }
844 
845 void
846 xprt_unregister(const SVCXPRT *xprt)
847 {
848 	__xprt_unregister_private(xprt, TRUE);
849 }
850 
851 /* ********************** CALLOUT list related stuff ************* */
852 
853 /*
854  * Add a service program to the callout list.
855  * The dispatch routine will be called when a rpc request for this
856  * program number comes in.
857  */
858 bool_t
859 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
860 			void (*dispatch)(), const struct netconfig *nconf)
861 {
862 	struct svc_callout *prev;
863 	struct svc_callout *s, **s2;
864 	struct netconfig *tnconf;
865 	char *netid = NULL;
866 	int flag = 0;
867 
868 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
869 
870 	if (xprt->xp_netid) {
871 		netid = strdup(xprt->xp_netid);
872 		flag = 1;
873 	} else if (nconf && nconf->nc_netid) {
874 		netid = strdup(nconf->nc_netid);
875 		flag = 1;
876 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
877 			!= NULL) {
878 		netid = strdup(tnconf->nc_netid);
879 		flag = 1;
880 		freenetconfigent(tnconf);
881 	} /* must have been created with svc_raw_create */
882 	if ((netid == NULL) && (flag == 1))
883 		return (FALSE);
884 
885 	(void) rw_wrlock(&svc_lock);
886 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
887 		if (netid)
888 			free(netid);
889 		if (s->sc_dispatch == dispatch)
890 			goto rpcb_it; /* he is registering another xprt */
891 		(void) rw_unlock(&svc_lock);
892 		return (FALSE);
893 	}
894 	s = malloc(sizeof (struct svc_callout));
895 	if (s == NULL) {
896 		if (netid)
897 			free(netid);
898 		(void) rw_unlock(&svc_lock);
899 		return (FALSE);
900 	}
901 
902 	s->sc_prog = prog;
903 	s->sc_vers = vers;
904 	s->sc_dispatch = dispatch;
905 	s->sc_netid = netid;
906 	s->sc_next = NULL;
907 
908 	/*
909 	 * The ordering of transports is such that the most frequently used
910 	 * one appears first.  So add the new entry to the end of the list.
911 	 */
912 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
913 		;
914 	*s2 = s;
915 
916 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
917 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
918 			syslog(LOG_ERR, "svc_reg: strdup failed");
919 			free(netid);
920 			free(s);
921 			*s2 = NULL;
922 			(void) rw_unlock(&svc_lock);
923 			return (FALSE);
924 		}
925 
926 rpcb_it:
927 	(void) rw_unlock(&svc_lock);
928 	if (!nconf)
929 		return (TRUE);
930 
931 	/* now register the information with the local binder service */
932 	if (!use_portmapper)
933 		return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
934 	else
935 		return (__pmap_set(prog, vers, nconf, &xprt->xp_ltaddr));
936 	/*NOTREACHED*/
937 }
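
/*
 * Typical registration sequence (hedged sketch; MYPROG, MYVERS, and
 * my_dispatch are placeholders, and error handling is elided):
 */
#if 0
	struct netconfig *nconf = getnetconfigent("tcp");
	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);

	(void) svc_unreg(MYPROG, MYVERS);	/* clear any stale binding */
	if (!svc_reg(xprt, MYPROG, MYVERS, my_dispatch, nconf))
		syslog(LOG_ERR, "unable to register (MYPROG, MYVERS, tcp)");
	freenetconfigent(nconf);
	svc_run();
#endif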
938 
939 /*
940  * Remove a service program from the callout list.
941  */
942 void
943 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
944 {
945 	struct svc_callout *prev;
946 	struct svc_callout *s;
947 
948 	/* unregister the information anyway */
949 	if (!use_portmapper)
950 		(void) rpcb_unset(prog, vers, NULL);
951 	else
952 		(void) __pmap_unset(prog, vers);
953 	(void) rw_wrlock(&svc_lock);
954 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
955 		if (prev == NULL_SVC) {
956 			svc_head = s->sc_next;
957 		} else {
958 			prev->sc_next = s->sc_next;
959 		}
960 		s->sc_next = NULL_SVC;
961 		if (s->sc_netid)
962 			free(s->sc_netid);
963 		free(s);
964 	}
965 	(void) rw_unlock(&svc_lock);
966 }
967 
968 #ifdef PORTMAP
969 /*
970  * Add a service program to the callout list.
971  * The dispatch routine will be called when a rpc request for this
972  * program number comes in.
973  * For version 2 portmappers.
974  */
975 bool_t
976 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
977 					void (*dispatch)(), int protocol)
978 {
979 	struct svc_callout *prev;
980 	struct svc_callout *s;
981 	struct netconfig *nconf;
982 	char *netid = NULL;
983 	int flag = 0;
984 
985 	if (xprt->xp_netid) {
986 		netid = strdup(xprt->xp_netid);
987 		flag = 1;
988 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) &&
989 	    ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
990 		/* fill in missing netid field in SVCXPRT */
991 		netid = strdup(nconf->nc_netid);
992 		flag = 1;
993 		freenetconfigent(nconf);
994 	} /* must be svc_raw_create */
995 
996 	if ((netid == NULL) && (flag == 1))
997 		return (FALSE);
998 
999 	(void) rw_wrlock(&svc_lock);
1000 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
1001 		if (netid)
1002 			free(netid);
1003 		if (s->sc_dispatch == dispatch)
1004 			goto pmap_it;  /* he is registering another xprt */
1005 		(void) rw_unlock(&svc_lock);
1006 		return (FALSE);
1007 	}
1008 	s = malloc(sizeof (struct svc_callout));
1009 	if (s == (struct svc_callout *)0) {
1010 		if (netid)
1011 			free(netid);
1012 		(void) rw_unlock(&svc_lock);
1013 		return (FALSE);
1014 	}
1015 	s->sc_prog = prog;
1016 	s->sc_vers = vers;
1017 	s->sc_dispatch = dispatch;
1018 	s->sc_netid = netid;
1019 	s->sc_next = svc_head;
1020 	svc_head = s;
1021 
1022 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1023 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1024 			syslog(LOG_ERR, "svc_register: strdup failed");
1025 			free(netid);
1026 			svc_head = s->sc_next;
1027 			free(s);
1028 			(void) rw_unlock(&svc_lock);
1029 			return (FALSE);
1030 		}
1031 
1032 pmap_it:
1033 	(void) rw_unlock(&svc_lock);
1034 	/* now register the information with the local binder service */
1035 	if (protocol)
1036 		return (pmap_set(prog, vers, protocol, xprt->xp_port));
1037 	return (TRUE);
1038 }
1039 
1040 /*
1041  * Remove a service program from the callout list.
1042  * For version 2 portmappers.
1043  */
1044 void
1045 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1046 {
1047 	struct svc_callout *prev;
1048 	struct svc_callout *s;
1049 
1050 	(void) rw_wrlock(&svc_lock);
1051 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1052 		if (prev == NULL_SVC) {
1053 			svc_head = s->sc_next;
1054 		} else {
1055 			prev->sc_next = s->sc_next;
1056 		}
1057 		s->sc_next = NULL_SVC;
1058 		if (s->sc_netid)
1059 			free(s->sc_netid);
1060 		free(s);
1061 		/* unregister the information with the local binder service */
1062 		(void) pmap_unset(prog, vers);
1063 	}
1064 	(void) rw_unlock(&svc_lock);
1065 }
1066 #endif /* PORTMAP */
1067 
1068 /*
1069  * Search the callout list for a program number and return the callout
1070  * struct.
1071  * Also check the transport.  Many routines such as svc_unreg
1072  * don't supply a corresponding transport, so don't check the transport
1073  * if netid == NULL.
1074  */
1075 static struct svc_callout *
1076 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1077 {
1078 	struct svc_callout *s, *p;
1079 
1080 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1081 
1082 /*	assert(RW_WRITE_HELD(&svc_lock)); */
1083 	p = NULL_SVC;
1084 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1085 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1086 			((netid == NULL) || (s->sc_netid == NULL) ||
1087 			(strcmp(netid, s->sc_netid) == 0)))
1088 				break;
1089 		p = s;
1090 	}
1091 	*prev = p;
1092 	return (s);
1093 }
1094 
1095 
1096 /* ******************* REPLY GENERATION ROUTINES  ************ */
1097 
1098 /*
1099  * Send a reply to an rpc request
1100  */
1101 bool_t
1102 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1103 						const caddr_t xdr_location)
1104 {
1105 	struct rpc_msg rply;
1106 
1107 	rply.rm_direction = REPLY;
1108 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1109 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1110 	rply.acpted_rply.ar_stat = SUCCESS;
1111 	rply.acpted_rply.ar_results.where = xdr_location;
1112 	rply.acpted_rply.ar_results.proc = xdr_results;
1113 	return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1114 }
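
/*
 * In a dispatch routine, svc_sendreply() and the svcerr_*() calls below
 * are typically paired with svc_getargs()/svc_freeargs().  Hedged sketch
 * (the argument and result types are placeholders):
 */
#if 0
static void
my_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	my_args args;
	my_res res;

	(void) memset(&args, 0, sizeof (args));
	if (!svc_getargs(transp, (xdrproc_t)xdr_my_args, (caddr_t)&args)) {
		svcerr_decode(transp);
		return;
	}
	/* ... compute res from args ... */
	if (!svc_sendreply(transp, (xdrproc_t)xdr_my_res, (caddr_t)&res))
		svcerr_systemerr(transp);
	(void) svc_freeargs(transp, (xdrproc_t)xdr_my_args, (caddr_t)&args);
}
#endif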
1115 
1116 /*
1117  * No procedure error reply
1118  */
1119 void
1120 svcerr_noproc(const SVCXPRT *xprt)
1121 {
1122 	struct rpc_msg rply;
1123 
1124 	rply.rm_direction = REPLY;
1125 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1126 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1127 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1128 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1129 }
1130 
1131 /*
1132  * Can't decode args error reply
1133  */
1134 void
1135 svcerr_decode(const SVCXPRT *xprt)
1136 {
1137 	struct rpc_msg rply;
1138 
1139 	rply.rm_direction = REPLY;
1140 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1141 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1142 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1143 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1144 }
1145 
1146 /*
1147  * Some system error
1148  */
1149 void
1150 svcerr_systemerr(const SVCXPRT *xprt)
1151 {
1152 	struct rpc_msg rply;
1153 
1154 	rply.rm_direction = REPLY;
1155 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1156 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1157 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1158 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1159 }
1160 
1161 /*
1162  * Tell the RPC package not to complain about version errors to the client.
1163  * This is useful when revving broadcast protocols that sit on a fixed
1164  * address.  There really is only one (or there should be only one) example
1165  * of this kind of protocol: the portmapper (or rpc binder).
1166  */
1167 void
1168 __svc_versquiet_on(const SVCXPRT *xprt)
1169 {
1170 /* LINTED pointer alignment */
1171 	svc_flags(xprt) |= SVC_VERSQUIET;
1172 }
1173 
1174 void
1175 __svc_versquiet_off(const SVCXPRT *xprt)
1176 {
1177 /* LINTED pointer alignment */
1178 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1179 }
1180 
1181 void
1182 svc_versquiet(const SVCXPRT *xprt)
1183 {
1184 	__svc_versquiet_on(xprt);
1185 }
1186 
1187 int
1188 __svc_versquiet_get(const SVCXPRT *xprt)
1189 {
1190 /* LINTED pointer alignment */
1191 	return (svc_flags(xprt) & SVC_VERSQUIET);
1192 }
1193 
1194 /*
1195  * Authentication error reply
1196  */
1197 void
1198 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1199 {
1200 	struct rpc_msg rply;
1201 
1202 	rply.rm_direction = REPLY;
1203 	rply.rm_reply.rp_stat = MSG_DENIED;
1204 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1205 	rply.rjcted_rply.rj_why = why;
1206 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1207 }
1208 
1209 /*
1210  * Auth too weak error reply
1211  */
1212 void
1213 svcerr_weakauth(const SVCXPRT *xprt)
1214 {
1215 	svcerr_auth(xprt, AUTH_TOOWEAK);
1216 }
1217 
1218 /*
1219  * Program unavailable error reply
1220  */
1221 void
1222 svcerr_noprog(const SVCXPRT *xprt)
1223 {
1224 	struct rpc_msg rply;
1225 
1226 	rply.rm_direction = REPLY;
1227 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1228 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1229 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1230 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1231 }
1232 
1233 /*
1234  * Program version mismatch error reply
1235  */
1236 void
1237 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1238 						const rpcvers_t high_vers)
1239 {
1240 	struct rpc_msg rply;
1241 
1242 	rply.rm_direction = REPLY;
1243 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1244 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1245 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1246 	rply.acpted_rply.ar_vers.low = low_vers;
1247 	rply.acpted_rply.ar_vers.high = high_vers;
1248 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1249 }
1250 
1251 /* ******************* SERVER INPUT STUFF ******************* */
1252 
1253 /*
1254  * Get server side input from some transport.
1255  *
1256  * Statement of authentication parameters management:
1257  * This function owns and manages all authentication parameters, specifically
1258  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1259  * the "cooked" credentials (rqst->rq_clntcred).
1260  * However, this function does not know the structure of the cooked
1261  * credentials, so it makes the following assumptions:
1262  *   a) the structure is contiguous (no pointers), and
1263  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1264  * In any event, all three parameters are freed upon exit from this routine.
1265  * The storage is trivially managed on the call stack in user land, but
1266  * is allocated dynamically in kernel land.
1267  */
1268 
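/*
 * The cred_area scratch buffer used below is carved into three fixed
 * regions (allocated in svc_xprt_alloc() further down):
 *
 *	cred_area[0]			raw credentials (MAX_AUTH_BYTES)
 *	cred_area[MAX_AUTH_BYTES]	raw verifier (MAX_AUTH_BYTES)
 *	cred_area[2 * MAX_AUTH_BYTES]	cooked credentials (RQCRED_SIZE)
 */
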
1269 void
1270 svc_getreq(int rdfds)
1271 {
1272 	fd_set readfds;
1273 
1274 	FD_ZERO(&readfds);
1275 	readfds.fds_bits[0] = rdfds;
1276 	svc_getreqset(&readfds);
1277 }
1278 
1279 void
1280 svc_getreqset(fd_set *readfds)
1281 {
1282 	int i;
1283 
1284 	for (i = 0; i < svc_max_fd; i++) {
1285 		/* fd has input waiting */
1286 		if (FD_ISSET(i, readfds))
1287 			svc_getreq_common(i);
1288 	}
1289 }
1290 
1291 void
1292 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1293 {
1294 	int i;
1295 	int fds_found;
1296 
1297 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1298 		struct pollfd *p = &pfdp[i];
1299 
1300 		if (p->revents) {
1301 			/* fd has input waiting */
1302 			fds_found++;
1303 			/*
1304 			 *	We assume that this function is only called
1305 			 *	via someone select()ing from svc_fdset or
1306 			 *	poll()ing from svc_pollset[].  Thus it's safe
1307 			 *	to handle the POLLNVAL event by simply turning
1308 			 *	the corresponding bit off in svc_fdset.  The
1309 			 *	svc_pollset[] array is derived from svc_fdset
1310 			 *	and so will also be updated eventually.
1311 			 *
1312 			 *	XXX Should we do an xprt_unregister() instead?
1313 			 */
1314 			/* Handle user callback */
1315 			if (__is_a_userfd(p->fd) == TRUE) {
1316 				(void) rw_rdlock(&svc_fd_lock);
1317 				__svc_getreq_user(p);
1318 				(void) rw_unlock(&svc_fd_lock);
1319 			} else {
1320 				if (p->revents & POLLNVAL) {
1321 					(void) rw_wrlock(&svc_fd_lock);
1322 					remove_pollfd(p->fd);	/* XXX */
1323 					(void) rw_unlock(&svc_fd_lock);
1324 				} else {
1325 					svc_getreq_common(p->fd);
1326 				}
1327 			}
1328 		}
1329 	}
1330 }
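
/*
 * A private event loop typically drives this function with the global
 * poll array maintained in this file (hedged sketch; error handling
 * elided):
 */
#if 0
	for (;;) {
		int n = poll(svc_pollfd, svc_max_pollfd, -1);

		if (n > 0)
			svc_getreq_poll(svc_pollfd, n);
		else if (n < 0 && errno != EINTR)
			break;
	}
#endif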
1331 
1332 void
1333 svc_getreq_common(const int fd)
1334 {
1335 	SVCXPRT *xprt;
1336 	enum xprt_stat stat;
1337 	struct rpc_msg *msg;
1338 	struct svc_req *r;
1339 	char *cred_area;
1340 
1341 	(void) rw_rdlock(&svc_fd_lock);
1342 
1343 	/* HANDLE USER CALLBACK */
1344 	if (__is_a_userfd(fd) == TRUE) {
1345 		struct pollfd virtual_fd;
1346 
1347 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1348 		virtual_fd.fd = fd;
1349 		__svc_getreq_user(&virtual_fd);
1350 		(void) rw_unlock(&svc_fd_lock);
1351 		return;
1352 	}
1353 
1354 	/*
1355 	 * The transport associated with this fd could have been
1356 	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
1357 	 * This can happen if two or more fds get read events and are
1358 	 * passed to svc_getreq_poll/set; the first fd is serviced by
1359 	 * the dispatch routine, which cleans up any dead transports.  If
1360 	 * one of the dead transports removed is the other fd that
1361 	 * had a read event, then svc_getreq_common() will be called with no
1362 	 * xprt associated with the fd that had the original read event.
1363 	 */
1364 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1365 		(void) rw_unlock(&svc_fd_lock);
1366 		return;
1367 	}
1368 	(void) rw_unlock(&svc_fd_lock);
1369 /* LINTED pointer alignment */
1370 	msg = SVCEXT(xprt)->msg;
1371 /* LINTED pointer alignment */
1372 	r = SVCEXT(xprt)->req;
1373 /* LINTED pointer alignment */
1374 	cred_area = SVCEXT(xprt)->cred_area;
1375 	msg->rm_call.cb_cred.oa_base = cred_area;
1376 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1377 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1378 
1379 	/* receive msgs from xprt (support batch calls) */
1380 	do {
1381 		bool_t dispatch;
1382 
1383 		if ((dispatch = SVC_RECV(xprt, msg)) != FALSE)
1384 			(void) _svc_prog_dispatch(xprt, msg, r);
1385 		/*
1386 		 * Check if the xprt has been disconnected in a recursive call
1387 		 * in the service dispatch routine. If so, then break
1388 		 */
1389 		(void) rw_rdlock(&svc_fd_lock);
1390 		if (xprt != svc_xports[fd]) {
1391 			(void) rw_unlock(&svc_fd_lock);
1392 			break;
1393 		}
1394 		(void) rw_unlock(&svc_fd_lock);
1395 
1396 		/*
1397 		 * Call cleanup procedure if set.
1398 		 */
1399 		if (__proc_cleanup_cb != NULL && dispatch)
1400 			(*__proc_cleanup_cb)(xprt);
1401 
1402 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1403 			SVC_DESTROY(xprt);
1404 			break;
1405 		}
1406 	} while (stat == XPRT_MOREREQS);
1407 }
1408 
1409 int
1410 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1411 {
1412 	struct svc_callout *s;
1413 	enum auth_stat why;
1414 	int prog_found;
1415 	rpcvers_t low_vers;
1416 	rpcvers_t high_vers;
1417 	void (*disp_fn)();
1418 
1419 	r->rq_xprt = xprt;
1420 	r->rq_prog = msg->rm_call.cb_prog;
1421 	r->rq_vers = msg->rm_call.cb_vers;
1422 	r->rq_proc = msg->rm_call.cb_proc;
1423 	r->rq_cred = msg->rm_call.cb_cred;
1424 /* LINTED pointer alignment */
1425 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1426 /* LINTED pointer alignment */
1427 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1428 
1429 	/* first authenticate the message */
1430 	/* Check for null flavor and bypass these calls if possible */
1431 
1432 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1433 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1434 		r->rq_xprt->xp_verf.oa_length = 0;
1435 	} else {
1436 		bool_t no_dispatch;
1437 
1438 		if ((why = __gss_authenticate(r, msg,
1439 			&no_dispatch)) != AUTH_OK) {
1440 			svcerr_auth(xprt, why);
1441 			return (0);
1442 		}
1443 		if (no_dispatch)
1444 			return (0);
1445 	}
1446 	/* match message with a registered service */
1447 	prog_found = FALSE;
1448 	low_vers = (rpcvers_t)(0 - 1);
1449 	high_vers = 0;
1450 	(void) rw_rdlock(&svc_lock);
1451 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1452 		if (s->sc_prog == r->rq_prog) {
1453 			prog_found = TRUE;
1454 			if (s->sc_vers == r->rq_vers) {
1455 				if ((xprt->xp_netid == NULL) ||
1456 				    (s->sc_netid == NULL) ||
1457 				    (strcmp(xprt->xp_netid,
1458 					    s->sc_netid) == 0)) {
1459 					disp_fn = (*s->sc_dispatch);
1460 					(void) rw_unlock(&svc_lock);
1461 					disp_fn(r, xprt);
1462 					return (1);
1463 				}
1464 				prog_found = FALSE;
1465 			}
1466 			if (s->sc_vers < low_vers)
1467 				low_vers = s->sc_vers;
1468 			if (s->sc_vers > high_vers)
1469 				high_vers = s->sc_vers;
1470 		}		/* found correct program */
1471 	}
1472 	(void) rw_unlock(&svc_lock);
1473 
1474 	/*
1475 	 * if we got here, the program or version
1476 	 * is not served ...
1477 	 */
1478 	if (prog_found) {
1479 /* LINTED pointer alignment */
1480 		if (!version_keepquiet(xprt))
1481 			svcerr_progvers(xprt, low_vers, high_vers);
1482 	} else {
1483 		svcerr_noprog(xprt);
1484 	}
1485 	return (0);
1486 }
1487 
1488 /* ******************* SVCXPRT allocation and deallocation ***************** */
1489 
1490 /*
1491  * svc_xprt_alloc() - allocate a service transport handle
1492  */
1493 SVCXPRT *
1494 svc_xprt_alloc(void)
1495 {
1496 	SVCXPRT		*xprt = NULL;
1497 	SVCXPRT_EXT	*xt = NULL;
1498 	SVCXPRT_LIST	*xlist = NULL;
1499 	struct rpc_msg	*msg = NULL;
1500 	struct svc_req	*req = NULL;
1501 	char		*cred_area = NULL;
1502 
1503 	if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1504 		goto err_exit;
1505 
1506 	if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1507 		goto err_exit;
1508 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1509 
1510 	if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1511 		goto err_exit;
1512 	xt->my_xlist = xlist;
1513 	xlist->xprt = xprt;
1514 
1515 	if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1516 		goto err_exit;
1517 	xt->msg = msg;
1518 
1519 	if ((req = malloc(sizeof (struct svc_req))) == NULL)
1520 		goto err_exit;
1521 	xt->req = req;
1522 
1523 	if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1524 		goto err_exit;
1525 	xt->cred_area = cred_area;
1526 
1527 /* LINTED pointer alignment */
1528 	(void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1529 	return (xprt);
1530 
1531 err_exit:
1532 	svc_xprt_free(xprt);
1533 	return (NULL);
1534 }
1535 
1536 
1537 /*
1538  * svc_xprt_free() - free a service handle
1539  */
1540 void
1541 svc_xprt_free(SVCXPRT *xprt)
1542 {
1543 /* LINTED pointer alignment */
1544 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1545 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist: NULL;
1546 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1547 	struct svc_req	*req = xt ? xt->req : NULL;
1548 	char		*cred_area = xt ? xt->cred_area : NULL;
1549 
1550 	if (xprt)
1551 		free(xprt);
1552 	if (xt)
1553 		free(xt);
1554 	if (my_xlist)
1555 		free(my_xlist);
1556 	if (msg)
1557 		free(msg);
1558 	if (req)
1559 		free(req);
1560 	if (cred_area)
1561 		free(cred_area);
1562 }
1563 
1564 
1565 /*
1566  * svc_xprt_destroy() - free parent and child xprt list
1567  */
1568 void
1569 svc_xprt_destroy(SVCXPRT *xprt)
1570 {
1571 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1572 	int		type;
1573 
1574 /* LINTED pointer alignment */
1575 	if (SVCEXT(xprt)->parent)
1576 /* LINTED pointer alignment */
1577 		xprt = SVCEXT(xprt)->parent;
1578 /* LINTED pointer alignment */
1579 	type = svc_type(xprt);
1580 /* LINTED pointer alignment */
1581 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1582 		xnext = xlist->next;
1583 		xprt = xlist->xprt;
1584 		switch (type) {
1585 		case SVC_DGRAM:
1586 			svc_dg_xprtfree(xprt);
1587 			break;
1588 		case SVC_RENDEZVOUS:
1589 			svc_vc_xprtfree(xprt);
1590 			break;
1591 		case SVC_CONNECTION:
1592 			svc_fd_xprtfree(xprt);
1593 			break;
1594 		case SVC_DOOR:
1595 			svc_door_xprtfree(xprt);
1596 			break;
1597 		}
1598 	}
1599 }
1600 
1601 
1602 /*
1603  * svc_copy() - make a copy of parent
1604  */
1605 SVCXPRT *
1606 svc_copy(SVCXPRT *xprt)
1607 {
1608 /* LINTED pointer alignment */
1609 	switch (svc_type(xprt)) {
1610 	case SVC_DGRAM:
1611 		return (svc_dg_xprtcopy(xprt));
1612 	case SVC_RENDEZVOUS:
1613 		return (svc_vc_xprtcopy(xprt));
1614 	case SVC_CONNECTION:
1615 		return (svc_fd_xprtcopy(xprt));
1616 	}
1617 	return (NULL);
1618 }
1619 
1620 
1621 /*
1622  * _svc_destroy_private() - private SVC_DESTROY interface
1623  */
1624 void
1625 _svc_destroy_private(SVCXPRT *xprt)
1626 {
1627 /* LINTED pointer alignment */
1628 	switch (svc_type(xprt)) {
1629 	case SVC_DGRAM:
1630 		_svc_dg_destroy_private(xprt);
1631 		break;
1632 	case SVC_RENDEZVOUS:
1633 	case SVC_CONNECTION:
1634 		_svc_vc_destroy_private(xprt, TRUE);
1635 		break;
1636 	}
1637 }
1638 
1639 /*
1640  * svc_get_local_cred() - fetch local user credentials.  This always
1641  * works over doors based transports.  For local transports, this
1642  * does not yield correct results unless the __rpc_negotiate_uid()
1643  * call has been invoked to enable this feature.
1644  */
1645 bool_t
1646 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1647 {
1648 	/* LINTED pointer alignment */
1649 	if (svc_type(xprt) == SVC_DOOR)
1650 		return (__svc_get_door_cred(xprt, lcred));
1651 	return (__rpc_get_local_cred(xprt, lcred));
1652 }
1653 
1654 
1655 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1656 
1657 /*
1658  * The dup caching routines below provide a cache of received
1659  * transactions.  RPC service routines can use this to detect
1660  * retransmissions and re-send a non-failure response.  An LRU
1661  * scheme is used to get rid of entries in the cache,
1662  * though only DUP_DONE entries are placed on the LRU list.
1663  * The routines were written towards development of a generic
1664  * SVC_DUP() interface, which can be expanded to encompass the
1665  * svc_dg_enablecache() routines as well.  The cache is currently
1666  * private to the automounter.
1667  */
1668 
1669 
1670 /* dupcache header contains xprt specific information */
1671 struct dupcache {
1672 	rwlock_t	dc_lock;
1673 	time_t		dc_time;
1674 	int		dc_buckets;
1675 	int		dc_maxsz;
1676 	int		dc_basis;
1677 	struct dupreq 	*dc_mru;
1678 	struct dupreq	**dc_hashtbl;
1679 };
1680 
1681 /*
1682  * private duplicate cache request routines
1683  */
1684 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1685 		struct dupcache *, uint32_t, uint32_t);
1686 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1687 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1688 		struct dupcache *, uint32_t, uint32_t, time_t);
1689 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1690 		struct dupcache *, uint32_t, uint32_t);
1691 #ifdef DUP_DEBUG
1692 static void __svc_dupcache_debug(struct dupcache *);
1693 #endif /* DUP_DEBUG */
1694 
1695 /* default parameters for the dupcache */
1696 #define	DUPCACHE_BUCKETS	257
1697 #define	DUPCACHE_TIME		900
1698 #define	DUPCACHE_MAXSZ		INT_MAX
1699 
1700 /*
1701  * __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1702  * initialize the duprequest cache and assign it to the xprt_cache
1703  * Use default values depending on the cache condition and basis.
1704  * return TRUE on success and FALSE on failure
1705  */
1706 bool_t
1707 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1708 {
1709 	static mutex_t initdc_lock = DEFAULTMUTEX;
1710 	int i;
1711 	struct dupcache *dc;
1712 
1713 	(void) mutex_lock(&initdc_lock);
1714 	if (*xprt_cache != NULL) { /* do only once per xprt */
1715 		(void) mutex_unlock(&initdc_lock);
1716 		syslog(LOG_ERR,
1717 			"__svc_dupcache_init: multiply defined dup cache");
1718 		return (FALSE);
1719 	}
1720 
1721 	switch (basis) {
1722 	case DUPCACHE_FIXEDTIME:
1723 		dc = malloc(sizeof (struct dupcache));
1724 		if (dc == NULL) {
1725 			(void) mutex_unlock(&initdc_lock);
1726 			syslog(LOG_ERR,
1727 				"__svc_dupcache_init: memory alloc failed");
1728 			return (FALSE);
1729 		}
1730 		(void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1731 		if (condition != NULL)
1732 			dc->dc_time = *((time_t *)condition);
1733 		else
1734 			dc->dc_time = DUPCACHE_TIME;
1735 		dc->dc_buckets = DUPCACHE_BUCKETS;
1736 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1737 		dc->dc_basis = basis;
1738 		dc->dc_mru = NULL;
1739 		dc->dc_hashtbl = malloc(dc->dc_buckets *
1740 						sizeof (struct dupreq *));
1741 		if (dc->dc_hashtbl == NULL) {
1742 			free(dc);
1743 			(void) mutex_unlock(&initdc_lock);
1744 			syslog(LOG_ERR,
1745 				"__svc_dupcache_init: memory alloc failed");
1746 			return (FALSE);
1747 		}
1748 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1749 			dc->dc_hashtbl[i] = NULL;
1750 		*xprt_cache = (char *)dc;
1751 		break;
1752 	default:
1753 		(void) mutex_unlock(&initdc_lock);
1754 		syslog(LOG_ERR,
1755 			"__svc_dupcache_init: undefined dup cache basis");
1756 		return (FALSE);
1757 	}
1758 
1759 	(void) mutex_unlock(&initdc_lock);
1760 
1761 	return (TRUE);
1762 }
1763 
1764 /*
1765  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1766  *	char *xprt_cache)
1767  * searches the request cache. Creates an entry and returns DUP_NEW if
1768  * the request is not found in the cache.  If it is found, then it
1769  * returns the state of the request (in progress, drop, or done) and
1770  * also allocates, and passes back results to the user (if any) in
1771  * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1772  */
1773 int
1774 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1775 	char *xprt_cache)
1776 {
1777 	uint32_t drxid, drhash;
1778 	int rc;
1779 	struct dupreq *dr = NULL;
1780 	time_t timenow = time(NULL);
1781 
1782 	/* LINTED pointer alignment */
1783 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1784 
1785 	if (dc == NULL) {
1786 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1787 		return (DUP_ERROR);
1788 	}
1789 
1790 	/* get the xid of the request */
1791 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1792 		syslog(LOG_ERR, "__svc_dup: xid error");
1793 		return (DUP_ERROR);
1794 	}
1795 	drhash = drxid % dc->dc_buckets;
1796 
1797 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1798 			drhash)) != DUP_NEW)
1799 		return (rc);
1800 
1801 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1802 		return (DUP_ERROR);
1803 
1804 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1805 			== DUP_ERROR)
1806 		return (rc);
1807 
1808 	return (DUP_NEW);
1809 }
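
/*
 * Intended calling pattern from a service's dispatch routine (hedged
 * sketch; these interfaces are private and currently used only by the
 * automounter):
 */
#if 0
	static char *dupcache;			/* one cache per xprt */
	caddr_t resp = NULL;
	uint_t resplen = 0;

	if (dupcache == NULL &&
	    !__svc_dupcache_init(NULL, DUPCACHE_FIXEDTIME, &dupcache))
		return;

	switch (__svc_dup(rqstp, &resp, &resplen, dupcache)) {
	case DUP_NEW:		/* execute, then record the reply */
		/* ... serve the request, filling resp/resplen ... */
		(void) __svc_dupdone(rqstp, resp, resplen, DUP_DONE, dupcache);
		break;
	case DUP_INPROGRESS:	/* retransmission; just drop it */
		break;
	case DUP_DONE:		/* re-send the cached reply in resp */
		break;
	case DUP_ERROR:
		svcerr_systemerr(rqstp->rq_xprt);
		break;
	}
#endif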
1810 
1811 
1812 
1813 /*
1814  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1815  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1816  * 		uint32_t drhash)
1817  * Checks to see whether an entry already exists in the cache. If it does,
1818  * copy the results back into resp_buf, if appropriate. Return the status
1819  * of the request, or DUP_NEW if the entry is not in the cache.
1820  */
1821 static int
1822 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1823 		struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1824 {
1825 	struct dupreq *dr = NULL;
1826 
1827 	(void) rw_rdlock(&(dc->dc_lock));
1828 	dr = dc->dc_hashtbl[drhash];
1829 	while (dr != NULL) {
1830 		if (dr->dr_xid == drxid &&
1831 		    dr->dr_proc == req->rq_proc &&
1832 		    dr->dr_prog == req->rq_prog &&
1833 		    dr->dr_vers == req->rq_vers &&
1834 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1835 		    memcmp(dr->dr_addr.buf,
1836 				req->rq_xprt->xp_rtaddr.buf,
1837 				dr->dr_addr.len) == 0) { /* entry found */
1838 			if (dr->dr_hash != drhash) {
1839 				/* sanity check */
1840 				(void) rw_unlock((&dc->dc_lock));
1841 				syslog(LOG_ERR,
1842 					"__svc_dupcache_check: hashing error");
1843 				return (DUP_ERROR);
1844 			}
1845 
1846 			/*
1847 			 * Return results for requests on the lru list, if
1848 			 * appropriate.  Requests must be DUP_DROP or DUP_DONE
1849 			 * to have a result.  A NULL buffer in the cache
1850 			 * implies no results were sent during dupdone.
1851 			 * A NULL buffer in the call implies the caller is not
1852 			 * interested in results.
1853 			 */
1854 			if (((dr->dr_status == DUP_DONE) ||
1855 				(dr->dr_status == DUP_DROP)) &&
1856 				resp_buf != NULL &&
1857 				dr->dr_resp.buf != NULL) {
1858 				*resp_buf = malloc(dr->dr_resp.len);
1859 				if (*resp_buf == NULL) {
1860 					syslog(LOG_ERR,
1861 					"__svc_dupcache_check: malloc failed");
1862 					(void) rw_unlock(&(dc->dc_lock));
1863 					return (DUP_ERROR);
1864 				}
1865 				(void) memset(*resp_buf, 0, dr->dr_resp.len);
1866 				(void) memcpy(*resp_buf, dr->dr_resp.buf,
1867 					dr->dr_resp.len);
1868 				*resp_bufsz = dr->dr_resp.len;
1869 			} else {
1870 				/* no result */
1871 				if (resp_buf)
1872 					*resp_buf = NULL;
1873 				if (resp_bufsz)
1874 					*resp_bufsz = 0;
1875 			}
1876 			(void) rw_unlock(&(dc->dc_lock));
1877 			return (dr->dr_status);
1878 		}
1879 		dr = dr->dr_chain;
1880 	}
1881 	(void) rw_unlock(&(dc->dc_lock));
1882 	return (DUP_NEW);
1883 }
1884 
1885 /*
1886  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1887  * Return a victim dupreq entry to the caller, depending on cache policy.
1888  */
1889 static struct dupreq *
1890 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1891 {
1892 	struct dupreq *dr = NULL;
1893 
1894 	switch (dc->dc_basis) {
1895 	case DUPCACHE_FIXEDTIME:
1896 		/*
1897 		 * The hash policy is to free up a bit of the hash
1898 		 * table before allocating a new entry as the victim.
1899 		 * Freeing up the hash table each time should split
1900 		 * the cost of keeping the hash table clean among threads.
1901 		 * Note that only DONE or DROPPED entries are on the lru
1902 		 * list but we do a sanity check anyway.
1903 		 */
1904 		(void) rw_wrlock(&(dc->dc_lock));
1905 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1906 				((timenow - dr->dr_time) > dc->dc_time)) {
1907 			/* clean and then free the entry */
1908 			if (dr->dr_status != DUP_DONE &&
1909 				dr->dr_status != DUP_DROP) {
1910 				/*
1911 				 * The LRU list can't contain an
1912 				 * entry where the status is other than
1913 				 * DUP_DONE or DUP_DROP.
1914 				 */
1915 				syslog(LOG_ERR,
1916 					"__svc_dupcache_victim: bad victim");
1917 #ifdef DUP_DEBUG
1918 				/*
1919 				 * Need to hold the reader/writers lock to
1920 				 * print the cache info, since we already
1921 				 * hold the writers lock, we shall continue
1922 				 * calling __svc_dupcache_debug()
1923 				 */
1924 				__svc_dupcache_debug(dc);
1925 #endif /* DUP_DEBUG */
1926 				(void) rw_unlock(&(dc->dc_lock));
1927 				return (NULL);
1928 			}
1929 			/* free buffers */
1930 			if (dr->dr_resp.buf) {
1931 				free(dr->dr_resp.buf);
1932 				dr->dr_resp.buf = NULL;
1933 			}
1934 			if (dr->dr_addr.buf) {
1935 				free(dr->dr_addr.buf);
1936 				dr->dr_addr.buf = NULL;
1937 			}
1938 
1939 			/* unhash the entry */
1940 			if (dr->dr_chain)
1941 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1942 			if (dr->dr_prevchain)
1943 				dr->dr_prevchain->dr_chain = dr->dr_chain;
1944 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
1945 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1946 
1947 			/* modify the lru pointers */
1948 			if (dc->dc_mru == dr) {
1949 				dc->dc_mru = NULL;
1950 			} else {
1951 				dc->dc_mru->dr_next = dr->dr_next;
1952 				dr->dr_next->dr_prev = dc->dc_mru;
1953 			}
1954 			free(dr);
1955 			dr = NULL;
1956 		}
1957 		(void) rw_unlock(&(dc->dc_lock));
1958 
1959 		/*
1960 		 * Allocate and return new clean entry as victim
1961 		 */
1962 		if ((dr = malloc(sizeof (*dr))) == NULL) {
1963 			syslog(LOG_ERR,
1964 				"__svc_dupcache_victim: malloc failed");
1965 			return (NULL);
1966 		}
1967 		(void) memset(dr, 0, sizeof (*dr));
1968 		return (dr);
1969 	default:
1970 		syslog(LOG_ERR,
1971 			"__svc_dupcache_victim: undefined dupcache basis");
1972 		return (NULL);
1973 	}
1974 }
1975 
1976 /*
1977  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1978  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1979  * Build a new dupreq entry and then insert it into the cache.
1980  */
1981 static int
1982 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1983 	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1984 {
1985 	dr->dr_xid = drxid;
1986 	dr->dr_prog = req->rq_prog;
1987 	dr->dr_vers = req->rq_vers;
1988 	dr->dr_proc = req->rq_proc;
1989 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1990 	dr->dr_addr.len = dr->dr_addr.maxlen;
1991 	if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1992 		syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1993 		free(dr);
1994 		return (DUP_ERROR);
1995 	}
1996 	(void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1997 	(void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1998 							dr->dr_addr.len);
1999 	dr->dr_resp.buf = NULL;
2000 	dr->dr_resp.maxlen = 0;
2001 	dr->dr_resp.len = 0;
2002 	dr->dr_status = DUP_INPROGRESS;
2003 	dr->dr_time = timenow;
2004 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
2005 
2006 	/* place entry at head of hash table */
2007 	(void) rw_wrlock(&(dc->dc_lock));
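	/*
	 * Worked example: if the chain for drhash is A -> B, the head
	 * insertion below yields dr -> A -> B, with A->dr_prevchain set
	 * to dr and dr->dr_prevchain left NULL.
	 */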
2008 	dr->dr_chain = dc->dc_hashtbl[drhash];
2009 	dr->dr_prevchain = NULL;
2010 	if (dc->dc_hashtbl[drhash] != NULL)
2011 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2012 	dc->dc_hashtbl[drhash] = dr;
2013 	(void) rw_unlock(&(dc->dc_lock));
2014 	return (DUP_NEW);
2015 }
2016 
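/*
 * Lifecycle note: an entry is created DUP_INPROGRESS by
 * __svc_dupcache_enter() above and moved to DUP_DONE or DUP_DROP by
 * __svc_dupdone() below.  Only completed entries carry a cached
 * response and sit on the lru list that __svc_dupcache_victim() reaps.
 */
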
2017 /*
2018  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2019  *		int status, char *xprt_cache)
2020  * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2021  * Marks the request as done (DUP_DONE or DUP_DROP) and stores the response;
2022  * no other status values are accepted.  Sets the lru pointers to make the
2023  * entry the most recently used.  Returns DUP_ERROR or status.
2024 int
2025 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2026 		int status, char *xprt_cache)
2027 {
2028 	uint32_t drxid, drhash;
2029 	int rc;
2030 
2031 	/* LINTED pointer alignment */
2032 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2033 
2034 	if (dc == NULL) {
2035 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2036 		return (DUP_ERROR);
2037 	}
2038 
2039 	if (status != DUP_DONE && status != DUP_DROP) {
2040 		syslog(LOG_ERR,
2041 		    "__svc_dupdone: invalid status; must be DUP_DONE or DUP_DROP");
2042 		return (DUP_ERROR);
2043 	}
2044 
2045 	/* find the xid of the entry in the cache */
2046 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void *)&drxid) == FALSE) {
2047 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2048 		return (DUP_ERROR);
2049 	}
2050 	drhash = drxid % dc->dc_buckets;
2051 
2052 	/* update the status of the entry and result buffers, if required */
2053 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2054 			dc, drxid, drhash)) == DUP_ERROR) {
2055 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2056 		return (DUP_ERROR);
2057 	}
2058 
2059 	return (rc);
2060 }
2061 
2062 /*
2063  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2064  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2065  * 	uint32_t drhash)
2066  * Check if the entry exists in the dupcache.  If it does, update its status
2067  * and time, and also its buffer if appropriate.  It is possible, though
2068  * unlikely, for DONE requests not to be in the cache.  Return DUP_ERROR or status.
2069  */
2070 static int
2071 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2072 	int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2073 {
2074 	struct dupreq *dr = NULL;
2075 	time_t timenow = time(NULL);
2076 
2077 	(void) rw_wrlock(&(dc->dc_lock));
2078 	dr = dc->dc_hashtbl[drhash];
2079 	while (dr != NULL) {
2080 		if (dr->dr_xid == drxid &&
2081 		    dr->dr_proc == req->rq_proc &&
2082 		    dr->dr_prog == req->rq_prog &&
2083 		    dr->dr_vers == req->rq_vers &&
2084 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2085 		    memcmp(dr->dr_addr.buf,
2086 				req->rq_xprt->xp_rtaddr.buf,
2087 				dr->dr_addr.len) == 0) { /* entry found */
2088 			if (dr->dr_hash != drhash) {
2089 				/* sanity check */
2090 				(void) rw_unlock(&(dc->dc_lock));
2091 				syslog(LOG_ERR,
2092 				"__svc_dupcache_update: hashing error");
2093 				return (DUP_ERROR);
2094 			}
2095 
2096 			/* store the results if the buffer is not NULL */
2097 			if (resp_buf != NULL) {
2098 				if ((dr->dr_resp.buf =
2099 						malloc(resp_bufsz)) == NULL) {
2100 					(void) rw_unlock(&(dc->dc_lock));
2101 					syslog(LOG_ERR,
2102 					"__svc_dupcache_update: malloc failed");
2103 					return (DUP_ERROR);
2104 				}
2105 				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2106 				(void) memcpy(dr->dr_resp.buf, resp_buf,
2107 					(uint_t)resp_bufsz);
2108 				dr->dr_resp.len = resp_bufsz;
2109 			}
2110 
2111 			/* update status and done time */
2112 			dr->dr_status = status;
2113 			dr->dr_time = timenow;
2114 
2115 			/* move the entry to the mru position */
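			/*
			 * The lru list is circular and doubly linked:
			 * dc_mru points at the most recently used entry
			 * and dc_mru->dr_next at the least recently used,
			 * so following dr_next walks from newest to oldest
			 * and back around to dc_mru.
			 */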
2116 			if (dc->dc_mru == NULL) {
2117 				dr->dr_next = dr;
2118 				dr->dr_prev = dr;
2119 			} else {
2120 				dr->dr_next = dc->dc_mru->dr_next;
2121 				dc->dc_mru->dr_next->dr_prev = dr;
2122 				dr->dr_prev = dc->dc_mru;
2123 				dc->dc_mru->dr_next = dr;
2124 			}
2125 			dc->dc_mru = dr;
2126 
2127 			(void) rw_unlock(&(dc->dc_lock));
2128 			return (status);
2129 		}
2130 		dr = dr->dr_chain;
2131 	}
2132 	(void) rw_unlock(&(dc->dc_lock));
2133 	syslog(LOG_ERR, "__svc_dupcache_update: entry not in dup cache");
2134 	return (DUP_ERROR);
2135 }
2136 
2137 #ifdef DUP_DEBUG
2138 /*
2139  * __svc_dupcache_debug(struct dupcache *dc)
2140  * Print the contents of the dupcache hash table and lru list.
2141  *
2142  * This function requires the caller to hold the reader
2143  * or writer version of the duplicate request cache lock (dc_lock).
2144  */
2145 static void
2146 __svc_dupcache_debug(struct dupcache *dc)
2147 {
2148 	struct dupreq *dr = NULL;
2149 	int i;
2150 	bool_t bval;
2151 
2152 	fprintf(stderr, "   HASHTABLE\n");
2153 	for (i = 0; i < dc->dc_buckets; i++) {
2154 		bval = FALSE;
2155 		dr = dc->dc_hashtbl[i];
2156 		while (dr != NULL) {
2157 			if (!bval) {	/* ensures bucket printed only once */
2158 				fprintf(stderr, "    bucket : %d\n", i);
2159 				bval = TRUE;
2160 			}
2161 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2162 				dr->dr_xid, dr->dr_status, dr->dr_time);
2163 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2164 			    (void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2165 			dr = dr->dr_chain;
2166 		}
2167 	}
2168 
2169 	fprintf(stderr, "   LRU\n");
2170 	if (dc->dc_mru) {
2171 		dr = dc->dc_mru->dr_next;	/* lru */
2172 		while (dr != dc->dc_mru) {
2173 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2174 				dr->dr_xid, dr->dr_status, dr->dr_time);
2175 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2176 			    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2177 			dr = dr->dr_next;
2178 		}
2179 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2180 			dr->dr_xid, dr->dr_status, dr->dr_time);
2181 		fprintf(stderr, " dr: %p next: %p prev: %p\n", (void *)dr,
2182 			(void *)dr->dr_next, (void *)dr->dr_prev);
2183 	}
2184 }
2185 #endif /* DUP_DEBUG */
2186