1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright 2017 Joyent Inc
26 */
27 /*
28 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
29 */
30 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
32 /*
33 * Portions of this source code were derived from Berkeley
34 * 4.3 BSD under license from the Regents of the University of
35 * California.
36 */
37
38 /*
39 * svc.c, Server-side remote procedure call interface.
40 *
41 * There are two sets of procedures here. The xprt routines are
42 * for handling transport handles. The svc routines handle the
43 * list of service routines.
44 *
45 */
46
47 #include "mt.h"
48 #include "rpc_mt.h"
49 #include <assert.h>
50 #include <errno.h>
51 #include <sys/types.h>
52 #include <stropts.h>
53 #include <sys/conf.h>
54 #include <rpc/rpc.h>
55 #include <rpc/auth.h>
56 #ifdef PORTMAP
57 #include <rpc/pmap_clnt.h>
58 #endif
59 #include <sys/poll.h>
60 #include <netconfig.h>
61 #include <syslog.h>
62 #include <stdlib.h>
63 #include <unistd.h>
64 #include <string.h>
65 #include <limits.h>
66
67 extern bool_t __svc_get_door_cred();
68 extern bool_t __rpc_get_local_cred();
69
70 SVCXPRT **svc_xports;
71 static int nsvc_xports; /* total number of svc_xports allocated */
72
73 XDR **svc_xdrs; /* common XDR receive area */
74 int nsvc_xdrs; /* total number of svc_xdrs allocated */
75
76 int __rpc_use_pollfd_done; /* to unlimit the number of connections */
77
78 #define NULL_SVC ((struct svc_callout *)0)
79
80 /*
81 * The services list
82 * Each entry represents a set of procedures (an rpc program).
83 * The dispatch routine takes request structs and runs the
84 * appropriate procedure.
85 */
86 static struct svc_callout {
87 struct svc_callout *sc_next;
88 rpcprog_t sc_prog;
89 rpcvers_t sc_vers;
90 char *sc_netid;
91 void (*sc_dispatch)();
92 } *svc_head;
93 extern rwlock_t svc_lock;
94
95 static struct svc_callout *svc_find();
96 int _svc_prog_dispatch();
97 void svc_getreq_common();
98 char *strdup();
99
100 extern mutex_t svc_door_mutex;
101 extern cond_t svc_door_waitcv;
102 extern int svc_ndoorfds;
103 extern SVCXPRT_LIST *_svc_xprtlist;
104 extern mutex_t xprtlist_lock;
105 extern void __svc_rm_from_xlist();
106
107 extern fd_set _new_svc_fdset;
108
109 /*
110 * If the allocated array of reactors is too small, grow it by this
111 * margin. This reduces the number of allocations.
112 */
113 #define USER_FD_INCREMENT 5
114
115 static void add_pollfd(int fd, short events);
116 static void remove_pollfd(int fd);
117 static void __svc_remove_input_of_fd(int fd);
118
119 /*
120 * Data used to handle reactor:
121 * - one file descriptor we listen to,
122 * - one callback we call if the fd pops,
123 * - and a cookie passed as a parameter to the callback.
124 *
125 * The structure is an array indexed by file descriptor. Each entry points
126 * to the first element of a doubly linked list of callbacks.
127 * Only one callback may be associated with a given (fd, event) pair.
128 */
129
130 struct _svc_user_fd_head;
131
132 typedef struct {
133 struct _svc_user_fd_node *next;
134 struct _svc_user_fd_node *previous;
135 } _svc_user_link;
136
137 typedef struct _svc_user_fd_node {
138 _svc_user_link lnk;
139 svc_input_id_t id;
140 int fd;
141 unsigned int events;
142 svc_callback_t callback;
143 void* cookie;
144 } _svc_user_fd_node;
145
146 typedef struct _svc_user_fd_head {
147 struct _svc_user_fd_node *list;
148 unsigned int mask; /* logical OR of all sub-masks */
149 } _svc_user_fd_head;
150
151
152 /* Array of defined reactors, indexed by file descriptor */
153 static _svc_user_fd_head *svc_userfds = NULL;
154
155 /* current size of the svc_userfds array */
156 static int svc_nuserfds = 0;
157
158 /* Mutex to ensure MT safe operations for user fds callbacks. */
159 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
160
161
162 /*
163 * This structure is used to have constant time algorithms. There is an array
164 * of this structure as large as svc_nuserfds. When the user registers a
165 * new callback, the address of the created structure is stored in a cell of
166 * this array. The address of this cell is the returned unique identifier.
167 *
168 * On removal, the id is given by the user, and the free flag tells us
169 * whether this cell is in use. If it is free, we return an error. Otherwise,
170 * we can free the structure pointed to by fd_node.
171 *
172 * On insertion, we use the free list threaded through (first_free,
173 * next_free). In this way, with a constant time computation, we can give a
174 * correct index to the user.
175 */
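
/*
 * A minimal worked example of the free list above (illustrative indices
 * only): after growing the array to 3 cells, the growth loop in
 * _svc_attribute_new_id() below frees indices 2, 1, 0 in that order, so
 * first_free == 0 and data.next_free chains 0 -> 1 -> 2 -> -1. The first
 * allocation hands out index 0 and advances first_free to 1; freeing id 0
 * later pushes it back onto the head of the list.
 */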
176
177 typedef struct _svc_management_user_fd {
178 bool_t free;
179 union {
180 svc_input_id_t next_free;
181 _svc_user_fd_node *fd_node;
182 } data;
183 } _svc_management_user_fd;
184
185 /* index to the first free elem */
186 static svc_input_id_t first_free = (svc_input_id_t)-1;
187 /* the size of this array is the same as svc_nuserfds */
188 static _svc_management_user_fd* user_fd_mgt_array = NULL;
189
190 /* current size of user_fd_mgt_array */
191 static int svc_nmgtuserfds = 0;
192
193
194 /* Define some macros to access data associated to registration ids. */
195 #define node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
196 #define is_free_id(id) (user_fd_mgt_array[(int)id].free)
197
198 #ifndef POLLSTANDARD
199 #define POLLSTANDARD \
200 (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
201 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
202 #endif
203
204 /*
205 * To free an id, we mark the cell as free and insert it at the head of the
206 * free list.
207 */
208
209 static void
210 _svc_free_id(const svc_input_id_t id)
211 {
212 assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
213 user_fd_mgt_array[(int)id].free = TRUE;
214 user_fd_mgt_array[(int)id].data.next_free = first_free;
215 first_free = id;
216 }
217
218 /*
219 * To get a free cell, we just have to take it from the free linked list and
220 * set the flag to "not free". This function also allocates new memory if
221 * necessary.
222 */
223 static svc_input_id_t
224 _svc_attribute_new_id(_svc_user_fd_node *node)
225 {
226 int selected_index = (int)first_free;
227 assert(node != NULL);
228
229 if (selected_index == -1) {
230 /* Allocate new entries */
231 int L_inOldSize = svc_nmgtuserfds;
232 int i;
233 _svc_management_user_fd *tmp;
234
235 svc_nmgtuserfds += USER_FD_INCREMENT;
236
237 tmp = realloc(user_fd_mgt_array,
238 svc_nmgtuserfds * sizeof (_svc_management_user_fd));
239
240 if (tmp == NULL) {
241 syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
242 svc_nmgtuserfds = L_inOldSize;
243 errno = ENOMEM;
244 return ((svc_input_id_t)-1);
245 }
246
247 user_fd_mgt_array = tmp;
248
249 for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
250 _svc_free_id((svc_input_id_t)i);
251 selected_index = (int)first_free;
252 }
253
254 node->id = (svc_input_id_t)selected_index;
255 first_free = user_fd_mgt_array[selected_index].data.next_free;
256
257 user_fd_mgt_array[selected_index].data.fd_node = node;
258 user_fd_mgt_array[selected_index].free = FALSE;
259
260 return ((svc_input_id_t)selected_index);
261 }
262
263 /*
264 * Process a pollfd event. Scan all the associated callbacks that have
265 * at least one bit in their mask matching a received event.
266 *
267 * If POLLNVAL is received, check whether at least one callback processes
268 * it; if not, remove the file descriptor from the poll set. If there is
269 * one, let the user do the work.
270 */
271 void
272 __svc_getreq_user(struct pollfd *pfd)
273 {
274 int fd = pfd->fd;
275 short revents = pfd->revents;
276 bool_t invalHandled = FALSE;
277 _svc_user_fd_node *node;
278
279 (void) mutex_lock(&svc_userfds_lock);
280
281 if ((fd < 0) || (fd >= svc_nuserfds)) {
282 (void) mutex_unlock(&svc_userfds_lock);
283 return;
284 }
285
286 node = svc_userfds[fd].list;
287
288 /* check if at least one mask fits */
289 if (0 == (revents & svc_userfds[fd].mask)) {
290 (void) mutex_unlock(&svc_userfds_lock);
291 return;
292 }
293
294 while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
295 /*
296 * If one of the received events matches the ones the node
297 * listens to
298 */
299 _svc_user_fd_node *next = node->lnk.next;
300
301 if (node->callback != NULL) {
302 if (node->events & revents) {
303 if (revents & POLLNVAL) {
304 invalHandled = TRUE;
305 }
306
307 /*
308 * The lock must be released before calling the
309 * user function, as this function can call
310 * svc_remove_input() for example.
311 */
312 (void) mutex_unlock(&svc_userfds_lock);
313 node->callback(node->id, node->fd,
314 node->events & revents, node->cookie);
315 /*
316 * Do not use the node structure anymore, as it
317 * could have been deallocated by the previous
318 * callback.
319 */
320 (void) mutex_lock(&svc_userfds_lock);
321 }
322 }
323 node = next;
324 }
325
326 if ((revents & POLLNVAL) && !invalHandled)
327 __svc_remove_input_of_fd(fd);
328 (void) mutex_unlock(&svc_userfds_lock);
329 }
330
331
332 /*
333 * Check if a file descriptor is associated with a user reactor.
334 * To do this, just check that the entry indexed by fd has a non-zero
335 * event mask (i.e., at least one registered callback).
336 */
337 bool_t
338 __is_a_userfd(int fd)
339 {
340 /* Checks argument */
341 if ((fd < 0) || (fd >= svc_nuserfds))
342 return (FALSE);
343 return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
344 }
345
346 /* Free everything concerning user fds. */
347 /* Not static because it is used in svc_run.c. */
348
349 void
350 __destroy_userfd(void)
351 {
352 int one_fd;
353 /* Clean user fd */
354 if (svc_userfds != NULL) {
355 for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
356 _svc_user_fd_node *node;
357
358 node = svc_userfds[one_fd].list;
359 while (node != NULL) {
360 _svc_user_fd_node *tmp = node;
361 _svc_free_id(node->id);
362 node = node->lnk.next;
363 free(tmp);
364 }
365 }
366
367 free(user_fd_mgt_array);
368 user_fd_mgt_array = NULL;
369 first_free = (svc_input_id_t)-1;
370
371 free(svc_userfds);
372 svc_userfds = NULL;
373 svc_nuserfds = 0;
374 }
375 }
376
377 /*
378 * Remove all the callbacks associated with an fd; useful when the fd is
379 * closed, for instance.
380 */
381 static void
382 __svc_remove_input_of_fd(int fd)
383 {
384 _svc_user_fd_node **pnode;
385 _svc_user_fd_node *tmp;
386
387 if ((fd < 0) || (fd >= svc_nuserfds))
388 return;
389
390 pnode = &svc_userfds[fd].list;
391 while ((tmp = *pnode) != NULL) {
392 *pnode = tmp->lnk.next;
393
394 _svc_free_id(tmp->id);
395 free(tmp);
396 }
397
398 svc_userfds[fd].mask = 0;
399 }
400
401 /*
402 * Allow the user to add an fd to the poll list. On failure, return
403 * (svc_input_id_t)-1. Otherwise, return the registration id.
404 */
405
406 svc_input_id_t
407 svc_add_input(int user_fd, unsigned int events,
408 svc_callback_t user_callback, void *cookie)
409 {
410 _svc_user_fd_node *new_node;
411
412 if (user_fd < 0) {
413 errno = EINVAL;
414 return ((svc_input_id_t)-1);
415 }
416
417 if ((events == 0x0000) ||
418 (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
419 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
420 errno = EINVAL;
421 return ((svc_input_id_t)-1);
422 }
423
424 (void) mutex_lock(&svc_userfds_lock);
425
426 if ((user_fd < svc_nuserfds) &&
427 (svc_userfds[user_fd].mask & events) != 0) {
428 /* Callback already registered */
429 errno = EEXIST;
430 (void) mutex_unlock(&svc_userfds_lock);
431 return ((svc_input_id_t)-1);
432 }
433
434 /* Handle memory allocation. */
435 if (user_fd >= svc_nuserfds) {
436 int oldSize = svc_nuserfds;
437 int i;
438 _svc_user_fd_head *tmp;
439
440 svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
441
442 tmp = realloc(svc_userfds,
443 svc_nuserfds * sizeof (_svc_user_fd_head));
444
445 if (tmp == NULL) {
446 syslog(LOG_ERR, "svc_add_input: out of memory");
447 svc_nuserfds = oldSize;
448 errno = ENOMEM;
449 (void) mutex_unlock(&svc_userfds_lock);
450 return ((svc_input_id_t)-1);
451 }
452
453 svc_userfds = tmp;
454
455 for (i = oldSize; i < svc_nuserfds; i++) {
456 svc_userfds[i].list = NULL;
457 svc_userfds[i].mask = 0;
458 }
459 }
460
461 new_node = malloc(sizeof (_svc_user_fd_node));
462 if (new_node == NULL) {
463 syslog(LOG_ERR, "svc_add_input: out of memory");
464 errno = ENOMEM;
465 (void) mutex_unlock(&svc_userfds_lock);
466 return ((svc_input_id_t)-1);
467 }
468
469 /* create a new node */
470 new_node->fd = user_fd;
471 new_node->events = events;
472 new_node->callback = user_callback;
473 new_node->cookie = cookie;
474
475 if (_svc_attribute_new_id(new_node) == -1) {
476 (void) mutex_unlock(&svc_userfds_lock);
477 free(new_node);
478 return ((svc_input_id_t)-1);
479 }
480
481 /* Add the new element at the beginning of the list. */
482 if (svc_userfds[user_fd].list != NULL)
483 svc_userfds[user_fd].list->lnk.previous = new_node;
484 new_node->lnk.next = svc_userfds[user_fd].list;
485 new_node->lnk.previous = NULL;
486
487 svc_userfds[user_fd].list = new_node;
488
489 /* refresh global mask for this file descriptor */
490 svc_userfds[user_fd].mask |= events;
491
492 /* refresh mask for the poll */
493 add_pollfd(user_fd, (svc_userfds[user_fd].mask));
494
495 (void) mutex_unlock(&svc_userfds_lock);
496 return (new_node->id);
497 }
498
499 int
500 svc_remove_input(svc_input_id_t id)
501 {
502 _svc_user_fd_node* node;
503 _svc_user_fd_node* next;
504 _svc_user_fd_node* previous;
505 int fd; /* caching optim */
506
507 (void) mutex_lock(&svc_userfds_lock);
508
509 /* Immediately update data for id management */
510 if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
511 is_free_id(id)) {
512 errno = EINVAL;
513 (void) mutex_unlock(&svc_userfds_lock);
514 return (-1);
515 }
516
517 node = node_from_id(id);
518 assert(node != NULL);
519
520 _svc_free_id(id);
521 next = node->lnk.next;
522 previous = node->lnk.previous;
523 fd = node->fd; /* caching optim */
524
525 /* Remove this node from the list. */
526 if (previous != NULL) {
527 previous->lnk.next = next;
528 } else {
529 assert(svc_userfds[fd].list == node);
530 svc_userfds[fd].list = next;
531 }
532 if (next != NULL)
533 next->lnk.previous = previous;
534
535 /* Remove the node flags from the global mask */
536 svc_userfds[fd].mask ^= node->events;
537
538 free(node);
539 if (svc_userfds[fd].mask == 0) {
540 assert(svc_userfds[fd].list == NULL);
541 remove_pollfd(fd);
542 } else {
543 assert(svc_userfds[fd].list != NULL);
544 }
545 /* <=> CLEAN NEEDED TO SHRINK MEMORY USAGE */
546
547 (void) mutex_unlock(&svc_userfds_lock);
548 return (0);
549 }
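
/*
 * Illustrative sketch (not part of this file) of how a server might use the
 * user-fd interface above to watch an unrelated descriptor alongside its
 * RPC work. my_fd and my_ready() are hypothetical names; the callback is
 * invoked from the svc_run() poll loop whenever my_fd reports POLLIN:
 *
 *	static void
 *	my_ready(svc_input_id_t id, int fd, unsigned int events, void *cookie)
 *	{
 *		char c;
 *
 *		if (events & POLLIN)
 *			(void) read(fd, &c, sizeof (c));
 *	}
 *
 *	svc_input_id_t id = svc_add_input(my_fd, POLLIN, my_ready, NULL);
 *	...
 *	svc_run();
 *	...
 *	(void) svc_remove_input(id);
 */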
550
551 /*
552 * Provides default service-side functions for authentication flavors
553 * that do not use all the fields in struct svc_auth_ops.
554 */
555
556 /*ARGSUSED*/
557 static int
558 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
559 {
560 return (*xfunc)(xdrs, xwhere);
561 }
562
563 struct svc_auth_ops svc_auth_any_ops = {
564 authany_wrap,
565 authany_wrap,
566 };
567
568 /*
569 * Return pointer to server authentication structure.
570 */
571 SVCAUTH *
572 __svc_get_svcauth(SVCXPRT *xprt)
573 {
574 /* LINTED pointer alignment */
575 return (&SVC_XP_AUTH(xprt));
576 }
577
578 /*
579 * A callback routine to cleanup after a procedure is executed.
580 */
581 void (*__proc_cleanup_cb)() = NULL;
582
583 void *
584 __svc_set_proc_cleanup_cb(void *cb)
585 {
586 void *tmp = (void *)__proc_cleanup_cb;
587
588 __proc_cleanup_cb = (void (*)())cb;
589 return (tmp);
590 }
591
592 /* *************** SVCXPRT related stuff **************** */
593
594
595 static int pollfd_shrinking = 1;
596
597
598 /*
599 * Add fd to svc_pollfd
600 */
601 static void
602 add_pollfd(int fd, short events)
603 {
604 if (fd < FD_SETSIZE) {
605 FD_SET(fd, &svc_fdset);
606 #if !defined(_LP64)
607 FD_SET(fd, &_new_svc_fdset);
608 #endif
609 svc_nfds++;
610 svc_nfds_set++;
611 if (fd >= svc_max_fd)
612 svc_max_fd = fd + 1;
613 }
614 if (fd >= svc_max_pollfd)
615 svc_max_pollfd = fd + 1;
616 if (svc_max_pollfd > svc_pollfd_allocd) {
617 int i = svc_pollfd_allocd;
618 pollfd_t *tmp;
619 do {
620 svc_pollfd_allocd += POLLFD_EXTEND;
621 } while (svc_max_pollfd > svc_pollfd_allocd);
622 tmp = realloc(svc_pollfd,
623 sizeof (pollfd_t) * svc_pollfd_allocd);
624 if (tmp != NULL) {
625 svc_pollfd = tmp;
626 for (; i < svc_pollfd_allocd; i++)
627 POLLFD_CLR(i, tmp);
628 } else {
629 /*
630 * Give an error message; undo the fdset setting
631 * above; reset the pollfd_shrinking flag.
632 * Because of this, poll will not be done
633 * on these fds.
634 */
635 if (fd < FD_SETSIZE) {
636 FD_CLR(fd, &svc_fdset);
637 #if !defined(_LP64)
638 FD_CLR(fd, &_new_svc_fdset);
639 #endif
640 svc_nfds--;
641 svc_nfds_set--;
642 if (fd == (svc_max_fd - 1))
643 svc_max_fd--;
644 }
645 if (fd == (svc_max_pollfd - 1))
646 svc_max_pollfd--;
647 pollfd_shrinking = 0;
648 syslog(LOG_ERR, "add_pollfd: out of memory");
649 _exit(1);
650 }
651 }
652 svc_pollfd[fd].fd = fd;
653 svc_pollfd[fd].events = events;
654 svc_npollfds++;
655 svc_npollfds_set++;
656 }
657
658 /*
659 * the fd is still active but only the bit in fdset is cleared.
660 * do not subtract svc_nfds or svc_npollfds
661 */
662 void
663 clear_pollfd(int fd)
664 {
665 if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
666 FD_CLR(fd, &svc_fdset);
667 #if !defined(_LP64)
668 FD_CLR(fd, &_new_svc_fdset);
669 #endif
670 svc_nfds_set--;
671 }
672 if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
673 POLLFD_CLR(fd, svc_pollfd);
674 svc_npollfds_set--;
675 }
676 }
677
678 /*
679 * sets the bit in fdset for an active fd so that poll() is done for it
680 */
681 void
682 set_pollfd(int fd, short events)
683 {
684 if (fd < FD_SETSIZE) {
685 FD_SET(fd, &svc_fdset);
686 #if !defined(_LP64)
687 FD_SET(fd, &_new_svc_fdset);
688 #endif
689 svc_nfds_set++;
690 }
691 if (fd < svc_pollfd_allocd) {
692 svc_pollfd[fd].fd = fd;
693 svc_pollfd[fd].events = events;
694 svc_npollfds_set++;
695 }
696 }
697
698 /*
699 * remove a svc_pollfd entry; it does not shrink the memory
700 */
701 static void
702 remove_pollfd(int fd)
703 {
704 clear_pollfd(fd);
705 if (fd == (svc_max_fd - 1))
706 svc_max_fd--;
707 svc_nfds--;
708 if (fd == (svc_max_pollfd - 1))
709 svc_max_pollfd--;
710 svc_npollfds--;
711 }
712
713 /*
714 * delete a svc_pollfd entry; it shrinks the memory
715 * use remove_pollfd if you do not want to shrink
716 */
717 static void
718 delete_pollfd(int fd)
719 {
720 remove_pollfd(fd);
721 if (pollfd_shrinking && svc_max_pollfd <
722 (svc_pollfd_allocd - POLLFD_SHRINK)) {
723 do {
724 svc_pollfd_allocd -= POLLFD_SHRINK;
725 } while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
726 svc_pollfd = realloc(svc_pollfd,
727 sizeof (pollfd_t) * svc_pollfd_allocd);
728 if (svc_pollfd == NULL) {
729 syslog(LOG_ERR, "delete_pollfd: out of memory");
730 _exit(1);
731 }
732 }
733 }
734
735
736 /*
737 * Activate a transport handle.
738 */
739 void
740 xprt_register(const SVCXPRT *xprt)
741 {
742 int fd = xprt->xp_fd;
743 #ifdef CALLBACK
744 extern void (*_svc_getreqset_proc)();
745 #endif
746 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
747
748 (void) rw_wrlock(&svc_fd_lock);
749 if (svc_xports == NULL) {
750 /* allocate some small amount first */
751 svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
752 if (svc_xports == NULL) {
753 syslog(LOG_ERR, "xprt_register: out of memory");
754 _exit(1);
755 }
756 nsvc_xports = FD_INCREMENT;
757
758 #ifdef CALLBACK
759 /*
760 * XXX: This code does not keep track of the server state.
761 *
762 * This provides for callback support. When a client
763 * recv's a call from another client on the server fd's,
764 * it calls _svc_getreqset_proc() which would return
765 * after serving all the server requests. Also look under
766 * clnt_dg.c and clnt_vc.c (clnt_call part of it)
767 */
768 _svc_getreqset_proc = svc_getreq_poll;
769 #endif
770 }
771
772 while (fd >= nsvc_xports) {
773 SVCXPRT **tmp_xprts = svc_xports;
774
775 /* time to expand svc_xports */
776 tmp_xprts = realloc(svc_xports,
777 sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
778 if (tmp_xprts == NULL) {
779 syslog(LOG_ERR, "xprt_register : out of memory.");
780 _exit(1);
781 }
782
783 svc_xports = tmp_xprts;
784 (void) memset(&svc_xports[nsvc_xports], 0,
785 sizeof (SVCXPRT *) * FD_INCREMENT);
786 nsvc_xports += FD_INCREMENT;
787 }
788
789 svc_xports[fd] = (SVCXPRT *)xprt;
790
791 add_pollfd(fd, MASKVAL);
792
793 if (svc_polling) {
794 char dummy;
795
796 /*
797 * This happens only in one of the MT modes.
798 * Wake up poller.
799 */
800 (void) write(svc_pipe[1], &dummy, sizeof (dummy));
801 }
802 /*
803 * If already dispatching door based services, start
804 * dispatching TLI based services now.
805 */
806 (void) mutex_lock(&svc_door_mutex);
807 if (svc_ndoorfds > 0)
808 (void) cond_signal(&svc_door_waitcv);
809 (void) mutex_unlock(&svc_door_mutex);
810
811 if (svc_xdrs == NULL) {
812 /* allocate initial chunk */
813 svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
814 if (svc_xdrs != NULL)
815 nsvc_xdrs = FD_INCREMENT;
816 else {
817 syslog(LOG_ERR, "xprt_register : out of memory.");
818 _exit(1);
819 }
820 }
821 (void) rw_unlock(&svc_fd_lock);
822 }
823
824 /*
825 * De-activate a transport handle.
826 */
827 void
828 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
829 {
830 int fd = xprt->xp_fd;
831
832 if (lock_not_held)
833 (void) rw_wrlock(&svc_fd_lock);
834 if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
835 svc_xports[fd] = NULL;
836 delete_pollfd(fd);
837 }
838 if (lock_not_held)
839 (void) rw_unlock(&svc_fd_lock);
840 __svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
841 }
842
843 void
844 xprt_unregister(const SVCXPRT *xprt)
845 {
846 __xprt_unregister_private(xprt, TRUE);
847 }
848
849 /* ********************** CALLOUT list related stuff ************* */
850
851 /*
852 * Add a service program to the callout list.
853 * The dispatch routine will be called when an rpc request for this
854 * program number comes in.
855 */
856 bool_t
857 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
858 void (*dispatch)(), const struct netconfig *nconf)
859 {
860 struct svc_callout *prev;
861 struct svc_callout *s, **s2;
862 struct netconfig *tnconf;
863 char *netid = NULL;
864 int flag = 0;
865
866 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
867
868 if (xprt->xp_netid) {
869 netid = strdup(xprt->xp_netid);
870 flag = 1;
871 } else if (nconf && nconf->nc_netid) {
872 netid = strdup(nconf->nc_netid);
873 flag = 1;
874 } else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
875 != NULL) {
876 netid = strdup(tnconf->nc_netid);
877 flag = 1;
878 freenetconfigent(tnconf);
879 } /* must have been created with svc_raw_create */
880 if ((netid == NULL) && (flag == 1))
881 return (FALSE);
882
883 (void) rw_wrlock(&svc_lock);
884 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
885 if (netid)
886 free(netid);
887 if (s->sc_dispatch == dispatch)
888 goto rpcb_it; /* the caller is registering another xprt */
889 (void) rw_unlock(&svc_lock);
890 return (FALSE);
891 }
892 s = malloc(sizeof (struct svc_callout));
893 if (s == NULL) {
894 if (netid)
895 free(netid);
896 (void) rw_unlock(&svc_lock);
897 return (FALSE);
898 }
899
900 s->sc_prog = prog;
901 s->sc_vers = vers;
902 s->sc_dispatch = dispatch;
903 s->sc_netid = netid;
904 s->sc_next = NULL;
905
906 /*
907 * The ordering of transports is such that the most frequently used
908 * one appears first. So add the new entry to the end of the list.
909 */
910 for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
911 ;
912 *s2 = s;
913
914 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
915 if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
916 syslog(LOG_ERR, "svc_reg : strdup failed.");
917 free(netid);
918 free(s);
919 *s2 = NULL;
920 (void) rw_unlock(&svc_lock);
921 return (FALSE);
922 }
923
924 rpcb_it:
925 (void) rw_unlock(&svc_lock);
926
927 /* now register the information with the local binder service */
928 if (nconf)
929 return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
930 return (TRUE);
931 /*NOTREACHED*/
932 }
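
/*
 * Illustrative sketch (hypothetical PROG/VERS/my_dispatch) of the usual
 * registration pattern built on svc_reg(): create a transport handle on a
 * chosen netconfig, clear any stale rpcbind entry, then register the
 * dispatch routine:
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *
 *	(void) rpcb_unset(PROG, VERS, nconf);
 *	if (!svc_reg(xprt, PROG, VERS, my_dispatch, nconf))
 *		syslog(LOG_ERR, "unable to register (PROG, VERS)");
 *	freenetconfigent(nconf);
 *	svc_run();
 */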
933
934 /*
935 * Remove a service program from the callout list.
936 */
937 void
938 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
939 {
940 struct svc_callout *prev;
941 struct svc_callout *s;
942
943 /* unregister the information anyway */
944 (void) rpcb_unset(prog, vers, NULL);
945
946 (void) rw_wrlock(&svc_lock);
947 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
948 if (prev == NULL_SVC) {
949 svc_head = s->sc_next;
950 } else {
951 prev->sc_next = s->sc_next;
952 }
953 s->sc_next = NULL_SVC;
954 if (s->sc_netid)
955 free(s->sc_netid);
956 free(s);
957 }
958 (void) rw_unlock(&svc_lock);
959 }
960
961 #ifdef PORTMAP
962 /*
963 * Add a service program to the callout list.
964 * The dispatch routine will be called when an rpc request for this
965 * program number comes in.
966 * For version 2 portmappers.
967 */
968 bool_t
969 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
970 void (*dispatch)(), int protocol)
971 {
972 struct svc_callout *prev;
973 struct svc_callout *s;
974 struct netconfig *nconf;
975 char *netid = NULL;
976 int flag = 0;
977
978 if (xprt->xp_netid) {
979 netid = strdup(xprt->xp_netid);
980 flag = 1;
981 } else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
982 __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
983 /* fill in missing netid field in SVCXPRT */
984 netid = strdup(nconf->nc_netid);
985 flag = 1;
986 freenetconfigent(nconf);
987 } /* must be svc_raw_create */
988
989 if ((netid == NULL) && (flag == 1))
990 return (FALSE);
991
992 (void) rw_wrlock(&svc_lock);
993 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
994 if (netid)
995 free(netid);
996 if (s->sc_dispatch == dispatch)
997 goto pmap_it; /* the caller is registering another xprt */
998 (void) rw_unlock(&svc_lock);
999 return (FALSE);
1000 }
1001 s = malloc(sizeof (struct svc_callout));
1002 if (s == (struct svc_callout *)0) {
1003 if (netid)
1004 free(netid);
1005 (void) rw_unlock(&svc_lock);
1006 return (FALSE);
1007 }
1008 s->sc_prog = prog;
1009 s->sc_vers = vers;
1010 s->sc_dispatch = dispatch;
1011 s->sc_netid = netid;
1012 s->sc_next = svc_head;
1013 svc_head = s;
1014
1015 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1016 if ((xprt->xp_netid = strdup(netid)) == NULL) {
1017 syslog(LOG_ERR, "svc_register : strdup failed.");
1018 free(netid);
1019 svc_head = s->sc_next;
1020 free(s);
1021 (void) rw_unlock(&svc_lock);
1022 return (FALSE);
1023 }
1024
1025 pmap_it:
1026 (void) rw_unlock(&svc_lock);
1027 /* now register the information with the local binder service */
1028 if (protocol)
1029 return (pmap_set(prog, vers, protocol, xprt->xp_port));
1030 return (TRUE);
1031 }
1032
1033 /*
1034 * Remove a service program from the callout list.
1035 * For version 2 portmappers.
1036 */
1037 void
1038 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1039 {
1040 struct svc_callout *prev;
1041 struct svc_callout *s;
1042
1043 (void) rw_wrlock(&svc_lock);
1044 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1045 if (prev == NULL_SVC) {
1046 svc_head = s->sc_next;
1047 } else {
1048 prev->sc_next = s->sc_next;
1049 }
1050 s->sc_next = NULL_SVC;
1051 if (s->sc_netid)
1052 free(s->sc_netid);
1053 free(s);
1054 /* unregister the information with the local binder service */
1055 (void) pmap_unset(prog, vers);
1056 }
1057 (void) rw_unlock(&svc_lock);
1058 }
1059 #endif /* PORTMAP */
1060
1061 /*
1062 * Search the callout list for a program number, return the callout
1063 * struct.
1064 * Also check the transport. Many routines such as svc_unreg
1065 * don't supply a corresponding transport, so don't check the transport if
1066 * netid == NULL.
1067 */
1068 static struct svc_callout *
1069 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1070 {
1071 struct svc_callout *s, *p;
1072
1073 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1074
1075 /* assert(RW_WRITE_HELD(&svc_lock)); */
1076 p = NULL_SVC;
1077 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1078 if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1079 ((netid == NULL) || (s->sc_netid == NULL) ||
1080 (strcmp(netid, s->sc_netid) == 0)))
1081 break;
1082 p = s;
1083 }
1084 *prev = p;
1085 return (s);
1086 }
1087
1088
1089 /* ******************* REPLY GENERATION ROUTINES ************ */
1090
1091 /*
1092 * Send a reply to an rpc request
1093 */
1094 bool_t
1095 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1096 const caddr_t xdr_location)
1097 {
1098 struct rpc_msg rply;
1099
1100 rply.rm_direction = REPLY;
1101 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1102 rply.acpted_rply.ar_verf = xprt->xp_verf;
1103 rply.acpted_rply.ar_stat = SUCCESS;
1104 rply.acpted_rply.ar_results.where = xdr_location;
1105 rply.acpted_rply.ar_results.proc = xdr_results;
1106 return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1107 }
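
/*
 * Illustrative sketch of a dispatch routine built around svc_sendreply()
 * and the svcerr_*() routines below (MY_PROC and the int argument/result
 * are hypothetical):
 *
 *	static void
 *	my_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
 *	{
 *		int arg, res;
 *
 *		switch (rqstp->rq_proc) {
 *		case MY_PROC:
 *			if (!svc_getargs(transp, xdr_int, (caddr_t)&arg)) {
 *				svcerr_decode(transp);
 *				return;
 *			}
 *			res = arg + 1;
 *			if (!svc_sendreply(transp, xdr_int, (caddr_t)&res))
 *				svcerr_systemerr(transp);
 *			(void) svc_freeargs(transp, xdr_int, (caddr_t)&arg);
 *			break;
 *		default:
 *			svcerr_noproc(transp);
 *		}
 *	}
 */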
1108
1109 /*
1110 * No procedure error reply
1111 */
1112 void
1113 svcerr_noproc(const SVCXPRT *xprt)
1114 {
1115 struct rpc_msg rply;
1116
1117 rply.rm_direction = REPLY;
1118 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1119 rply.acpted_rply.ar_verf = xprt->xp_verf;
1120 rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1121 SVC_REPLY((SVCXPRT *)xprt, &rply);
1122 }
1123
1124 /*
1125 * Can't decode args error reply
1126 */
1127 void
1128 svcerr_decode(const SVCXPRT *xprt)
1129 {
1130 struct rpc_msg rply;
1131
1132 rply.rm_direction = REPLY;
1133 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1134 rply.acpted_rply.ar_verf = xprt->xp_verf;
1135 rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1136 SVC_REPLY((SVCXPRT *)xprt, &rply);
1137 }
1138
1139 /*
1140 * Some system error
1141 */
1142 void
1143 svcerr_systemerr(const SVCXPRT *xprt)
1144 {
1145 struct rpc_msg rply;
1146
1147 rply.rm_direction = REPLY;
1148 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1149 rply.acpted_rply.ar_verf = xprt->xp_verf;
1150 rply.acpted_rply.ar_stat = SYSTEM_ERR;
1151 SVC_REPLY((SVCXPRT *)xprt, &rply);
1152 }
1153
1154 /*
1155 * Tell RPC package to not complain about version errors to the client. This
1156 * is useful when revving broadcast protocols that sit on a fixed address.
1157 * There is really one (or should be only one) example of this kind of
1158 * protocol: the portmapper (or rpc binder).
1159 */
1160 void
1161 __svc_versquiet_on(const SVCXPRT *xprt)
1162 {
1163 /* LINTED pointer alignment */
1164 svc_flags(xprt) |= SVC_VERSQUIET;
1165 }
1166
1167 void
1168 __svc_versquiet_off(const SVCXPRT *xprt)
1169 {
1170 /* LINTED pointer alignment */
1171 svc_flags(xprt) &= ~SVC_VERSQUIET;
1172 }
1173
1174 void
1175 svc_versquiet(const SVCXPRT *xprt)
1176 {
1177 __svc_versquiet_on(xprt);
1178 }
1179
1180 int
1181 __svc_versquiet_get(const SVCXPRT *xprt)
1182 {
1183 /* LINTED pointer alignment */
1184 return (svc_flags(xprt) & SVC_VERSQUIET);
1185 }
1186
1187 /*
1188 * Authentication error reply
1189 */
1190 void
1191 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1192 {
1193 struct rpc_msg rply;
1194
1195 rply.rm_direction = REPLY;
1196 rply.rm_reply.rp_stat = MSG_DENIED;
1197 rply.rjcted_rply.rj_stat = AUTH_ERROR;
1198 rply.rjcted_rply.rj_why = why;
1199 SVC_REPLY((SVCXPRT *)xprt, &rply);
1200 }
1201
1202 /*
1203 * Auth too weak error reply
1204 */
1205 void
1206 svcerr_weakauth(const SVCXPRT *xprt)
1207 {
1208 svcerr_auth(xprt, AUTH_TOOWEAK);
1209 }
1210
1211 /*
1212 * Program unavailable error reply
1213 */
1214 void
1215 svcerr_noprog(const SVCXPRT *xprt)
1216 {
1217 struct rpc_msg rply;
1218
1219 rply.rm_direction = REPLY;
1220 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1221 rply.acpted_rply.ar_verf = xprt->xp_verf;
1222 rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1223 SVC_REPLY((SVCXPRT *)xprt, &rply);
1224 }
1225
1226 /*
1227 * Program version mismatch error reply
1228 */
1229 void
1230 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1231 const rpcvers_t high_vers)
1232 {
1233 struct rpc_msg rply;
1234
1235 rply.rm_direction = REPLY;
1236 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1237 rply.acpted_rply.ar_verf = xprt->xp_verf;
1238 rply.acpted_rply.ar_stat = PROG_MISMATCH;
1239 rply.acpted_rply.ar_vers.low = low_vers;
1240 rply.acpted_rply.ar_vers.high = high_vers;
1241 SVC_REPLY((SVCXPRT *)xprt, &rply);
1242 }
1243
1244 /* ******************* SERVER INPUT STUFF ******************* */
1245
1246 /*
1247 * Get server side input from some transport.
1248 *
1249 * Statement of authentication parameters management:
1250 * This function owns and manages all authentication parameters, specifically
1251 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1252 * the "cooked" credentials (rqst->rq_clntcred).
1253 * However, this function does not know the structure of the cooked
1254 * credentials, so it makes the following assumptions:
1255 * a) the structure is contiguous (no pointers), and
1256 * b) the cred structure size does not exceed RQCRED_SIZE bytes.
1257 * In all events, all three parameters are freed upon exit from this routine.
1258 * The storage is trivially managed on the call stack in user land, but
1259 * is malloc'd in kernel land.
1260 */
1261
1262 void
1263 svc_getreq(int rdfds)
1264 {
1265 fd_set readfds;
1266
1267 FD_ZERO(&readfds);
1268 readfds.fds_bits[0] = rdfds;
1269 svc_getreqset(&readfds);
1270 }
1271
1272 void
1273 svc_getreqset(fd_set *readfds)
1274 {
1275 int i;
1276
1277 for (i = 0; i < svc_max_fd; i++) {
1278 /* fd has input waiting */
1279 if (FD_ISSET(i, readfds))
1280 svc_getreq_common(i);
1281 }
1282 }
1283
1284 void
1285 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1286 {
1287 int i;
1288 int fds_found;
1289
1290 for (i = fds_found = 0; fds_found < pollretval; i++) {
1291 struct pollfd *p = &pfdp[i];
1292
1293 if (p->revents) {
1294 /* fd has input waiting */
1295 fds_found++;
1296 /*
1297 * We assume that this function is only called
1298 * via someone select()ing from svc_fdset or
1299 * poll()ing from svc_pollset[]. Thus it's safe
1300 * to handle the POLLNVAL event by simply turning
1301 * the corresponding bit off in svc_fdset. The
1302 * svc_pollset[] array is derived from svc_fdset
1303 * and so will also be updated eventually.
1304 *
1305 * XXX Should we do an xprt_unregister() instead?
1306 */
1307 /* Handle user callback */
1308 if (__is_a_userfd(p->fd) == TRUE) {
1309 (void) rw_rdlock(&svc_fd_lock);
1310 __svc_getreq_user(p);
1311 (void) rw_unlock(&svc_fd_lock);
1312 } else {
1313 if (p->revents & POLLNVAL) {
1314 (void) rw_wrlock(&svc_fd_lock);
1315 remove_pollfd(p->fd); /* XXX */
1316 (void) rw_unlock(&svc_fd_lock);
1317 } else {
1318 svc_getreq_common(p->fd);
1319 }
1320 }
1321 }
1322 }
1323 }
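
/*
 * Illustrative sketch of a private poll loop (instead of svc_run()) feeding
 * its results to svc_getreq_poll(); svc_pollfd and svc_max_pollfd are the
 * globals maintained by add_pollfd()/remove_pollfd() above:
 *
 *	for (;;) {
 *		int n = poll(svc_pollfd, svc_max_pollfd, -1);
 *
 *		if (n > 0)
 *			svc_getreq_poll(svc_pollfd, n);
 *	}
 */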
1324
1325 void
1326 svc_getreq_common(const int fd)
1327 {
1328 SVCXPRT *xprt;
1329 enum xprt_stat stat;
1330 struct rpc_msg *msg;
1331 struct svc_req *r;
1332 char *cred_area;
1333
1334 (void) rw_rdlock(&svc_fd_lock);
1335
1336 /* HANDLE USER CALLBACK */
1337 if (__is_a_userfd(fd) == TRUE) {
1338 struct pollfd virtual_fd;
1339
1340 virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1341 virtual_fd.fd = fd;
1342 __svc_getreq_user(&virtual_fd);
1343 (void) rw_unlock(&svc_fd_lock);
1344 return;
1345 }
1346
1347 /*
1348 * The transport associated with this fd could have been
1349 * removed from svc_timeout_nonblock_xprt_and_LRU, for instance.
1350 * This can happen if two or more fds get read events and are
1351 * passed to svc_getreq_poll/set: the first fd is serviced by
1352 * the dispatch routine, which cleans up any dead transports. If
1353 * one of the dead transports removed is the other fd that
1354 * had a read event then svc_getreq_common() will be called with no
1355 * xprt associated with the fd that had the original read event.
1356 */
1357 if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1358 (void) rw_unlock(&svc_fd_lock);
1359 return;
1360 }
1361 (void) rw_unlock(&svc_fd_lock);
1362 /* LINTED pointer alignment */
1363 msg = SVCEXT(xprt)->msg;
1364 /* LINTED pointer alignment */
1365 r = SVCEXT(xprt)->req;
1366 /* LINTED pointer alignment */
1367 cred_area = SVCEXT(xprt)->cred_area;
1368 msg->rm_call.cb_cred.oa_base = cred_area;
1369 msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1370 r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1371
1372 /* receive msgs from xprt (support batch calls) */
1373 do {
1374 bool_t dispatch;
1375
1376 if (dispatch = SVC_RECV(xprt, msg))
1377 (void) _svc_prog_dispatch(xprt, msg, r);
1378 /*
1379 * Check if the xprt has been disconnected in a recursive call
1380 * in the service dispatch routine. If so, then break
1381 */
1382 (void) rw_rdlock(&svc_fd_lock);
1383 if (xprt != svc_xports[fd]) {
1384 (void) rw_unlock(&svc_fd_lock);
1385 break;
1386 }
1387 (void) rw_unlock(&svc_fd_lock);
1388
1389 /*
1390 * Call cleanup procedure if set.
1391 */
1392 if (__proc_cleanup_cb != NULL && dispatch)
1393 (*__proc_cleanup_cb)(xprt);
1394
1395 if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1396 SVC_DESTROY(xprt);
1397 break;
1398 }
1399 } while (stat == XPRT_MOREREQS);
1400 }
1401
1402 int
1403 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1404 {
1405 struct svc_callout *s;
1406 enum auth_stat why;
1407 int prog_found;
1408 rpcvers_t low_vers;
1409 rpcvers_t high_vers;
1410 void (*disp_fn)();
1411
1412 r->rq_xprt = xprt;
1413 r->rq_prog = msg->rm_call.cb_prog;
1414 r->rq_vers = msg->rm_call.cb_vers;
1415 r->rq_proc = msg->rm_call.cb_proc;
1416 r->rq_cred = msg->rm_call.cb_cred;
1417 /* LINTED pointer alignment */
1418 SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1419 /* LINTED pointer alignment */
1420 SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1421
1422 /* first authenticate the message */
1423 /* Check for null flavor and bypass these calls if possible */
1424
1425 if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1426 r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1427 r->rq_xprt->xp_verf.oa_length = 0;
1428 } else {
1429 bool_t no_dispatch;
1430
1431 if ((why = __gss_authenticate(r, msg,
1432 &no_dispatch)) != AUTH_OK) {
1433 svcerr_auth(xprt, why);
1434 return (0);
1435 }
1436 if (no_dispatch)
1437 return (0);
1438 }
1439 /* match message with a registered service */
1440 prog_found = FALSE;
1441 low_vers = (rpcvers_t)(0 - 1);
1442 high_vers = 0;
1443 (void) rw_rdlock(&svc_lock);
1444 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1445 if (s->sc_prog == r->rq_prog) {
1446 prog_found = TRUE;
1447 if (s->sc_vers == r->rq_vers) {
1448 if ((xprt->xp_netid == NULL) ||
1449 (s->sc_netid == NULL) ||
1450 (strcmp(xprt->xp_netid,
1451 s->sc_netid) == 0)) {
1452 disp_fn = (*s->sc_dispatch);
1453 (void) rw_unlock(&svc_lock);
1454 disp_fn(r, xprt);
1455 return (1);
1456 }
1457 prog_found = FALSE;
1458 }
1459 if (s->sc_vers < low_vers)
1460 low_vers = s->sc_vers;
1461 if (s->sc_vers > high_vers)
1462 high_vers = s->sc_vers;
1463 } /* found correct program */
1464 }
1465 (void) rw_unlock(&svc_lock);
1466
1467 /*
1468 * if we got here, the program or version
1469 * is not served ...
1470 */
1471 if (prog_found) {
1472 /* LINTED pointer alignment */
1473 if (!version_keepquiet(xprt))
1474 svcerr_progvers(xprt, low_vers, high_vers);
1475 } else {
1476 svcerr_noprog(xprt);
1477 }
1478 return (0);
1479 }
1480
1481 /* ******************* SVCXPRT allocation and deallocation ***************** */
1482
1483 /*
1484 * svc_xprt_alloc() - allocate a service transport handle
1485 */
1486 SVCXPRT *
1487 svc_xprt_alloc(void)
1488 {
1489 SVCXPRT *xprt = NULL;
1490 SVCXPRT_EXT *xt = NULL;
1491 SVCXPRT_LIST *xlist = NULL;
1492 struct rpc_msg *msg = NULL;
1493 struct svc_req *req = NULL;
1494 char *cred_area = NULL;
1495
1496 if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1497 goto err_exit;
1498
1499 if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1500 goto err_exit;
1501 xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1502
1503 if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1504 goto err_exit;
1505 xt->my_xlist = xlist;
1506 xlist->xprt = xprt;
1507
1508 if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1509 goto err_exit;
1510 xt->msg = msg;
1511
1512 if ((req = malloc(sizeof (struct svc_req))) == NULL)
1513 goto err_exit;
1514 xt->req = req;
1515
1516 if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1517 goto err_exit;
1518 xt->cred_area = cred_area;
1519
1520 /* LINTED pointer alignment */
1521 (void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1522 return (xprt);
1523
1524 err_exit:
1525 svc_xprt_free(xprt);
1526 return (NULL);
1527 }
1528
1529
1530 /*
1531 * svc_xprt_free() - free a service handle
1532 */
1533 void
1534 svc_xprt_free(SVCXPRT *xprt)
1535 {
1536 /* LINTED pointer alignment */
1537 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
1538 SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist: NULL;
1539 struct rpc_msg *msg = xt ? xt->msg : NULL;
1540 struct svc_req *req = xt ? xt->req : NULL;
1541 char *cred_area = xt ? xt->cred_area : NULL;
1542
1543 if (xprt)
1544 free(xprt);
1545 if (xt)
1546 free(xt);
1547 if (my_xlist)
1548 free(my_xlist);
1549 if (msg)
1550 free(msg);
1551 if (req)
1552 free(req);
1553 if (cred_area)
1554 free(cred_area);
1555 }
1556
1557
1558 /*
1559 * svc_xprt_destroy() - free parent and child xprt list
1560 */
1561 void
1562 svc_xprt_destroy(SVCXPRT *xprt)
1563 {
1564 SVCXPRT_LIST *xlist, *xnext = NULL;
1565 int type;
1566
1567 /* LINTED pointer alignment */
1568 if (SVCEXT(xprt)->parent)
1569 /* LINTED pointer alignment */
1570 xprt = SVCEXT(xprt)->parent;
1571 /* LINTED pointer alignment */
1572 type = svc_type(xprt);
1573 /* LINTED pointer alignment */
1574 for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1575 xnext = xlist->next;
1576 xprt = xlist->xprt;
1577 switch (type) {
1578 case SVC_DGRAM:
1579 svc_dg_xprtfree(xprt);
1580 break;
1581 case SVC_RENDEZVOUS:
1582 svc_vc_xprtfree(xprt);
1583 break;
1584 case SVC_CONNECTION:
1585 svc_fd_xprtfree(xprt);
1586 break;
1587 case SVC_DOOR:
1588 svc_door_xprtfree(xprt);
1589 break;
1590 }
1591 }
1592 }
1593
1594
1595 /*
1596 * svc_copy() - make a copy of parent
1597 */
1598 SVCXPRT *
1599 svc_copy(SVCXPRT *xprt)
1600 {
1601 /* LINTED pointer alignment */
1602 switch (svc_type(xprt)) {
1603 case SVC_DGRAM:
1604 return (svc_dg_xprtcopy(xprt));
1605 case SVC_RENDEZVOUS:
1606 return (svc_vc_xprtcopy(xprt));
1607 case SVC_CONNECTION:
1608 return (svc_fd_xprtcopy(xprt));
1609 }
1610 return (NULL);
1611 }
1612
1613
1614 /*
1615 * _svc_destroy_private() - private SVC_DESTROY interface
1616 */
1617 void
1618 _svc_destroy_private(SVCXPRT *xprt)
1619 {
1620 /* LINTED pointer alignment */
1621 switch (svc_type(xprt)) {
1622 case SVC_DGRAM:
1623 _svc_dg_destroy_private(xprt);
1624 break;
1625 case SVC_RENDEZVOUS:
1626 case SVC_CONNECTION:
1627 _svc_vc_destroy_private(xprt, TRUE);
1628 break;
1629 }
1630 }
1631
1632 /*
1633 * svc_get_local_cred() - fetch local user credentials. This always
1634 * works over doors based transports. For local transports, this
1635 * does not yield correct results unless the __rpc_negotiate_uid()
1636 * call has been invoked to enable this feature.
1637 */
1638 bool_t
1639 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1640 {
1641 /* LINTED pointer alignment */
1642 if (svc_type(xprt) == SVC_DOOR)
1643 return (__svc_get_door_cred(xprt, lcred));
1644 return (__rpc_get_local_cred(xprt, lcred));
1645 }
1646
1647
1648 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1649
1650 /*
1651 * The dup caching routines below provide a cache of received
1652 * transactions. RPC service routines can use this to detect
1653 * retransmissions and re-send a non-failure response. An LRU
1654 * scheme is used to find entries to evict from the cache,
1655 * though only DUP_DONE entries are placed on the lru list.
1656 * The routines were written towards development of a generic
1657 * SVC_DUP() interface, which can be expanded to encompass the
1658 * svc_dg_enablecache() routines as well. The cache is currently
1659 * private to the automounter.
1660 */
1661
1662
1663 /* dupcache header contains xprt specific information */
1664 struct dupcache {
1665 rwlock_t dc_lock;
1666 time_t dc_time;
1667 int dc_buckets;
1668 int dc_maxsz;
1669 int dc_basis;
1670 struct dupreq *dc_mru;
1671 struct dupreq **dc_hashtbl;
1672 };
1673
1674 /*
1675 * private duplicate cache request routines
1676 */
1677 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1678 struct dupcache *, uint32_t, uint32_t);
1679 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1680 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1681 struct dupcache *, uint32_t, uint32_t, time_t);
1682 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1683 struct dupcache *, uint32_t, uint32_t);
1684 #ifdef DUP_DEBUG
1685 static void __svc_dupcache_debug(struct dupcache *);
1686 #endif /* DUP_DEBUG */
1687
1688 /* default parameters for the dupcache */
1689 #define DUPCACHE_BUCKETS 257
1690 #define DUPCACHE_TIME 900
1691 #define DUPCACHE_MAXSZ INT_MAX
1692
1693 /*
1694 * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
1695 * Initialize the duprequest cache and assign it to xprt_cache.
1696 * Use default values depending on the cache condition and basis.
1697 * Return TRUE on success and FALSE on failure.
1698 */
1699 bool_t
1700 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1701 {
1702 static mutex_t initdc_lock = DEFAULTMUTEX;
1703 int i;
1704 struct dupcache *dc;
1705
1706 (void) mutex_lock(&initdc_lock);
1707 if (*xprt_cache != NULL) { /* do only once per xprt */
1708 (void) mutex_unlock(&initdc_lock);
1709 syslog(LOG_ERR,
1710 "__svc_dupcache_init: multiply defined dup cache");
1711 return (FALSE);
1712 }
1713
1714 switch (basis) {
1715 case DUPCACHE_FIXEDTIME:
1716 dc = malloc(sizeof (struct dupcache));
1717 if (dc == NULL) {
1718 (void) mutex_unlock(&initdc_lock);
1719 syslog(LOG_ERR,
1720 "__svc_dupcache_init: memory alloc failed");
1721 return (FALSE);
1722 }
1723 (void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1724 if (condition != NULL)
1725 dc->dc_time = *((time_t *)condition);
1726 else
1727 dc->dc_time = DUPCACHE_TIME;
1728 dc->dc_buckets = DUPCACHE_BUCKETS;
1729 dc->dc_maxsz = DUPCACHE_MAXSZ;
1730 dc->dc_basis = basis;
1731 dc->dc_mru = NULL;
1732 dc->dc_hashtbl = malloc(dc->dc_buckets *
1733 sizeof (struct dupreq *));
1734 if (dc->dc_hashtbl == NULL) {
1735 free(dc);
1736 (void) mutex_unlock(&initdc_lock);
1737 syslog(LOG_ERR,
1738 "__svc_dupcache_init: memory alloc failed");
1739 return (FALSE);
1740 }
1741 for (i = 0; i < DUPCACHE_BUCKETS; i++)
1742 dc->dc_hashtbl[i] = NULL;
1743 *xprt_cache = (char *)dc;
1744 break;
1745 default:
1746 (void) mutex_unlock(&initdc_lock);
1747 syslog(LOG_ERR,
1748 "__svc_dupcache_init: undefined dup cache basis");
1749 return (FALSE);
1750 }
1751
1752 (void) mutex_unlock(&initdc_lock);
1753
1754 return (TRUE);
1755 }
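
/*
 * Illustrative sketch: a transport that wants a 15-minute fixed-time dup
 * cache would typically keep a char * cache slot (xprt_cache below is a
 * hypothetical name) and initialize it once:
 *
 *	static char *xprt_cache = NULL;
 *	time_t timeout = 900;
 *
 *	if (!__svc_dupcache_init(&timeout, DUPCACHE_FIXEDTIME, &xprt_cache))
 *		syslog(LOG_ERR, "dup cache initialization failed");
 */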
1756
1757 /*
1758 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1759 * char *xprt_cache)
1760 * searches the request cache. Creates an entry and returns DUP_NEW if
1761 * the request is not found in the cache. If it is found, then it
1762 * returns the state of the request (in progress, drop, or done) and
1763 * also allocates and passes back results to the user (if any) in
1764 * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1765 */
1766 int
1767 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1768 char *xprt_cache)
1769 {
1770 uint32_t drxid, drhash;
1771 int rc;
1772 struct dupreq *dr = NULL;
1773 time_t timenow = time(NULL);
1774
1775 /* LINTED pointer alignment */
1776 struct dupcache *dc = (struct dupcache *)xprt_cache;
1777
1778 if (dc == NULL) {
1779 syslog(LOG_ERR, "__svc_dup: undefined cache");
1780 return (DUP_ERROR);
1781 }
1782
1783 /* get the xid of the request */
1784 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1785 syslog(LOG_ERR, "__svc_dup: xid error");
1786 return (DUP_ERROR);
1787 }
1788 drhash = drxid % dc->dc_buckets;
1789
1790 if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1791 drhash)) != DUP_NEW)
1792 return (rc);
1793
1794 if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1795 return (DUP_ERROR);
1796
1797 if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1798 == DUP_ERROR)
1799 return (rc);
1800
1801 return (DUP_NEW);
1802 }
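
/*
 * Illustrative sketch (hypothetical service code) of how __svc_dup() and
 * __svc_dupdone() pair up around a non-idempotent operation, using a cache
 * previously set up with __svc_dupcache_init():
 *
 *	caddr_t resp;
 *	uint_t respsz;
 *
 *	switch (__svc_dup(rqstp, &resp, &respsz, xprt_cache)) {
 *	case DUP_NEW:
 *		...perform the operation and build the reply...
 *		(void) __svc_dupdone(rqstp, reply, replysz, DUP_DONE,
 *		    xprt_cache);
 *		break;
 *	case DUP_DONE:
 *		...retransmission: re-send the cached reply in resp...
 *		break;
 *	case DUP_INPROGRESS:
 *	case DUP_DROP:
 *		...drop the request...
 *		break;
 *	case DUP_ERROR:
 *		...fall back to processing without the cache...
 *		break;
 *	}
 */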
1803
1804
1805
1806 /*
1807 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1808 * uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1809 * uint32_t drhash)
1810 * Checks to see whether an entry already exists in the cache. If it does,
1811 * copy back into the resp_buf, if appropriate. Return the status of
1812 * the request, or DUP_NEW if the entry is not in the cache
1813 */
1814 static int
1815 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1816 struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1817 {
1818 struct dupreq *dr = NULL;
1819
1820 (void) rw_rdlock(&(dc->dc_lock));
1821 dr = dc->dc_hashtbl[drhash];
1822 while (dr != NULL) {
1823 if (dr->dr_xid == drxid &&
1824 dr->dr_proc == req->rq_proc &&
1825 dr->dr_prog == req->rq_prog &&
1826 dr->dr_vers == req->rq_vers &&
1827 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1828 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1829 dr->dr_addr.len) == 0) { /* entry found */
1830 if (dr->dr_hash != drhash) {
1831 /* sanity check */
1832 (void) rw_unlock((&dc->dc_lock));
1833 syslog(LOG_ERR,
1834 "__svc_dupcache_check: hashing error");
1835 return (DUP_ERROR);
1836 }
1837
1838 /*
1839 * Return results for requests on the lru list, if
1840 * appropriate. Requests must be DUP_DROP or DUP_DONE
1841 * to have a result. A NULL buffer in the cache
1842 * implies no results were sent during dupdone.
1843 * A NULL buffer in the call implies not interested
1844 * in results.
1845 */
1846 if (((dr->dr_status == DUP_DONE) ||
1847 (dr->dr_status == DUP_DROP)) &&
1848 resp_buf != NULL &&
1849 dr->dr_resp.buf != NULL) {
1850 *resp_buf = malloc(dr->dr_resp.len);
1851 if (*resp_buf == NULL) {
1852 syslog(LOG_ERR,
1853 "__svc_dupcache_check: malloc failed");
1854 (void) rw_unlock(&(dc->dc_lock));
1855 return (DUP_ERROR);
1856 }
1857 (void) memset(*resp_buf, 0, dr->dr_resp.len);
1858 (void) memcpy(*resp_buf, dr->dr_resp.buf,
1859 dr->dr_resp.len);
1860 *resp_bufsz = dr->dr_resp.len;
1861 } else {
1862 /* no result */
1863 if (resp_buf)
1864 *resp_buf = NULL;
1865 if (resp_bufsz)
1866 *resp_bufsz = 0;
1867 }
1868 (void) rw_unlock(&(dc->dc_lock));
1869 return (dr->dr_status);
1870 }
1871 dr = dr->dr_chain;
1872 }
1873 (void) rw_unlock(&(dc->dc_lock));
1874 return (DUP_NEW);
1875 }
1876
1877 /*
1878 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1879 * Return a victim dupreq entry to the caller, depending on cache policy.
1880 */
1881 static struct dupreq *
1882 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1883 {
1884 struct dupreq *dr = NULL;
1885
1886 switch (dc->dc_basis) {
1887 case DUPCACHE_FIXEDTIME:
1888 /*
1889 * The hash policy is to free up a bit of the hash
1890 * table before allocating a new entry as the victim.
1891 * Freeing up the hash table each time should split
1892 * the cost of keeping the hash table clean among threads.
1893 * Note that only DONE or DROPPED entries are on the lru
1894 * list but we do a sanity check anyway.
1895 */
1896 (void) rw_wrlock(&(dc->dc_lock));
1897 while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1898 ((timenow - dr->dr_time) > dc->dc_time)) {
1899 /* clean and then free the entry */
1900 if (dr->dr_status != DUP_DONE &&
1901 dr->dr_status != DUP_DROP) {
1902 /*
1903 * The LRU list can't contain an
1904 * entry where the status is other than
1905 * DUP_DONE or DUP_DROP.
1906 */
1907 syslog(LOG_ERR,
1908 "__svc_dupcache_victim: bad victim");
1909 #ifdef DUP_DEBUG
1910 /*
1911 * Need to hold the reader/writers lock to
1912 * print the cache info, since we already
1913 * hold the writers lock, we shall continue
1914 * calling __svc_dupcache_debug()
1915 */
1916 __svc_dupcache_debug(dc);
1917 #endif /* DUP_DEBUG */
1918 (void) rw_unlock(&(dc->dc_lock));
1919 return (NULL);
1920 }
1921 /* free buffers */
1922 if (dr->dr_resp.buf) {
1923 free(dr->dr_resp.buf);
1924 dr->dr_resp.buf = NULL;
1925 }
1926 if (dr->dr_addr.buf) {
1927 free(dr->dr_addr.buf);
1928 dr->dr_addr.buf = NULL;
1929 }
1930
1931 /* unhash the entry */
1932 if (dr->dr_chain)
1933 dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1934 if (dr->dr_prevchain)
1935 dr->dr_prevchain->dr_chain = dr->dr_chain;
1936 if (dc->dc_hashtbl[dr->dr_hash] == dr)
1937 dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1938
1939 /* modify the lru pointers */
1940 if (dc->dc_mru == dr) {
1941 dc->dc_mru = NULL;
1942 } else {
1943 dc->dc_mru->dr_next = dr->dr_next;
1944 dr->dr_next->dr_prev = dc->dc_mru;
1945 }
1946 free(dr);
1947 dr = NULL;
1948 }
1949 (void) rw_unlock(&(dc->dc_lock));
1950
1951 /*
1952 * Allocate and return new clean entry as victim
1953 */
1954 if ((dr = malloc(sizeof (*dr))) == NULL) {
1955 syslog(LOG_ERR,
1956 "__svc_dupcache_victim: malloc failed");
1957 return (NULL);
1958 }
1959 (void) memset(dr, 0, sizeof (*dr));
1960 return (dr);
1961 default:
1962 syslog(LOG_ERR,
1963 "__svc_dupcache_victim: undefined dup cache_basis");
1964 return (NULL);
1965 }
1966 }
1967
1968 /*
1969 * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1970 * struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1971 * build new duprequest entry and then insert into the cache
1972 */
1973 static int
1974 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1975 struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1976 {
1977 dr->dr_xid = drxid;
1978 dr->dr_prog = req->rq_prog;
1979 dr->dr_vers = req->rq_vers;
1980 dr->dr_proc = req->rq_proc;
1981 dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1982 dr->dr_addr.len = dr->dr_addr.maxlen;
1983 if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1984 syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1985 free(dr);
1986 return (DUP_ERROR);
1987 }
1988 (void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1989 (void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1990 dr->dr_addr.len);
1991 dr->dr_resp.buf = NULL;
1992 dr->dr_resp.maxlen = 0;
1993 dr->dr_resp.len = 0;
1994 dr->dr_status = DUP_INPROGRESS;
1995 dr->dr_time = timenow;
1996 dr->dr_hash = drhash; /* needed for efficient victim cleanup */
1997
1998 /* place entry at head of hash table */
1999 (void) rw_wrlock(&(dc->dc_lock));
2000 dr->dr_chain = dc->dc_hashtbl[drhash];
2001 dr->dr_prevchain = NULL;
2002 if (dc->dc_hashtbl[drhash] != NULL)
2003 dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2004 dc->dc_hashtbl[drhash] = dr;
2005 (void) rw_unlock(&(dc->dc_lock));
2006 return (DUP_NEW);
2007 }
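
/*
 * Summary of the insertion above (descriptive only): a new entry is
 * hashed on drxid % dc_buckets, pushed onto the head of that hash chain
 * in the DUP_INPROGRESS state and deliberately left off the LRU ring; it
 * is only linked into the ring by __svc_dupcache_update() once
 * __svc_dupdone() marks it DUP_DONE or DUP_DROP, which is why
 * __svc_dupcache_victim() treats any other status found on the ring as
 * corruption.
 */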
2008
2009 /*
2010 * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2011 * int status, char *xprt_cache)
2012 * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2013 * Only DONE and DROP requests can be marked as done. Sets the lru pointers
2014 * to make the entry the most recently used. Returns DUP_ERROR or status.
2015 */
2016 int
2017 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2018 int status, char *xprt_cache)
2019 {
2020 uint32_t drxid, drhash;
2021 int rc;
2022
2023 /* LINTED pointer alignment */
2024 struct dupcache *dc = (struct dupcache *)xprt_cache;
2025
2026 if (dc == NULL) {
2027 syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2028 return (DUP_ERROR);
2029 }
2030
2031 if (status != DUP_DONE && status != DUP_DROP) {
2032 syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2033 syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
2034 return (DUP_ERROR);
2035 }
2036
2037 /* find the xid of the entry in the cache */
2038 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2039 syslog(LOG_ERR, "__svc_dupdone: xid error");
2040 return (DUP_ERROR);
2041 }
2042 drhash = drxid % dc->dc_buckets;
2043
2044 /* update the status of the entry and result buffers, if required */
2045 if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2046 dc, drxid, drhash)) == DUP_ERROR) {
2047 syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2048 return (DUP_ERROR);
2049 }
2050
2051 return (rc);
2052 }
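
/*
 * A minimal completion sketch, using the same hypothetical helper names
 * as above plus an illustrative build_reply(): once the real work for a
 * DUP_NEW request has produced a marshalled reply buffer, the service
 * marks the entry DUP_DONE so later retransmissions can be answered from
 * the cache, or DUP_DROP if no reply should ever be sent.
 *
 *	caddr_t res;
 *	uint_t ressz;
 *
 *	if (build_reply(req, &res, &ressz) == 0) {
 *		(void) __svc_dupdone(req, res, ressz, DUP_DONE, xprt_cache);
 *		resend_reply(req->rq_xprt, res, ressz);
 *	} else {
 *		(void) __svc_dupdone(req, NULL, 0, DUP_DROP, xprt_cache);
 *	}
 *
 * The buffer passed in is copied by __svc_dupcache_update(), so the
 * caller keeps ownership of res.
 */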
2053
2054 /*
2055 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2056 * uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2057 * uint32_t drhash)
2058 * Check if the entry exists in the dup cache. If it does, update its status
2059 * and time, and also its buffer if appropriate. It is possible, but unlikely,
2060 * for DONE requests to not exist in the cache. Return DUP_ERROR or status.
2061 */
2062 static int
2063 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2064 int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2065 {
2066 struct dupreq *dr = NULL;
2067 time_t timenow = time(NULL);
2068
2069 (void) rw_wrlock(&(dc->dc_lock));
2070 dr = dc->dc_hashtbl[drhash];
2071 while (dr != NULL) {
2072 if (dr->dr_xid == drxid &&
2073 dr->dr_proc == req->rq_proc &&
2074 dr->dr_prog == req->rq_prog &&
2075 dr->dr_vers == req->rq_vers &&
2076 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2077 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2078 dr->dr_addr.len) == 0) { /* entry found */
2079 if (dr->dr_hash != drhash) {
2080 /* sanity check */
2081 (void) rw_unlock(&(dc->dc_lock));
2082 syslog(LOG_ERR,
2083 "__svc_dupdone: hashing error");
2084 return (DUP_ERROR);
2085 }
2086
2087 /* store the results if buffer is not NULL */
2088 if (resp_buf != NULL) {
2089 if ((dr->dr_resp.buf =
2090 malloc(resp_bufsz)) == NULL) {
2091 (void) rw_unlock(&(dc->dc_lock));
2092 syslog(LOG_ERR,
2093 "__svc_dupdone: malloc failed");
2094 return (DUP_ERROR);
2095 }
2096 (void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2097 (void) memcpy(dr->dr_resp.buf, resp_buf,
2098 (uint_t)resp_bufsz);
2099 dr->dr_resp.len = resp_bufsz;
2100 }
2101
2102 /* update status and done time */
2103 dr->dr_status = status;
2104 dr->dr_time = timenow;
2105
2106 /* move the entry to the mru position */
2107 if (dc->dc_mru == NULL) {
2108 dr->dr_next = dr;
2109 dr->dr_prev = dr;
2110 } else {
2111 dr->dr_next = dc->dc_mru->dr_next;
2112 dc->dc_mru->dr_next->dr_prev = dr;
2113 dr->dr_prev = dc->dc_mru;
2114 dc->dc_mru->dr_next = dr;
2115 }
2116 dc->dc_mru = dr;
2117
2118 (void) rw_unlock(&(dc->dc_lock));
2119 return (status);
2120 }
2121 dr = dr->dr_chain;
2122 }
2123 (void) rw_unlock(&(dc->dc_lock));
2124 syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2125 return (DUP_ERROR);
2126 }
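
/*
 * Ring insertion sketch for the MRU update above (illustrative): with
 * dc_mru pointing at entry M and the oldest entry L == M->dr_next,
 * linking a newly completed entry D as the new MRU performs
 *
 *	D->dr_next = L;   L->dr_prev = D;
 *	D->dr_prev = M;   M->dr_next = D;
 *	dc_mru = D;
 *
 * so the ring stays circular and L remains the reap point that
 * __svc_dupcache_victim() starts from.
 */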
2127
2128 #ifdef DUP_DEBUG
2129 /*
2130 * __svc_dupcache_debug(struct dupcache *dc)
2131 * print out the hash table stuff
2132 *
2133 * This function requires the caller to hold the reader
2134 * or writer version of the duplicate request cache lock (dc_lock).
2135 */
2136 static void
2137 __svc_dupcache_debug(struct dupcache *dc)
2138 {
2139 struct dupreq *dr = NULL;
2140 int i;
2141 bool_t bval;
2142
2143 fprintf(stderr, " HASHTABLE\n");
2144 for (i = 0; i < dc->dc_buckets; i++) {
2145 bval = FALSE;
2146 dr = dc->dc_hashtbl[i];
2147 while (dr != NULL) {
2148 if (!bval) { /* ensures bucket printed only once */
2149 fprintf(stderr, " bucket : %d\n", i);
2150 bval = TRUE;
2151 }
2152 fprintf(stderr, "\txid: %u status: %d time: %ld",
2153 dr->dr_xid, dr->dr_status, dr->dr_time);
2154 fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2155 (void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2156 dr = dr->dr_chain;
2157 }
2158 }
2159
2160 fprintf(stderr, " LRU\n");
2161 if (dc->dc_mru) {
2162 dr = dc->dc_mru->dr_next; /* lru */
2163 while (dr != dc->dc_mru) {
2164 fprintf(stderr, "\txid: %u status : %d time : %ld",
2165 dr->dr_xid, dr->dr_status, dr->dr_time);
2166 fprintf(stderr, " dr: %p next: %p prev: %p\n",
2167 (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2168 dr = dr->dr_next;
2169 }
2170 fprintf(stderr, "\txid: %u status: %d time: %ld",
2171 dr->dr_xid, dr->dr_status, dr->dr_time);
2172 fprintf(stderr, " dr: %p next: %p prev: %p\n",
2173 (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2174 }
2175 }
2176 #endif /* DUP_DEBUG */
2177