/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2017 Joyent Inc
 */
/*
 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
 */
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here. The xprt routines are
 * for handling transport handles. The svc routines handle the
 * list of service routines.
 */

#include "mt.h"
#include "rpc_mt.h"
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <stropts.h>
#include <sys/conf.h>
#include <rpc/rpc.h>
#include <rpc/auth.h>
#ifdef PORTMAP
#include <rpc/pmap_clnt.h>
#endif
#include <sys/poll.h>
#include <netconfig.h>
#include <syslog.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>

extern bool_t __svc_get_door_cred();
extern bool_t __rpc_get_local_cred();

SVCXPRT **svc_xports;
static int nsvc_xports;	/* total number of svc_xports allocated */

XDR **svc_xdrs;		/* common XDR receive area */
int nsvc_xdrs;		/* total number of svc_xdrs allocated */

int __rpc_use_pollfd_done;	/* to remove the limit on the number of connections */

#define	NULL_SVC ((struct svc_callout *)0)

/*
 * The services list
 * Each entry represents a set of procedures (an rpc program).
 * The dispatch routine takes request structs and runs the
 * appropriate procedure.
 */
static struct svc_callout {
	struct svc_callout *sc_next;
	rpcprog_t sc_prog;
	rpcvers_t sc_vers;
	char *sc_netid;
	void (*sc_dispatch)();
} *svc_head;
extern rwlock_t svc_lock;

static struct svc_callout *svc_find();
int _svc_prog_dispatch();
void svc_getreq_common();
char *strdup();

extern mutex_t svc_door_mutex;
extern cond_t svc_door_waitcv;
extern int svc_ndoorfds;
extern SVCXPRT_LIST *_svc_xprtlist;
extern mutex_t xprtlist_lock;
extern void __svc_rm_from_xlist();

#if !defined(_LP64)
extern fd_set _new_svc_fdset;
#endif

/*
 * If the allocated array of reactors is too small, grow it by this
 * margin. This reduces the number of reallocations.
 */
#define	USER_FD_INCREMENT 5

static void add_pollfd(int fd, short events);
static void remove_pollfd(int fd);
static void __svc_remove_input_of_fd(int fd);

/*
 * Data used to handle a reactor:
 * - one file descriptor we listen to,
 * - one callback we call if the fd pops,
 * - and a cookie passed as a parameter to the callback.
 *
 * The structure is an array indexed by file descriptor. Each entry
 * points to the first element of a doubly-linked list of callbacks.
 * Only one callback may be associated with a given (fd, event) pair.
 */
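
/*
 * Usage sketch (illustrative, not part of this library): a server can
 * register interest in one of its own descriptors alongside the RPC
 * ones. The handler below follows the svc_callback_t signature used by
 * this file; "my_fd", "my_handler" and "my_cookie" are hypothetical
 * names.
 *
 *	static void
 *	my_handler(svc_input_id_t id, int fd, unsigned int revents,
 *	    void *cookie)
 *	{
 *		... consume the event on fd; cookie is the pointer that
 *		    was passed to svc_add_input() ...
 *	}
 *
 *	svc_input_id_t id = svc_add_input(my_fd, POLLIN, my_handler,
 *	    (void *)my_cookie);
 *	if ((int)id == -1)
 *		syslog(LOG_ERR, "svc_add_input failed: %m");
 */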

struct _svc_user_fd_head;

typedef struct {
	struct _svc_user_fd_node *next;
	struct _svc_user_fd_node *previous;
} _svc_user_link;

typedef struct _svc_user_fd_node {
	_svc_user_link lnk;
	svc_input_id_t id;
	int fd;
	unsigned int events;
	svc_callback_t callback;
	void *cookie;
} _svc_user_fd_node;

typedef struct _svc_user_fd_head {
	struct _svc_user_fd_node *list;
	unsigned int mask;	/* logical OR of all sub-masks */
} _svc_user_fd_head;


/* Array of defined reactors, indexed by file descriptor */
static _svc_user_fd_head *svc_userfds = NULL;

/* current size of the file descriptor array */
static int svc_nuserfds = 0;

/* Mutex to ensure MT safe operations for user fds callbacks. */
static mutex_t svc_userfds_lock = DEFAULTMUTEX;


/*
 * This structure is used to obtain constant-time algorithms. There is an
 * array of this structure as large as svc_nuserfds. When the user registers
 * a new callback, the address of the created structure is stored in a cell
 * of this array. The address of this cell is the returned unique identifier.
 *
 * On removal, the id is given by the user; the "free" flag then tells us
 * whether the cell is in use. If it is free, we return an error. Otherwise,
 * we can free the structure pointed to by fd_node.
 *
 * On insertion, we use the linked list formed by (first_free,
 * next_free). In this way, with a constant-time computation, we can give a
 * correct index to the user.
 */
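
/*
 * Worked example (illustrative): with USER_FD_INCREMENT == 5 and an
 * empty table, the first allocation grows the array to 5 cells and
 * pushes them onto the free list from index 4 down to 0, leaving cell 0
 * at the head: first_free -> 0 -> 1 -> 2 -> 3 -> 4 -> end of list
 * ((svc_input_id_t)-1). Allocating an id pops the head in O(1);
 * freeing id N pushes cell N back onto the head, also in O(1).
 */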

typedef struct _svc_management_user_fd {
	bool_t free;
	union {
		svc_input_id_t next_free;
		_svc_user_fd_node *fd_node;
	} data;
} _svc_management_user_fd;

/* index of the first free element */
static svc_input_id_t first_free = (svc_input_id_t)-1;
/* the size of this array is the same as svc_nuserfds */
static _svc_management_user_fd *user_fd_mgt_array = NULL;

/* current size of user_fd_mgt_array */
static int svc_nmgtuserfds = 0;


/* Define some macros to access data associated with registration ids. */
#define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
#define	is_free_id(id) (user_fd_mgt_array[(int)id].free)

#ifndef POLLSTANDARD
#define	POLLSTANDARD \
	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
#endif

/*
 * To free an id, we set the cell as free and insert its address in the list
 * of free cells.
 */

static void
_svc_free_id(const svc_input_id_t id)
{
	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
	user_fd_mgt_array[(int)id].free = TRUE;
	user_fd_mgt_array[(int)id].data.next_free = first_free;
	first_free = id;
}

/*
 * To get a free cell, we just have to take it from the free linked list and
 * set the flag to "not free". This function also allocates new memory if
 * necessary.
 */
static svc_input_id_t
_svc_attribute_new_id(_svc_user_fd_node *node)
{
	int selected_index = (int)first_free;
	assert(node != NULL);

	if (selected_index == -1) {
		/* Allocate new entries */
		int L_inOldSize = svc_nmgtuserfds;
		int i;
		_svc_management_user_fd *tmp;

		svc_nmgtuserfds += USER_FD_INCREMENT;

		tmp = realloc(user_fd_mgt_array,
		    svc_nmgtuserfds * sizeof (_svc_management_user_fd));

		if (tmp == NULL) {
			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
			svc_nmgtuserfds = L_inOldSize;
			errno = ENOMEM;
			return ((svc_input_id_t)-1);
		}

		user_fd_mgt_array = tmp;

		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
			_svc_free_id((svc_input_id_t)i);
		selected_index = (int)first_free;
	}

	node->id = (svc_input_id_t)selected_index;
	first_free = user_fd_mgt_array[selected_index].data.next_free;

	user_fd_mgt_array[selected_index].data.fd_node = node;
	user_fd_mgt_array[selected_index].free = FALSE;

	return ((svc_input_id_t)selected_index);
}

/*
 * Process a pollfd entry: scan all the associated callbacks whose event
 * mask matches at least one of the received events.
 *
 * If the POLLNVAL event is received, check that at least one callback
 * processes it; if none does, remove the file descriptor from the poll.
 * If there is one, let the user do the work.
 */
void
__svc_getreq_user(struct pollfd *pfd)
{
	int fd = pfd->fd;
	short revents = pfd->revents;
	bool_t invalHandled = FALSE;
	_svc_user_fd_node *node;

	(void) mutex_lock(&svc_userfds_lock);

	if ((fd < 0) || (fd >= svc_nuserfds)) {
		(void) mutex_unlock(&svc_userfds_lock);
		return;
	}

	node = svc_userfds[fd].list;

	/* check if at least one mask fits */
	if (0 == (revents & svc_userfds[fd].mask)) {
		(void) mutex_unlock(&svc_userfds_lock);
		return;
	}

	while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
		/*
		 * If one of the received events matches the events the
		 * node listens for
		 */
		_svc_user_fd_node *next = node->lnk.next;

		if (node->callback != NULL) {
			if (node->events & revents) {
				if (revents & POLLNVAL) {
					invalHandled = TRUE;
				}

				/*
				 * The lock must be released before calling the
				 * user function, as this function can call
				 * svc_remove_input() for example.
				 */
				(void) mutex_unlock(&svc_userfds_lock);
				node->callback(node->id, node->fd,
				    node->events & revents, node->cookie);
				/*
				 * Do not use the node structure anymore, as it
				 * could have been deallocated by the previous
				 * callback.
				 */
				(void) mutex_lock(&svc_userfds_lock);
			}
		}
		node = next;
	}

	if ((revents & POLLNVAL) && !invalHandled)
		__svc_remove_input_of_fd(fd);
	(void) mutex_unlock(&svc_userfds_lock);
}
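
/*
 * Sketch of a callback that cleans itself up (illustrative; the names
 * are hypothetical). Because the lock is dropped around the user call
 * above, it is legal for a callback to unregister its own id:
 *
 *	static void
 *	on_event(svc_input_id_t id, int fd, unsigned int revents,
 *	    void *cookie)
 *	{
 *		if (revents & (POLLNVAL | POLLHUP)) {
 *			(void) svc_remove_input(id);
 *			return;
 *		}
 *		... service the fd ...
 *	}
 */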


/*
 * Check if a file descriptor is associated with a user reactor.
 * To do this, just check that the array entry indexed by fd has a
 * non-empty mask (i.e., at least one callback is registered for it).
 */
bool_t
__is_a_userfd(int fd)
{
	/* Check the argument */
	if ((fd < 0) || (fd >= svc_nuserfds))
		return (FALSE);
	return ((svc_userfds[fd].mask == 0x0000) ? FALSE : TRUE);
}

/* free everything concerning user fds */
/* used in svc_run.c => not static */

void
__destroy_userfd(void)
{
	int one_fd;
	/* Clean user fds */
	if (svc_userfds != NULL) {
		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
			_svc_user_fd_node *node;

			node = svc_userfds[one_fd].list;
			while (node != NULL) {
				_svc_user_fd_node *tmp = node;
				_svc_free_id(node->id);
				node = node->lnk.next;
				free(tmp);
			}
		}

		free(user_fd_mgt_array);
		user_fd_mgt_array = NULL;
		first_free = (svc_input_id_t)-1;

		free(svc_userfds);
		svc_userfds = NULL;
		svc_nuserfds = 0;
	}
}

/*
 * Remove all the callbacks associated with an fd; useful when the fd is
 * closed, for instance.
 */
static void
__svc_remove_input_of_fd(int fd)
{
	_svc_user_fd_node **pnode;
	_svc_user_fd_node *tmp;

	if ((fd < 0) || (fd >= svc_nuserfds))
		return;

	pnode = &svc_userfds[fd].list;
	while ((tmp = *pnode) != NULL) {
		*pnode = tmp->lnk.next;

		_svc_free_id(tmp->id);
		free(tmp);
	}

	svc_userfds[fd].mask = 0;
}

/*
 * Allow the user to add an fd to the poll list. On failure return
 * (svc_input_id_t)-1; otherwise return the registration id.
 */

svc_input_id_t
svc_add_input(int user_fd, unsigned int events,
    svc_callback_t user_callback, void *cookie)
{
	_svc_user_fd_node *new_node;

	if (user_fd < 0) {
		errno = EINVAL;
		return ((svc_input_id_t)-1);
	}

	if ((events == 0x0000) ||
	    (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|
	    POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
		errno = EINVAL;
		return ((svc_input_id_t)-1);
	}

	(void) mutex_lock(&svc_userfds_lock);

	if ((user_fd < svc_nuserfds) &&
	    (svc_userfds[user_fd].mask & events) != 0) {
		/* Callback already registered */
		errno = EEXIST;
		(void) mutex_unlock(&svc_userfds_lock);
		return ((svc_input_id_t)-1);
	}

	/* Handle memory allocation. */
	if (user_fd >= svc_nuserfds) {
		int oldSize = svc_nuserfds;
		int i;
		_svc_user_fd_head *tmp;

		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;

		tmp = realloc(svc_userfds,
		    svc_nuserfds * sizeof (_svc_user_fd_head));

		if (tmp == NULL) {
			syslog(LOG_ERR, "svc_add_input: out of memory");
			svc_nuserfds = oldSize;
			errno = ENOMEM;
			(void) mutex_unlock(&svc_userfds_lock);
			return ((svc_input_id_t)-1);
		}

		svc_userfds = tmp;

		for (i = oldSize; i < svc_nuserfds; i++) {
			svc_userfds[i].list = NULL;
			svc_userfds[i].mask = 0;
		}
	}

	new_node = malloc(sizeof (_svc_user_fd_node));
	if (new_node == NULL) {
		syslog(LOG_ERR, "svc_add_input: out of memory");
		errno = ENOMEM;
		(void) mutex_unlock(&svc_userfds_lock);
		return ((svc_input_id_t)-1);
	}

	/* create a new node */
	new_node->fd = user_fd;
	new_node->events = events;
	new_node->callback = user_callback;
	new_node->cookie = cookie;

	if (_svc_attribute_new_id(new_node) == -1) {
		(void) mutex_unlock(&svc_userfds_lock);
		free(new_node);
		return ((svc_input_id_t)-1);
	}

	/* Add the new element at the beginning of the list. */
	if (svc_userfds[user_fd].list != NULL)
		svc_userfds[user_fd].list->lnk.previous = new_node;
	new_node->lnk.next = svc_userfds[user_fd].list;
	new_node->lnk.previous = NULL;

	svc_userfds[user_fd].list = new_node;

	/* refresh the global mask for this file descriptor */
	svc_userfds[user_fd].mask |= events;

	/* refresh the mask for the poll */
	add_pollfd(user_fd, (svc_userfds[user_fd].mask));

	(void) mutex_unlock(&svc_userfds_lock);
	return (new_node->id);
}

int
svc_remove_input(svc_input_id_t id)
{
	_svc_user_fd_node *node;
	_svc_user_fd_node *next;
	_svc_user_fd_node *previous;
	int fd;		/* cached, since node is freed below */

	(void) mutex_lock(&svc_userfds_lock);

	/* Immediately update data for id management */
	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
	    is_free_id(id)) {
		errno = EINVAL;
		(void) mutex_unlock(&svc_userfds_lock);
		return (-1);
	}

	node = node_from_id(id);
	assert(node != NULL);

	_svc_free_id(id);
	next = node->lnk.next;
	previous = node->lnk.previous;
	fd = node->fd;

	/* Remove this node from the list. */
	if (previous != NULL) {
		previous->lnk.next = next;
	} else {
		assert(svc_userfds[fd].list == node);
		svc_userfds[fd].list = next;
	}
	if (next != NULL)
		next->lnk.previous = previous;

	/* Remove the node flags from the global mask */
	svc_userfds[fd].mask ^= node->events;

	free(node);
	if (svc_userfds[fd].mask == 0) {
		assert(svc_userfds[fd].list == NULL);
		remove_pollfd(fd);
	} else {
		assert(svc_userfds[fd].list != NULL);
	}
	/* XXX: cleanup needed here to shrink memory usage */

	(void) mutex_unlock(&svc_userfds_lock);
	return (0);
}

/*
 * Provides default service-side functions for authentication flavors
 * that do not use all the fields in struct svc_auth_ops.
 */

/*ARGSUSED*/
static int
authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
{
	return (*xfunc)(xdrs, xwhere);
}

struct svc_auth_ops svc_auth_any_ops = {
	authany_wrap,
	authany_wrap,
};

/*
 * Return pointer to server authentication structure.
 */
SVCAUTH *
__svc_get_svcauth(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	return (&SVC_XP_AUTH(xprt));
}

/*
 * A callback routine to cleanup after a procedure is executed.
 */
void (*__proc_cleanup_cb)() = NULL;

void *
__svc_set_proc_cleanup_cb(void *cb)
{
	void *tmp = (void *)__proc_cleanup_cb;

	__proc_cleanup_cb = (void (*)())cb;
	return (tmp);
}
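
/*
 * Illustrative use of the cleanup hook (the function name is
 * hypothetical): after each dispatched procedure, the hook is invoked
 * with the transport handle, e.g. to release per-request state.
 *
 *	static void
 *	my_cleanup(SVCXPRT *xprt)
 *	{
 *		... release per-request resources tied to xprt ...
 *	}
 *
 *	void *prev = __svc_set_proc_cleanup_cb((void *)my_cleanup);
 *
 * The previous callback is returned so it can be restored later.
 */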

/* *************** SVCXPRT related stuff **************** */


static int pollfd_shrinking = 1;


/*
 * Add fd to svc_pollfd
 */
static void
add_pollfd(int fd, short events)
{
	if (fd < FD_SETSIZE) {
		FD_SET(fd, &svc_fdset);
#if !defined(_LP64)
		FD_SET(fd, &_new_svc_fdset);
#endif
		svc_nfds++;
		svc_nfds_set++;
		if (fd >= svc_max_fd)
			svc_max_fd = fd + 1;
	}
	if (fd >= svc_max_pollfd)
		svc_max_pollfd = fd + 1;
	if (svc_max_pollfd > svc_pollfd_allocd) {
		int i = svc_pollfd_allocd;
		pollfd_t *tmp;
		do {
			svc_pollfd_allocd += POLLFD_EXTEND;
		} while (svc_max_pollfd > svc_pollfd_allocd);
		tmp = realloc(svc_pollfd,
		    sizeof (pollfd_t) * svc_pollfd_allocd);
		if (tmp != NULL) {
			svc_pollfd = tmp;
			for (; i < svc_pollfd_allocd; i++)
				POLLFD_CLR(i, tmp);
		} else {
			/*
			 * Give an error message; undo the fdset setting
			 * above; reset the pollfd_shrinking flag.
			 * Because of this, poll will not be done
			 * on these fds.
			 */
			if (fd < FD_SETSIZE) {
				FD_CLR(fd, &svc_fdset);
#if !defined(_LP64)
				FD_CLR(fd, &_new_svc_fdset);
#endif
				svc_nfds--;
				svc_nfds_set--;
				if (fd == (svc_max_fd - 1))
					svc_max_fd--;
			}
			if (fd == (svc_max_pollfd - 1))
				svc_max_pollfd--;
			pollfd_shrinking = 0;
			syslog(LOG_ERR, "add_pollfd: out of memory");
			_exit(1);
		}
	}
	svc_pollfd[fd].fd = fd;
	svc_pollfd[fd].events = events;
	svc_npollfds++;
	svc_npollfds_set++;
}

/*
 * The fd is still active but only the bit in the fdset is cleared.
 * Do not decrement svc_nfds or svc_npollfds.
 */
void
clear_pollfd(int fd)
{
	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
		FD_CLR(fd, &svc_fdset);
#if !defined(_LP64)
		FD_CLR(fd, &_new_svc_fdset);
#endif
		svc_nfds_set--;
	}
	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
		POLLFD_CLR(fd, svc_pollfd);
		svc_npollfds_set--;
	}
}

/*
 * Set the bit in the fdset for an active fd so that poll() is done for it.
 */
void
set_pollfd(int fd, short events)
{
	if (fd < FD_SETSIZE) {
		FD_SET(fd, &svc_fdset);
#if !defined(_LP64)
		FD_SET(fd, &_new_svc_fdset);
#endif
		svc_nfds_set++;
	}
	if (fd < svc_pollfd_allocd) {
		svc_pollfd[fd].fd = fd;
		svc_pollfd[fd].events = events;
		svc_npollfds_set++;
	}
}

/*
 * Remove a svc_pollfd entry; it does not shrink the memory.
 */
static void
remove_pollfd(int fd)
{
	clear_pollfd(fd);
	if (fd == (svc_max_fd - 1))
		svc_max_fd--;
	svc_nfds--;
	if (fd == (svc_max_pollfd - 1))
		svc_max_pollfd--;
	svc_npollfds--;
}

/*
 * Delete a svc_pollfd entry; it shrinks the memory.
 * Use remove_pollfd if you do not want to shrink.
 */
static void
delete_pollfd(int fd)
{
	remove_pollfd(fd);
	if (pollfd_shrinking && svc_max_pollfd <
	    (svc_pollfd_allocd - POLLFD_SHRINK)) {
		do {
			svc_pollfd_allocd -= POLLFD_SHRINK;
		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
		svc_pollfd = realloc(svc_pollfd,
		    sizeof (pollfd_t) * svc_pollfd_allocd);
		if (svc_pollfd == NULL) {
			syslog(LOG_ERR, "delete_pollfd: out of memory");
			_exit(1);
		}
	}
}
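
/*
 * Note on the shrink policy above: the array is only shrunk when the
 * high-water mark (svc_max_pollfd) sits more than a full POLLFD_SHRINK
 * below the allocation, so alternating add/delete of one fd near the
 * boundary does not cause realloc thrashing.
 */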


/*
 * Activate a transport handle.
 */
void
xprt_register(const SVCXPRT *xprt)
{
	int fd = xprt->xp_fd;
#ifdef CALLBACK
	extern void (*_svc_getreqset_proc)();
#endif
	/* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */

	(void) rw_wrlock(&svc_fd_lock);
	if (svc_xports == NULL) {
		/* allocate some small amount first */
		svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
		if (svc_xports == NULL) {
			syslog(LOG_ERR, "xprt_register: out of memory");
			_exit(1);
		}
		nsvc_xports = FD_INCREMENT;

#ifdef CALLBACK
		/*
		 * XXX: This code does not keep track of the server state.
		 *
		 * This provides for callback support. When a client
		 * recv's a call from another client on the server fd's,
		 * it calls _svc_getreqset_proc() which would return
		 * after serving all the server requests. Also look under
		 * clnt_dg.c and clnt_vc.c (clnt_call part of it)
		 */
		_svc_getreqset_proc = svc_getreq_poll;
#endif
	}

	while (fd >= nsvc_xports) {
		SVCXPRT **tmp_xprts = svc_xports;

		/* time to expand svc_xports */
		tmp_xprts = realloc(svc_xports,
		    sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
		if (tmp_xprts == NULL) {
			syslog(LOG_ERR, "xprt_register: out of memory");
			_exit(1);
		}

		svc_xports = tmp_xprts;
		(void) memset(&svc_xports[nsvc_xports], 0,
		    sizeof (SVCXPRT *) * FD_INCREMENT);
		nsvc_xports += FD_INCREMENT;
	}

	svc_xports[fd] = (SVCXPRT *)xprt;

	add_pollfd(fd, MASKVAL);

	if (svc_polling) {
		char dummy;

		/*
		 * This happens only in one of the MT modes.
		 * Wake up poller.
		 */
		(void) write(svc_pipe[1], &dummy, sizeof (dummy));
	}
	/*
	 * If already dispatching door based services, start
	 * dispatching TLI based services now.
	 */
	(void) mutex_lock(&svc_door_mutex);
	if (svc_ndoorfds > 0)
		(void) cond_signal(&svc_door_waitcv);
	(void) mutex_unlock(&svc_door_mutex);

	if (svc_xdrs == NULL) {
		/* allocate initial chunk */
		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
		if (svc_xdrs != NULL)
			nsvc_xdrs = FD_INCREMENT;
		else {
			syslog(LOG_ERR, "xprt_register: out of memory");
			_exit(1);
		}
	}
	(void) rw_unlock(&svc_fd_lock);
}

/*
 * De-activate a transport handle.
 */
void
__xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
{
	int fd = xprt->xp_fd;

	if (lock_not_held)
		(void) rw_wrlock(&svc_fd_lock);
	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
		svc_xports[fd] = NULL;
		delete_pollfd(fd);
	}
	if (lock_not_held)
		(void) rw_unlock(&svc_fd_lock);
	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
}

void
xprt_unregister(const SVCXPRT *xprt)
{
	__xprt_unregister_private(xprt, TRUE);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an rpc request for this
 * program number comes in.
 */
bool_t
svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(), const struct netconfig *nconf)
{
	struct svc_callout *prev;
	struct svc_callout *s, **s2;
	struct netconfig *tnconf;
	char *netid = NULL;
	int flag = 0;

	/* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid);
		flag = 1;
	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
	    != NULL) {
		netid = strdup(tnconf->nc_netid);
		flag = 1;
		freenetconfigent(tnconf);
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1))
		return (FALSE);

	(void) rw_wrlock(&svc_lock);
	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
		if (netid)
			free(netid);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* it is registering another xprt */
		(void) rw_unlock(&svc_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout));
	if (s == NULL) {
		if (netid)
			free(netid);
		(void) rw_unlock(&svc_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	s->sc_next = NULL;

	/*
	 * The ordering of transports is such that the most frequently used
	 * one appears first. So add the new entry to the end of the list.
	 */
	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
		;
	*s2 = s;

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
			syslog(LOG_ERR, "svc_reg: strdup failed.");
			free(netid);
			free(s);
			*s2 = NULL;
			(void) rw_unlock(&svc_lock);
			return (FALSE);
		}

rpcb_it:
	(void) rw_unlock(&svc_lock);

	/* now register the information with the local binder service */
	if (nconf)
		return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
	return (TRUE);
	/*NOTREACHED*/
}
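
/*
 * Typical registration sequence (illustrative; prognum, versnum and
 * my_dispatch are hypothetical). A server creates a transport, then
 * attaches its dispatch function to a program/version pair:
 *
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *	if (xprt == NULL || !svc_reg(xprt, prognum, versnum,
 *	    my_dispatch, nconf))
 *		syslog(LOG_ERR, "unable to register service");
 *
 * The dispatch function receives (struct svc_req *, SVCXPRT *), as
 * called from _svc_prog_dispatch() below.
 */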

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *prev;
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);

	(void) rw_wrlock(&svc_lock);
	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
		if (prev == NULL_SVC) {
			svc_head = s->sc_next;
		} else {
			prev->sc_next = s->sc_next;
		}
		s->sc_next = NULL_SVC;
		if (s->sc_netid)
			free(s->sc_netid);
		free(s);
	}
	(void) rw_unlock(&svc_lock);
}

#ifdef PORTMAP
/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an rpc request for this
 * program number comes in.
 * For version 2 portmappers.
 */
bool_t
svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
    void (*dispatch)(), int protocol)
{
	struct svc_callout *prev;
	struct svc_callout *s;
	struct netconfig *nconf;
	char *netid = NULL;
	int flag = 0;

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid);
		flag = 1;
	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
	    __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
		/* fill in missing netid field in SVCXPRT */
		netid = strdup(nconf->nc_netid);
		flag = 1;
		freenetconfigent(nconf);
	} /* must be svc_raw_create */

	if ((netid == NULL) && (flag == 1))
		return (FALSE);

	(void) rw_wrlock(&svc_lock);
	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
		if (netid)
			free(netid);
		if (s->sc_dispatch == dispatch)
			goto pmap_it; /* it is registering another xprt */
		(void) rw_unlock(&svc_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout));
	if (s == (struct svc_callout *)0) {
		if (netid)
			free(netid);
		(void) rw_unlock(&svc_lock);
		return (FALSE);
	}
	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	s->sc_next = svc_head;
	svc_head = s;

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		if ((xprt->xp_netid = strdup(netid)) == NULL) {
			syslog(LOG_ERR, "svc_register: strdup failed.");
			free(netid);
			svc_head = s->sc_next;
			free(s);
			(void) rw_unlock(&svc_lock);
			return (FALSE);
		}

pmap_it:
	(void) rw_unlock(&svc_lock);
	/* now register the information with the local binder service */
	if (protocol)
		return (pmap_set(prog, vers, protocol, xprt->xp_port));
	return (TRUE);
}

/*
 * Remove a service program from the callout list.
 * For version 2 portmappers.
 */
void
svc_unregister(rpcprog_t prog, rpcvers_t vers)
{
	struct svc_callout *prev;
	struct svc_callout *s;

	(void) rw_wrlock(&svc_lock);
	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
		if (prev == NULL_SVC) {
			svc_head = s->sc_next;
		} else {
			prev->sc_next = s->sc_next;
		}
		s->sc_next = NULL_SVC;
		if (s->sc_netid)
			free(s->sc_netid);
		free(s);
		/* unregister the information with the local binder service */
		(void) pmap_unset(prog, vers);
	}
	(void) rw_unlock(&svc_lock);
}
#endif /* PORTMAP */

/*
 * Search the callout list for a program number; return the callout
 * struct.
 * Also check the transport. Many routines such as svc_unreg don't
 * supply a corresponding transport, so don't check the transport if
 * netid == NULL.
 */
static struct svc_callout *
svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
{
	struct svc_callout *s, *p;

	/* WRITE LOCK HELD ON ENTRY: svc_lock */

	/* assert(RW_WRITE_HELD(&svc_lock)); */
	p = NULL_SVC;
	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
		    ((netid == NULL) || (s->sc_netid == NULL) ||
		    (strcmp(netid, s->sc_netid) == 0)))
			break;
		p = s;
	}
	*prev = p;
	return (s);
}


/* ******************* REPLY GENERATION ROUTINES ************ */

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
    const caddr_t xdr_location)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = xdr_location;
	rply.acpted_rply.ar_results.proc = xdr_results;
	return (SVC_REPLY((SVCXPRT *)xprt, &rply));
}
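
/*
 * Sketch of a dispatch routine using the reply helpers (illustrative;
 * MYPROC and the int argument/result are hypothetical):
 *
 *	static void
 *	my_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *		int arg, res;
 *
 *		switch (rqstp->rq_proc) {
 *		case NULLPROC:
 *			(void) svc_sendreply(xprt, (xdrproc_t)xdr_void, NULL);
 *			return;
 *		case MYPROC:
 *			if (!svc_getargs(xprt, (xdrproc_t)xdr_int,
 *			    (caddr_t)&arg)) {
 *				svcerr_decode(xprt);
 *				return;
 *			}
 *			res = arg + 1;
 *			(void) svc_sendreply(xprt, (xdrproc_t)xdr_int,
 *			    (caddr_t)&res);
 *			return;
 *		default:
 *			svcerr_noproc(xprt);
 *		}
 *	}
 */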

/*
 * No procedure error reply
 */
void
svcerr_noproc(const SVCXPRT *xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(const SVCXPRT *xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/*
 * Some system error
 */
void
svcerr_systemerr(const SVCXPRT *xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/*
 * Tell RPC package to not complain about version errors to the client. This
 * is useful when revving broadcast protocols that sit on a fixed address.
 * There is really one (or should be only one) example of this kind of
 * protocol: the portmapper (or rpc binder).
 */
void
__svc_versquiet_on(const SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_VERSQUIET;
}

void
__svc_versquiet_off(const SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	svc_flags(xprt) &= ~SVC_VERSQUIET;
}

void
svc_versquiet(const SVCXPRT *xprt)
{
	__svc_versquiet_on(xprt);
}

int
__svc_versquiet_get(const SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	return (svc_flags(xprt) & SVC_VERSQUIET);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(const SVCXPRT *xprt)
{
	svcerr_auth(xprt, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(const SVCXPRT *xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
    const rpcvers_t high_vers)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = low_vers;
	rply.acpted_rply.ar_vers.high = high_vers;
	SVC_REPLY((SVCXPRT *)xprt, &rply);
}

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Get server side input from some transport.
 *
 * Statement of authentication parameters management:
 * This function owns and manages all authentication parameters, specifically
 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
 * the "cooked" credentials (rqst->rq_clntcred).
 * However, this function does not know the structure of the cooked
 * credentials, so it makes the following assumptions:
 * a) the structure is contiguous (no pointers), and
 * b) the cred structure size does not exceed RQCRED_SIZE bytes.
 * In all events, all three parameters are freed upon exit from this routine.
 * The storage is trivially managed on the call stack in user land, but
 * is allocated with malloc in kernel land.
 */

void
svc_getreq(int rdfds)
{
	fd_set readfds;

	FD_ZERO(&readfds);
	readfds.fds_bits[0] = rdfds;
	svc_getreqset(&readfds);
}

void
svc_getreqset(fd_set *readfds)
{
	int i;

	for (i = 0; i < svc_max_fd; i++) {
		/* fd has input waiting */
		if (FD_ISSET(i, readfds))
			svc_getreq_common(i);
	}
}

void
svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
{
	int i;
	int fds_found;

	for (i = fds_found = 0; fds_found < pollretval; i++) {
		struct pollfd *p = &pfdp[i];

		if (p->revents) {
			/* fd has input waiting */
			fds_found++;
			/*
			 * We assume that this function is only called
			 * via someone select()ing from svc_fdset or
			 * poll()ing from svc_pollset[]. Thus it's safe
			 * to handle the POLLNVAL event by simply turning
			 * the corresponding bit off in svc_fdset. The
			 * svc_pollset[] array is derived from svc_fdset
			 * and so will also be updated eventually.
			 *
			 * XXX Should we do an xprt_unregister() instead?
			 */
			/* Handle user callback */
			if (__is_a_userfd(p->fd) == TRUE) {
				(void) rw_rdlock(&svc_fd_lock);
				__svc_getreq_user(p);
				(void) rw_unlock(&svc_fd_lock);
			} else {
				if (p->revents & POLLNVAL) {
					(void) rw_wrlock(&svc_fd_lock);
					remove_pollfd(p->fd);	/* XXX */
					(void) rw_unlock(&svc_fd_lock);
				} else {
					svc_getreq_common(p->fd);
				}
			}
		}
	}
}

void
svc_getreq_common(const int fd)
{
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct rpc_msg *msg;
	struct svc_req *r;
	char *cred_area;

	(void) rw_rdlock(&svc_fd_lock);

	/* HANDLE USER CALLBACK */
	if (__is_a_userfd(fd) == TRUE) {
		struct pollfd virtual_fd;

		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
		virtual_fd.fd = fd;
		__svc_getreq_user(&virtual_fd);
		(void) rw_unlock(&svc_fd_lock);
		return;
	}

	/*
	 * The transport associated with this fd could have been
	 * removed from svc_timeout_nonblock_xprt_and_LRU, for instance.
	 * This can happen if two or more fds get read events and are
	 * passed to svc_getreq_poll/set, the first fd is serviced by
	 * the dispatch routine and cleans up any dead transports. If
	 * one of the dead transports removed is the other fd that
	 * had a read event then svc_getreq_common() will be called with no
	 * xprt associated with the fd that had the original read event.
	 */
	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
		(void) rw_unlock(&svc_fd_lock);
		return;
	}
	(void) rw_unlock(&svc_fd_lock);
	/* LINTED pointer alignment */
	msg = SVCEXT(xprt)->msg;
	/* LINTED pointer alignment */
	r = SVCEXT(xprt)->req;
	/* LINTED pointer alignment */
	cred_area = SVCEXT(xprt)->cred_area;
	msg->rm_call.cb_cred.oa_base = cred_area;
	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);

	/* receive msgs from xprt (supports batch calls) */
	do {
		bool_t dispatch;

		if ((dispatch = SVC_RECV(xprt, msg)))
			(void) _svc_prog_dispatch(xprt, msg, r);
		/*
		 * Check if the xprt has been disconnected in a recursive call
		 * in the service dispatch routine. If so, then break.
		 */
		(void) rw_rdlock(&svc_fd_lock);
		if (xprt != svc_xports[fd]) {
			(void) rw_unlock(&svc_fd_lock);
			break;
		}
		(void) rw_unlock(&svc_fd_lock);

		/*
		 * Call cleanup procedure if set.
		 */
		if (__proc_cleanup_cb != NULL && dispatch)
			(*__proc_cleanup_cb)(xprt);

		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
			SVC_DESTROY(xprt);
			break;
		}
	} while (stat == XPRT_MOREREQS);
}

int
_svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
{
	struct svc_callout *s;
	enum auth_stat why;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	void (*disp_fn)();

	r->rq_xprt = xprt;
	r->rq_prog = msg->rm_call.cb_prog;
	r->rq_vers = msg->rm_call.cb_vers;
	r->rq_proc = msg->rm_call.cb_proc;
	r->rq_cred = msg->rm_call.cb_cred;
	/* LINTED pointer alignment */
	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
	/* LINTED pointer alignment */
	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;

	/* first authenticate the message */
	/* Check for null flavor and bypass these calls if possible */

	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
		r->rq_xprt->xp_verf.oa_length = 0;
	} else {
		bool_t no_dispatch;

		if ((why = __gss_authenticate(r, msg,
		    &no_dispatch)) != AUTH_OK) {
			svcerr_auth(xprt, why);
			return (0);
		}
		if (no_dispatch)
			return (0);
	}
	/* match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t)(0 - 1);
	high_vers = 0;
	(void) rw_rdlock(&svc_lock);
	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
		if (s->sc_prog == r->rq_prog) {
			prog_found = TRUE;
			if (s->sc_vers == r->rq_vers) {
				if ((xprt->xp_netid == NULL) ||
				    (s->sc_netid == NULL) ||
				    (strcmp(xprt->xp_netid,
				    s->sc_netid) == 0)) {
					disp_fn = (*s->sc_dispatch);
					(void) rw_unlock(&svc_lock);
					disp_fn(r, xprt);
					return (1);
				}
				prog_found = FALSE;
			}
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		} /* found correct program */
	}
	(void) rw_unlock(&svc_lock);

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found) {
		/* LINTED pointer alignment */
		if (!version_keepquiet(xprt))
			svcerr_progvers(xprt, low_vers, high_vers);
	} else {
		svcerr_noprog(xprt);
	}
	return (0);
}

/* ******************* SVCXPRT allocation and deallocation ***************** */

/*
 * svc_xprt_alloc() - allocate a service transport handle
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt = NULL;
	SVCXPRT_EXT *xt = NULL;
	SVCXPRT_LIST *xlist = NULL;
	struct rpc_msg *msg = NULL;
	struct svc_req *req = NULL;
	char *cred_area = NULL;

	if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
		goto err_exit;

	if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
		goto err_exit;
	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */

	if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
		goto err_exit;
	xt->my_xlist = xlist;
	xlist->xprt = xprt;

	if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
		goto err_exit;
	xt->msg = msg;

	if ((req = malloc(sizeof (struct svc_req))) == NULL)
		goto err_exit;
	xt->req = req;

	if ((cred_area = malloc(2 * MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
		goto err_exit;
	xt->cred_area = cred_area;

	/* LINTED pointer alignment */
	(void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
	return (xprt);

err_exit:
	svc_xprt_free(xprt);
	return (NULL);
}


/*
 * svc_xprt_free() - free a service handle
 */
void
svc_xprt_free(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
	SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist : NULL;
	struct rpc_msg *msg = xt ? xt->msg : NULL;
	struct svc_req *req = xt ? xt->req : NULL;
	char *cred_area = xt ? xt->cred_area : NULL;

	if (xprt)
		free(xprt);
	if (xt)
		free(xt);
	if (my_xlist)
		free(my_xlist);
	if (msg)
		free(msg);
	if (req)
		free(req);
	if (cred_area)
		free(cred_area);
}


/*
 * svc_xprt_destroy() - free parent and child xprt list
 */
void
svc_xprt_destroy(SVCXPRT *xprt)
{
	SVCXPRT_LIST *xlist, *xnext = NULL;
	int type;

	/* LINTED pointer alignment */
	if (SVCEXT(xprt)->parent)
		/* LINTED pointer alignment */
		xprt = SVCEXT(xprt)->parent;
	/* LINTED pointer alignment */
	type = svc_type(xprt);
	/* LINTED pointer alignment */
	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
		xnext = xlist->next;
		xprt = xlist->xprt;
		switch (type) {
		case SVC_DGRAM:
			svc_dg_xprtfree(xprt);
			break;
		case SVC_RENDEZVOUS:
			svc_vc_xprtfree(xprt);
			break;
		case SVC_CONNECTION:
			svc_fd_xprtfree(xprt);
			break;
		case SVC_DOOR:
			svc_door_xprtfree(xprt);
			break;
		}
	}
}


/*
 * svc_copy() - make a copy of parent
 */
SVCXPRT *
svc_copy(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	switch (svc_type(xprt)) {
	case SVC_DGRAM:
		return (svc_dg_xprtcopy(xprt));
	case SVC_RENDEZVOUS:
		return (svc_vc_xprtcopy(xprt));
	case SVC_CONNECTION:
		return (svc_fd_xprtcopy(xprt));
	}
	return (NULL);
}


/*
 * _svc_destroy_private() - private SVC_DESTROY interface
 */
void
_svc_destroy_private(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	switch (svc_type(xprt)) {
	case SVC_DGRAM:
		_svc_dg_destroy_private(xprt);
		break;
	case SVC_RENDEZVOUS:
	case SVC_CONNECTION:
		_svc_vc_destroy_private(xprt, TRUE);
		break;
	}
}

/*
 * svc_get_local_cred() - fetch local user credentials. This always
 * works over doors based transports. For local transports, this
 * does not yield correct results unless the __rpc_negotiate_uid()
 * call has been invoked to enable this feature.
 */
bool_t
svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
{
	/* LINTED pointer alignment */
	if (svc_type(xprt) == SVC_DOOR)
		return (__svc_get_door_cred(xprt, lcred));
	return (__rpc_get_local_cred(xprt, lcred));
}
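
/*
 * Illustrative call site (assuming an euid field in svc_local_cred_t):
 * inside a dispatch routine, a server can ask for the caller's local
 * credentials before acting on a request.
 *
 *	svc_local_cred_t cred;
 *
 *	if (svc_get_local_cred(rqstp->rq_xprt, &cred) && cred.euid == 0) {
 *		... caller is local root ...
 *	}
 */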


/* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */

/*
 * The dup caching routines below provide a cache of received
 * transactions. RPC service routines can use this to detect
 * retransmissions and re-send a non-failure response. An lru scheme
 * is used to decide which entries to evict from the cache, though
 * only DUP_DONE entries are placed on the lru list.
 * The routines were written towards development of a generic
 * SVC_DUP() interface, which can be expanded to encompass the
 * svc_dg_enablecache() routines as well. The cache is currently
 * private to the automounter.
 */


/* dupcache header contains xprt specific information */
struct dupcache {
	rwlock_t dc_lock;
	time_t dc_time;
	int dc_buckets;
	int dc_maxsz;
	int dc_basis;
	struct dupreq *dc_mru;
	struct dupreq **dc_hashtbl;
};

/*
 * private duplicate cache request routines
 */
static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
    struct dupcache *, uint32_t, uint32_t);
static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
    struct dupcache *, uint32_t, uint32_t, time_t);
static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
    struct dupcache *, uint32_t, uint32_t);
#ifdef DUP_DEBUG
static void __svc_dupcache_debug(struct dupcache *);
#endif /* DUP_DEBUG */

/* default parameters for the dupcache */
#define	DUPCACHE_BUCKETS 257
#define	DUPCACHE_TIME 900
#define	DUPCACHE_MAXSZ INT_MAX

/*
 * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
 * initialize the duprequest cache and assign it to the xprt_cache
 * Use default values depending on the cache condition and basis.
 * return TRUE on success and FALSE on failure
 */
bool_t
__svc_dupcache_init(void *condition, int basis, char **xprt_cache)
{
	static mutex_t initdc_lock = DEFAULTMUTEX;
	int i;
	struct dupcache *dc;

	(void) mutex_lock(&initdc_lock);
	if (*xprt_cache != NULL) { /* do only once per xprt */
		(void) mutex_unlock(&initdc_lock);
		syslog(LOG_ERR,
		    "__svc_dupcache_init: multiply defined dup cache");
		return (FALSE);
	}

	switch (basis) {
	case DUPCACHE_FIXEDTIME:
		dc = malloc(sizeof (struct dupcache));
		if (dc == NULL) {
			(void) mutex_unlock(&initdc_lock);
			syslog(LOG_ERR,
			    "__svc_dupcache_init: memory alloc failed");
			return (FALSE);
		}
		(void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
		if (condition != NULL)
			dc->dc_time = *((time_t *)condition);
		else
			dc->dc_time = DUPCACHE_TIME;
		dc->dc_buckets = DUPCACHE_BUCKETS;
		dc->dc_maxsz = DUPCACHE_MAXSZ;
		dc->dc_basis = basis;
		dc->dc_mru = NULL;
		dc->dc_hashtbl = malloc(dc->dc_buckets *
		    sizeof (struct dupreq *));
		if (dc->dc_hashtbl == NULL) {
			free(dc);
			(void) mutex_unlock(&initdc_lock);
			syslog(LOG_ERR,
			    "__svc_dupcache_init: memory alloc failed");
			return (FALSE);
		}
		for (i = 0; i < DUPCACHE_BUCKETS; i++)
			dc->dc_hashtbl[i] = NULL;
		*xprt_cache = (char *)dc;
		break;
	default:
		(void) mutex_unlock(&initdc_lock);
		syslog(LOG_ERR,
		    "__svc_dupcache_init: undefined dup cache basis");
		return (FALSE);
	}

	(void) mutex_unlock(&initdc_lock);

	return (TRUE);
}
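
/*
 * Sketch of the intended calling sequence (illustrative; the cache
 * pointer is typically stashed in transport-private data):
 *
 *	static char *xprt_cache = NULL;
 *	time_t ttl = 900;
 *
 *	if (!__svc_dupcache_init((void *)&ttl, DUPCACHE_FIXEDTIME,
 *	    &xprt_cache))
 *		return;
 *
 *	switch (__svc_dup(req, &resp_buf, &resp_bufsz, xprt_cache)) {
 *	case DUP_NEW:
 *		... execute the procedure, then record the outcome ...
 *		(void) __svc_dupdone(req, resp_buf, resp_bufsz,
 *		    DUP_DONE, xprt_cache);
 *		break;
 *	case DUP_DONE:
 *		... retransmission: re-send resp_buf without re-executing ...
 *		break;
 *	case DUP_INPROGRESS:
 *	case DUP_DROP:
 *		... drop the retransmission ...
 *		break;
 *	}
 */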
1759
1760 /*
1761 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1762 * char *xprt_cache)
1763 * searches the request cache. Creates an entry and returns DUP_NEW if
1764 * the request is not found in the cache. If it is found, then it
1765 * returns the state of the request (in progress, drop, or done) and
1766 * also allocates, and passes back results to the user (if any) in
1767 * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1768 */
1769 int
__svc_dup(struct svc_req * req,caddr_t * resp_buf,uint_t * resp_bufsz,char * xprt_cache)1770 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1771 char *xprt_cache)
1772 {
1773 uint32_t drxid, drhash;
1774 int rc;
1775 struct dupreq *dr = NULL;
1776 time_t timenow = time(NULL);
1777
1778 /* LINTED pointer alignment */
1779 struct dupcache *dc = (struct dupcache *)xprt_cache;
1780
1781 if (dc == NULL) {
1782 syslog(LOG_ERR, "__svc_dup: undefined cache");
1783 return (DUP_ERROR);
1784 }
1785
1786 /* get the xid of the request */
1787 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1788 syslog(LOG_ERR, "__svc_dup: xid error");
1789 return (DUP_ERROR);
1790 }
1791 drhash = drxid % dc->dc_buckets;
1792
1793 if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1794 drhash)) != DUP_NEW)
1795 return (rc);
1796
1797 if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1798 return (DUP_ERROR);
1799
1800 if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1801 == DUP_ERROR)
1802 return (rc);
1803
1804 return (DUP_NEW);
1805 }
1806
1807
1808
1809 /*
1810 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1811 * uint_t *resp_bufsz,truct dupcache *dc, uint32_t drxid,
1812 * uint32_t drhash)
1813 * Checks to see whether an entry already exists in the cache. If it does
1814 * copy back into the resp_buf, if appropriate. Return the status of
1815 * the request, or DUP_NEW if the entry is not in the cache
1816 */
1817 static int
__svc_dupcache_check(struct svc_req * req,caddr_t * resp_buf,uint_t * resp_bufsz,struct dupcache * dc,uint32_t drxid,uint32_t drhash)1818 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1819 struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1820 {
1821 struct dupreq *dr = NULL;
1822
1823 (void) rw_rdlock(&(dc->dc_lock));
1824 dr = dc->dc_hashtbl[drhash];
1825 while (dr != NULL) {
1826 if (dr->dr_xid == drxid &&
1827 dr->dr_proc == req->rq_proc &&
1828 dr->dr_prog == req->rq_prog &&
1829 dr->dr_vers == req->rq_vers &&
1830 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1831 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1832 dr->dr_addr.len) == 0) { /* entry found */
1833 if (dr->dr_hash != drhash) {
1834 /* sanity check */
1835 (void) rw_unlock((&dc->dc_lock));
1836 syslog(LOG_ERR,
1837 "\n__svc_dupdone: hashing error");
1838 return (DUP_ERROR);
1839 }
1840
1841 /*
1842 * return results for requests on lru list, if
1843 * appropriate requests must be DUP_DROP or DUP_DONE
1844 * to have a result. A NULL buffer in the cache
1845 * implies no results were sent during dupdone.
1846 * A NULL buffer in the call implies not interested
1847 * in results.
1848 */
1849 if (((dr->dr_status == DUP_DONE) ||
1850 (dr->dr_status == DUP_DROP)) &&
1851 resp_buf != NULL &&
1852 dr->dr_resp.buf != NULL) {
1853 *resp_buf = malloc(dr->dr_resp.len);
1854 if (*resp_buf == NULL) {
1855 syslog(LOG_ERR,
1856 "__svc_dupcache_check: malloc failed");
1857 (void) rw_unlock(&(dc->dc_lock));
1858 return (DUP_ERROR);
1859 }
1860 (void) memset(*resp_buf, 0, dr->dr_resp.len);
1861 (void) memcpy(*resp_buf, dr->dr_resp.buf,
1862 dr->dr_resp.len);
1863 *resp_bufsz = dr->dr_resp.len;
1864 } else {
1865 /* no result */
1866 if (resp_buf)
1867 *resp_buf = NULL;
1868 if (resp_bufsz)
1869 *resp_bufsz = 0;
1870 }
1871 (void) rw_unlock(&(dc->dc_lock));
1872 return (dr->dr_status);
1873 }
1874 dr = dr->dr_chain;
1875 }
1876 (void) rw_unlock(&(dc->dc_lock));
1877 return (DUP_NEW);
1878 }
1879
1880 /*
1881 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1882 * Return a victim dupreq entry to the caller, depending on cache policy.
1883 */
1884 static struct dupreq *
__svc_dupcache_victim(struct dupcache * dc,time_t timenow)1885 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1886 {
1887 struct dupreq *dr = NULL;
1888
1889 switch (dc->dc_basis) {
1890 case DUPCACHE_FIXEDTIME:
1891 /*
1892 * The hash policy is to free up a bit of the hash
1893 * table before allocating a new entry as the victim.
1894 * Freeing up the hash table each time should split
1895 * the cost of keeping the hash table clean among threads.
1896 * Note that only DONE or DROPPED entries are on the lru
1897 * list but we do a sanity check anyway.
1898 */
1899 (void) rw_wrlock(&(dc->dc_lock));
1900 while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1901 ((timenow - dr->dr_time) > dc->dc_time)) {
1902 /* clean and then free the entry */
1903 if (dr->dr_status != DUP_DONE &&
1904 dr->dr_status != DUP_DROP) {
1905 /*
1906 * The LRU list can't contain an
1907 * entry where the status is other than
1908 * DUP_DONE or DUP_DROP.
1909 */
1910 syslog(LOG_ERR,
1911 "__svc_dupcache_victim: bad victim");
1912 #ifdef DUP_DEBUG
1913 /*
1914 * Need to hold the reader/writers lock to
1915 * print the cache info, since we already
1916 * hold the writers lock, we shall continue
1917 * calling __svc_dupcache_debug()
1918 */
1919 __svc_dupcache_debug(dc);
1920 #endif /* DUP_DEBUG */
1921 (void) rw_unlock(&(dc->dc_lock));
1922 return (NULL);
1923 }
1924 /* free buffers */
1925 if (dr->dr_resp.buf) {
1926 free(dr->dr_resp.buf);
1927 dr->dr_resp.buf = NULL;
1928 }
1929 if (dr->dr_addr.buf) {
1930 free(dr->dr_addr.buf);
1931 dr->dr_addr.buf = NULL;
1932 }
1933
1934 /* unhash the entry */
1935 if (dr->dr_chain)
1936 dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1937 if (dr->dr_prevchain)
1938 dr->dr_prevchain->dr_chain = dr->dr_chain;
1939 if (dc->dc_hashtbl[dr->dr_hash] == dr)
1940 dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1941
1942 /* modify the lru pointers */
1943 if (dc->dc_mru == dr) {
1944 dc->dc_mru = NULL;
1945 } else {
1946 dc->dc_mru->dr_next = dr->dr_next;
1947 dr->dr_next->dr_prev = dc->dc_mru;
1948 }
1949 free(dr);
1950 dr = NULL;
1951 }
1952 (void) rw_unlock(&(dc->dc_lock));
1953
1954 /*
1955 * Allocate and return new clean entry as victim
1956 */
1957 if ((dr = malloc(sizeof (*dr))) == NULL) {
1958 syslog(LOG_ERR,
1959 "__svc_dupcache_victim: malloc failed");
1960 return (NULL);
1961 }
1962 (void) memset(dr, 0, sizeof (*dr));
1963 return (dr);
1964 default:
1965 syslog(LOG_ERR,
1966 "__svc_dupcache_victim: undefined dup cache_basis");
1967 return (NULL);
1968 }
1969 }
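
/*
 * Added commentary: a sketch of the lru list layout the victim scan above
 * relies on.  The list is circular and doubly linked; dc_mru points at the
 * most recently used entry, so dc_mru->dr_next is the least recently used
 * one, which is why the scan always starts there:
 *
 *	dc_mru --> [MRU] --dr_next--> [LRU] --dr_next--> ... --> [MRU]
 *	           [MRU] <--dr_prev-- [LRU] <--dr_prev-- ... <-- [MRU]
 */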
1970
1971 /*
1972 * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1973 * struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1974  *	Build a new duprequest entry and insert it into the cache.
1975 */
1976 static int
1977 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1978 struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1979 {
1980 dr->dr_xid = drxid;
1981 dr->dr_prog = req->rq_prog;
1982 dr->dr_vers = req->rq_vers;
1983 dr->dr_proc = req->rq_proc;
1984 dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1985 dr->dr_addr.len = dr->dr_addr.maxlen;
1986 if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1987 syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1988 free(dr);
1989 return (DUP_ERROR);
1990 }
1991 (void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1992 (void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1993 dr->dr_addr.len);
1994 dr->dr_resp.buf = NULL;
1995 dr->dr_resp.maxlen = 0;
1996 dr->dr_resp.len = 0;
1997 dr->dr_status = DUP_INPROGRESS;
1998 dr->dr_time = timenow;
1999 dr->dr_hash = drhash; /* needed for efficient victim cleanup */
2000
2001 /* place entry at head of hash table */
2002 (void) rw_wrlock(&(dc->dc_lock));
2003 dr->dr_chain = dc->dc_hashtbl[drhash];
2004 dr->dr_prevchain = NULL;
2005 if (dc->dc_hashtbl[drhash] != NULL)
2006 dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2007 dc->dc_hashtbl[drhash] = dr;
2008 (void) rw_unlock(&(dc->dc_lock));
2009 return (DUP_NEW);
2010 }
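
/*
 * Added commentary: an entry created by __svc_dupcache_enter() is hashed
 * but deliberately kept off the lru list; it stays DUP_INPROGRESS until
 * __svc_dupcache_update() marks it DUP_DONE or DUP_DROP and links it in
 * at the mru position.  This preserves the victim scan's invariant that
 * only completed entries appear on the lru list.
 */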
2011
2012 /*
2013 * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2014 * int status, char *xprt_cache)
2015  *	Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2016  *	Only DUP_DONE and DUP_DROP are accepted as status values. Moves the
2017  *	entry to the mru position in the lru list. Returns DUP_ERROR or status.
2018 */
2019 int
2020 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2021 int status, char *xprt_cache)
2022 {
2023 uint32_t drxid, drhash;
2024 int rc;
2025
2026 /* LINTED pointer alignment */
2027 struct dupcache *dc = (struct dupcache *)xprt_cache;
2028
2029 if (dc == NULL) {
2030 syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2031 return (DUP_ERROR);
2032 }
2033
2034 if (status != DUP_DONE && status != DUP_DROP) {
2035 syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2036 syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
2037 return (DUP_ERROR);
2038 }
2039
2040 /* find the xid of the entry in the cache */
2041 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void *)&drxid) == FALSE) {
2042 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2043 return (DUP_ERROR);
2044 }
2045 drhash = drxid % dc->dc_buckets;
2046
2047 /* update the status of the entry and result buffers, if required */
2048 if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2049 dc, drxid, drhash)) == DUP_ERROR) {
2050 syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2051 return (DUP_ERROR);
2052 }
2053
2054 return (rc);
2055 }
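
/*
 * Usage sketch (added commentary; everything except __svc_dupdone() and
 * svc_sendreply() is a hypothetical name): one plausible way a dispatch
 * routine could record the reply it just sent for a DUP_NEW request.
 * Whether the stored bytes are a raw result or a marshalled reply is up
 * to the caller; the cache only hands back a verbatim copy later.
 *
 *	if (svc_sendreply(req->rq_xprt, xdr_result, (caddr_t)&result)) {
 *		(void) __svc_dupdone(req, (caddr_t)&result,
 *		    sizeof (result), DUP_DONE, xprt_cache);
 *	} else {
 *		(void) __svc_dupdone(req, NULL, 0, DUP_DROP, xprt_cache);
 *	}
 */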
2056
2057 /*
2058 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2059 * uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2060 * uint32_t drhash)
2061  *	Check if the entry exists in the dupcache. If so, update its status,
2062  *	time, and buffer, as appropriate. It is possible, though unlikely,
2063  *	for DONE requests not to exist in the cache. Return DUP_ERROR or status.
2064 */
2065 static int
2066 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2067 int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2068 {
2069 struct dupreq *dr = NULL;
2070 time_t timenow = time(NULL);
2071
2072 (void) rw_wrlock(&(dc->dc_lock));
2073 dr = dc->dc_hashtbl[drhash];
2074 while (dr != NULL) {
2075 if (dr->dr_xid == drxid &&
2076 dr->dr_proc == req->rq_proc &&
2077 dr->dr_prog == req->rq_prog &&
2078 dr->dr_vers == req->rq_vers &&
2079 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2080 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2081 dr->dr_addr.len) == 0) { /* entry found */
2082 if (dr->dr_hash != drhash) {
2083 /* sanity check */
2084 (void) rw_unlock(&(dc->dc_lock));
2085 syslog(LOG_ERR,
2086 				    "__svc_dupdone: hashing error");
2087 return (DUP_ERROR);
2088 }
2089
2090 			/* store the results if buffer is not NULL */
2091 if (resp_buf != NULL) {
2092 if ((dr->dr_resp.buf =
2093 malloc(resp_bufsz)) == NULL) {
2094 (void) rw_unlock(&(dc->dc_lock));
2095 syslog(LOG_ERR,
2096 "__svc_dupdone: malloc failed");
2097 return (DUP_ERROR);
2098 }
2099 (void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2100 (void) memcpy(dr->dr_resp.buf, resp_buf,
2101 (uint_t)resp_bufsz);
2102 dr->dr_resp.len = resp_bufsz;
2103 }
2104
2105 /* update status and done time */
2106 dr->dr_status = status;
2107 dr->dr_time = timenow;
2108
2109 /* move the entry to the mru position */
2110 if (dc->dc_mru == NULL) {
2111 dr->dr_next = dr;
2112 dr->dr_prev = dr;
2113 } else {
2114 dr->dr_next = dc->dc_mru->dr_next;
2115 dc->dc_mru->dr_next->dr_prev = dr;
2116 dr->dr_prev = dc->dc_mru;
2117 dc->dc_mru->dr_next = dr;
2118 }
2119 dc->dc_mru = dr;
2120
2121 (void) rw_unlock(&(dc->dc_lock));
2122 return (status);
2123 }
2124 dr = dr->dr_chain;
2125 }
2126 (void) rw_unlock(&(dc->dc_lock));
2127 syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2128 return (DUP_ERROR);
2129 }
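
/*
 * Added commentary: a before/after sketch of the mru insertion above,
 * with hypothetical entries A (current mru) and L (current lru):
 *
 *	before:	dc_mru --> A,  A --dr_next--> L
 *	after:	dc_mru --> dr, A --dr_next--> dr, dr --dr_next--> L
 *
 * dr is spliced in between the old mru and the lru and becomes the new
 * mru, leaving the lru entry itself unchanged.
 */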
2130
2131 #ifdef DUP_DEBUG
2132 /*
2133 * __svc_dupcache_debug(struct dupcache *dc)
2134  *	print the contents of the hash table and the lru list
2135 *
2136 * This function requires the caller to hold the reader
2137 * or writer version of the duplicate request cache lock (dc_lock).
2138 */
2139 static void
2140 __svc_dupcache_debug(struct dupcache *dc)
2141 {
2142 struct dupreq *dr = NULL;
2143 int i;
2144 bool_t bval;
2145
2146 fprintf(stderr, " HASHTABLE\n");
2147 for (i = 0; i < dc->dc_buckets; i++) {
2148 bval = FALSE;
2149 dr = dc->dc_hashtbl[i];
2150 while (dr != NULL) {
2151 if (!bval) { /* ensures bucket printed only once */
2152 fprintf(stderr, " bucket : %d\n", i);
2153 bval = TRUE;
2154 }
2155 fprintf(stderr, "\txid: %u status: %d time: %ld",
2156 dr->dr_xid, dr->dr_status, dr->dr_time);
2157 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2158 			    (void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2159 dr = dr->dr_chain;
2160 }
2161 }
2162
2163 fprintf(stderr, " LRU\n");
2164 if (dc->dc_mru) {
2165 dr = dc->dc_mru->dr_next; /* lru */
2166 while (dr != dc->dc_mru) {
2167 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2168 			    dr->dr_xid, dr->dr_status, dr->dr_time);
2169 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2170 			    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2171 dr = dr->dr_next;
2172 }
2173 fprintf(stderr, "\txid: %u status: %d time: %ld",
2174 dr->dr_xid, dr->dr_status, dr->dr_time);
2175 		fprintf(stderr, " dr: %p next: %p prev: %p\n",
2176 		    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2177 }
2178 }
2179 #endif /* DUP_DEBUG */
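
/*
 * Added commentary: with DUP_DEBUG defined, __svc_dupcache_debug() emits
 * output in the shape below (all values and addresses are placeholders,
 * not captured output):
 *
 *	 HASHTABLE
 *	 bucket : 7
 *		xid: 42 status: 1 time: 100 dr: 0x... chain: 0x... prevchain: 0x...
 *	 LRU
 *		xid: 42 status: 1 time: 100 dr: 0x... next: 0x... prev: 0x...
 */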
2180