xref: /freebsd/sys/rpc/svc_vc.c (revision c5405d1c850765d04f74067ebb71f57e9a26b8ea)
1 /*	$NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2009, Sun Microsystems, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  * - Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  * - Redistributions in binary form must reproduce the above copyright notice,
14  *   this list of conditions and the following disclaimer in the documentation
15  *   and/or other materials provided with the distribution.
16  * - Neither the name of Sun Microsystems, Inc. nor the names of its
17  *   contributors may be used to endorse or promote products derived
18  *   from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * svc_vc.c, Server side for Connection Oriented based RPC.
36  *
37  * Actually implements two flavors of transporter -
 * a tcp rendezvouser (a listener and connection establisher)
39  * and a record/tcp stream.
40  */
41 
42 #include "opt_kern_tls.h"
43 
44 #include <sys/param.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/kernel.h>
48 #include <sys/ktls.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/protosw.h>
54 #include <sys/queue.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sx.h>
58 #include <sys/systm.h>
59 #include <sys/uio.h>
60 
61 #include <net/vnet.h>
62 
63 #include <netinet/tcp.h>
64 
65 #include <rpc/rpc.h>
66 #include <rpc/rpcsec_tls.h>
67 
68 #include <rpc/krpc.h>
69 #include <rpc/rpc_com.h>
70 
71 #include <security/mac/mac_framework.h>
72 
/*
 * Sysctl tree for the statistics below: kern.rpc.tls.* counts
 * RPC-over-TLS traffic, kern.rpc.unenc.* counts plain (unencrypted)
 * traffic.  All counters are per-vnet.
 */
SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "RPC");
SYSCTL_NODE(_kern_rpc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TLS");
SYSCTL_NODE(_kern_rpc, OID_AUTO, unenc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "unencrypted");

/* Non-TLS receive traffic, updated in svc_vc_recv(). */
KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_rx_msgbytes) = 0;
SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, rx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_rx_msgbytes), 0, "Count of non-TLS rx bytes");

KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_rx_msgcnt) = 0;
SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, rx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_rx_msgcnt), 0, "Count of non-TLS rx messages");

/* Non-TLS transmit traffic, updated in svc_vc_reply(). */
KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tx_msgbytes) = 0;
SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, tx_msgbytes, CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_tx_msgbytes), 0, "Count of non-TLS tx bytes");

KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tx_msgcnt) = 0;
SYSCTL_U64(_kern_rpc_unenc, OID_AUTO, tx_msgcnt, CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_tx_msgcnt), 0, "Count of non-TLS tx messages");

/* TLS alert records seen by svc_vc_recv() (soreceive returned ENXIO). */
KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_alerts) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, alerts,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_alerts), 0,
    "Count of TLS alert messages");

/*
 * Handshake outcome counters; non-static because they are updated by
 * the TLS handshake code outside this file.
 */
KRPC_VNET_DEFINE(uint64_t, svc_vc_tls_handshake_failed) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, handshake_failed,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_tls_handshake_failed), 0,
    "Count of TLS failed handshakes");

KRPC_VNET_DEFINE(uint64_t, svc_vc_tls_handshake_success) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, handshake_success,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW,
    &KRPC_VNET_NAME(svc_vc_tls_handshake_success), 0,
    "Count of TLS successful handshakes");

/* TLS receive traffic, updated in svc_vc_recv(). */
KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_rx_msgbytes) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, rx_msgbytes,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_rx_msgbytes), 0,
    "Count of TLS rx bytes");

KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_rx_msgcnt) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, rx_msgcnt,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_rx_msgcnt), 0,
    "Count of TLS rx messages");

/* TLS transmit traffic, updated in svc_vc_reply(). */
KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_tx_msgbytes) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, tx_msgbytes,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_tx_msgbytes), 0,
    "Count of TLS tx bytes");

KRPC_VNET_DEFINE_STATIC(uint64_t, svc_vc_tls_tx_msgcnt) = 0;
SYSCTL_U64(_kern_rpc_tls, OID_AUTO, tx_msgcnt,
    CTLFLAG_KRPC_VNET | CTLFLAG_RW, &KRPC_VNET_NAME(svc_vc_tls_tx_msgcnt), 0,
    "Count of TLS tx messages");
132 
/*
 * Forward declarations for the three transport flavors implemented in
 * this file: the rendezvous (listening) transport, the per-connection
 * stream transport, and the backchannel transport that shares a
 * clnt_vc socket.
 */
static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
static void svc_vc_rendezvous_destroy(SVCXPRT *);
static bool_t svc_vc_null(void);
static void svc_vc_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_stat(SVCXPRT *);
static bool_t svc_vc_ack(SVCXPRT *, uint32_t *);
static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *, uint32_t *seq);
static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
    void *in);
static void svc_vc_backchannel_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *);
static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *, uint32_t *);
static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq,
    void *in);
static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
    struct sockaddr *raddr);
static int svc_vc_accept(struct socket *head, struct socket **sop);
static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
static int svc_vc_rendezvous_soupcall(struct socket *, void *, int);

/*
 * Method table for the rendezvous transport.  A rendezvous transport
 * never replies, so xp_reply points at the always-FALSE stub
 * svc_vc_null() (cast to the reply signature).
 */
static const struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *,
		struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};

/* Method table for a connected stream transport. */
static const struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_ack =	svc_vc_ack,
	.xp_reply =	svc_vc_reply,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};

/* Method table for a backchannel transport (no xp_ack). */
static const struct xp_ops svc_vc_backchannel_ops = {
	.xp_recv =	svc_vc_backchannel_recv,
	.xp_stat =	svc_vc_backchannel_stat,
	.xp_reply =	svc_vc_backchannel_reply,
	.xp_destroy =	svc_vc_backchannel_destroy,
	.xp_control =	svc_vc_backchannel_control
};
187 
188 /*
189  * Usage:
190  *	xprt = svc_vc_create(sock, send_buf_size, recv_buf_size);
191  *
192  * Creates, registers, and returns a (rpc) tcp based transporter.
193  * Once *xprt is initialized, it is registered as a transporter
194  * see (svc.h, xprt_register).  This routine returns
195  * a NULL if a problem occurred.
196  *
 * The file descriptor passed in is expected to refer to a bound, but
198  * not yet connected socket.
199  *
200  * Since streams do buffered io similar to stdio, the caller can specify
201  * how big the send and receive buffers are via the second and third parms;
202  * 0 => use the system default.
203  */
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	/* Note: sendsize and recvsize are accepted for API compatibility
	 * but are not used by this implementation. */
	SVCXPRT *xprt;
	struct sockaddr* sa;
	int error;

	/*
	 * A socket that is already connected (or was connected and has
	 * since disconnected) cannot be turned into a listener; build a
	 * per-connection transport for it directly instead.
	 */
	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		CURVNET_SET(so->so_vnet);
		error = so->so_proto->pr_peeraddr(so, &sa);
		CURVNET_RESTORE();
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	/* Otherwise set up a rendezvous (accepting) transport. */
	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	/*
	 * NOTE(review): the return value of solisten() is ignored here;
	 * on failure the transport stays registered on a non-listening
	 * socket -- confirm whether that is intentional.
	 */
	solisten(so, -1, curthread);

	/* Hook the listen upcall so completed connections activate us. */
	SOLISTEN_LOCK(so);
	xprt->xp_upcallset = 1;
	solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt);
	SOLISTEN_UNLOCK(so);

	return (xprt);

cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);

	return (NULL);
}
261 
262 /*
 * Create a new transport for a socket obtained via soaccept().
264  */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	/* Enable keep-alive so dead clients are eventually detected. */
	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error) {
		return (NULL);
	}

	/* For TCP, disable Nagle so small RPC replies go out promptly. */
	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			return (NULL);
		}
	}

	/* Per-connection record-parsing state; no record pending yet. */
	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	/* Receive upcall activates the transport when data arrives. */
	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);
	mem_free(cd, sizeof(*cd));

	return (NULL);
}
350 
351 /*
352  * Create a new transport for a backchannel on a clnt_vc socket.
353  */
354 SVCXPRT *
355 svc_vc_create_backchannel(SVCPOOL *pool)
356 {
357 	SVCXPRT *xprt = NULL;
358 	struct cf_conn *cd = NULL;
359 
360 	cd = mem_alloc(sizeof(*cd));
361 	cd->strm_stat = XPRT_IDLE;
362 
363 	xprt = svc_xprt_alloc();
364 	sx_init(&xprt->xp_lock, "xprt->xp_lock");
365 	xprt->xp_pool = pool;
366 	xprt->xp_socket = NULL;
367 	xprt->xp_p1 = cd;
368 	xprt->xp_p2 = NULL;
369 	xprt->xp_ops = &svc_vc_backchannel_ops;
370 	return (xprt);
371 }
372 
373 /*
374  * This does all of the accept except the final call to soaccept. The
375  * caller will call soaccept after dropping its locks (soaccept may
376  * call malloc).
377  */
378 int
379 svc_vc_accept(struct socket *head, struct socket **sop)
380 {
381 	struct socket *so;
382 	int error = 0;
383 	short nbio;
384 
385 	KASSERT(SOLISTENING(head),
386 	    ("%s: socket %p is not listening", __func__, head));
387 
388 #ifdef MAC
389 	error = mac_socket_check_accept(curthread->td_ucred, head);
390 	if (error != 0)
391 		goto done;
392 #endif
393 	/*
394 	 * XXXGL: we want non-blocking semantics.  The socket could be a
395 	 * socket created by kernel as well as socket shared with userland,
396 	 * so we can't be sure about presense of SS_NBIO.  We also shall not
397 	 * toggle it on the socket, since that may surprise userland.  So we
398 	 * set SS_NBIO only temporarily.
399 	 */
400 	SOLISTEN_LOCK(head);
401 	nbio = head->so_state & SS_NBIO;
402 	head->so_state |= SS_NBIO;
403 	error = solisten_dequeue(head, &so, 0);
404 	head->so_state &= (nbio & ~SS_NBIO);
405 	if (error)
406 		goto done;
407 
408 	so->so_state |= nbio;
409 	*sop = so;
410 
411 	/* connection has been removed from the listen queue */
412 	KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0);
413 done:
414 	return (error);
415 }
416 
417 /*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK.
		 */
		SOLISTEN_LOCK(xprt->xp_socket);
		if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp))
			xprt_inactive_self(xprt);
		SOLISTEN_UNLOCK(xprt->xp_socket);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/*
		 * Hard error: tear down our upcall and go inactive.
		 * NOTE(review): this uses soupcall_clear(SO_RCV) while
		 * the upcall was installed with solisten_upcall_set()
		 * (and svc_vc_rendezvous_destroy() clears it with
		 * solisten_upcall_set(NULL, NULL)) -- confirm this is
		 * correct for a listening socket.
		 */
		SOLISTEN_LOCK(xprt->xp_socket);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOLISTEN_UNLOCK(xprt->xp_socket);
		xprt_inactive_self(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	/* Finish the accept outside our lock; soaccept may allocate. */
	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}
494 
495 /*ARGSUSED*/
496 static enum xprt_stat
497 svc_vc_rendezvous_stat(SVCXPRT *xprt)
498 {
499 
500 	return (XPRT_IDLE);
501 }
502 
/*
 * Teardown shared by the rendezvous and connection destructors:
 * dispose of the socket (with TLS-specific handling) and free the
 * transport structure.
 */
static void
svc_vc_destroy_common(SVCXPRT *xprt)
{
	uint32_t reterr;

	if (xprt->xp_socket) {
		if ((xprt->xp_tls & (RPCTLS_FLAGS_HANDSHAKE |
		    RPCTLS_FLAGS_HANDSHFAIL)) != 0) {
			if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) {
				/*
				 * If the upcall fails, the socket has
				 * probably been closed via the rpctlssd
				 * daemon having crashed or been
				 * restarted, so just ignore returned stat.
				 */
				rpctls_srv_disconnect(xprt->xp_sslsec,
				    xprt->xp_sslusec, xprt->xp_sslrefno,
				    xprt->xp_sslproc, &reterr);
			}
			/* Must sorele() to get rid of reference. */
			CURVNET_SET(xprt->xp_socket->so_vnet);
			sorele(xprt->xp_socket);
			CURVNET_RESTORE();
		} else
			/* Non-TLS sockets are simply closed. */
			(void)soclose(xprt->xp_socket);
	}

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}
534 
/*
 * Destroy a rendezvous transport: detach the listen upcall first so
 * no further activations can reference the dying transport, then do
 * the common teardown.
 */
static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	SOLISTEN_LOCK(xprt->xp_socket);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		solisten_upcall_set(xprt->xp_socket, NULL, NULL);
	}
	SOLISTEN_UNLOCK(xprt->xp_socket);

	svc_vc_destroy_common(xprt);
}
548 
/*
 * Destroy a connection transport: detach the receive upcall, release
 * any backchannel client, run common teardown, then free the record
 * parsing state and any buffered mbufs.
 */
static void
svc_vc_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
	CLIENT *cl = (CLIENT *)xprt->xp_p2;

	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		if (xprt->xp_socket->so_rcv.sb_upcall != NULL)
			soupcall_clear(xprt->xp_socket, SO_RCV);
	}
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	if (cl != NULL)
		CLNT_RELEASE(cl);

	svc_vc_destroy_common(xprt);

	/* cd was detached above; free partial and pending records. */
	if (cd->mreq)
		m_freem(cd->mreq);
	if (cd->mpending)
		m_freem(cd->mpending);
	mem_free(cd, sizeof(*cd));
}
574 
575 static void
576 svc_vc_backchannel_destroy(SVCXPRT *xprt)
577 {
578 	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
579 	struct mbuf *m, *m2;
580 
581 	svc_xprt_free(xprt);
582 	m = cd->mreq;
583 	while (m != NULL) {
584 		m2 = m;
585 		m = m->m_nextpkt;
586 		m_freem(m2);
587 	}
588 	mem_free(cd, sizeof(*cd));
589 }
590 
591 /*ARGSUSED*/
592 static bool_t
593 svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
594 {
595 	return (FALSE);
596 }
597 
598 static bool_t
599 svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
600 {
601 
602 	return (FALSE);
603 }
604 
605 static bool_t
606 svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in)
607 {
608 
609 	return (FALSE);
610 }
611 
612 static enum xprt_stat
613 svc_vc_stat(SVCXPRT *xprt)
614 {
615 	struct cf_conn *cd;
616 
617 	cd = (struct cf_conn *)(xprt->xp_p1);
618 
619 	if (cd->strm_stat == XPRT_DIED)
620 		return (XPRT_DIED);
621 
622 	if (cd->mreq != NULL && cd->resid == 0 && cd->eor)
623 		return (XPRT_MOREREQS);
624 
625 	if (soreadable(xprt->xp_socket))
626 		return (XPRT_MOREREQS);
627 
628 	return (XPRT_IDLE);
629 }
630 
631 static bool_t
632 svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
633 {
634 
635 	*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
636 	*ack -= sbused(&xprt->xp_socket->so_snd);
637 	return (TRUE);
638 }
639 
640 static enum xprt_stat
641 svc_vc_backchannel_stat(SVCXPRT *xprt)
642 {
643 	struct cf_conn *cd;
644 
645 	cd = (struct cf_conn *)(xprt->xp_p1);
646 
647 	if (cd->mreq != NULL)
648 		return (XPRT_MOREREQS);
649 
650 	return (XPRT_IDLE);
651 }
652 
653 /*
654  * If we have an mbuf chain in cd->mpending, try to parse a record from it,
655  * leaving the result in cd->mreq. If we don't have a complete record, leave
656  * the partial result in cd->mreq and try to read more from the socket.
657  */
/*
 * Parse buffered socket data (cd->mpending) into an RPC record
 * (cd->mreq) using the RFC 1831 record-marking protocol.  Returns
 * TRUE (as int) if progress was made, FALSE if more socket data is
 * needed; also tunes so_rcv.sb_lowat to control when the next upcall
 * fires.
 */
static int
svc_vc_process_pending(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct socket *so = xprt->xp_socket;
	struct mbuf *m;

	/*
	 * If cd->resid is non-zero, we have part of the
	 * record already, otherwise we are expecting a record
	 * marker.
	 */
	if (!cd->resid && cd->mpending) {
		/*
		 * See if there is enough data buffered to
		 * make up a record marker. Make sure we can
		 * handle the case where the record marker is
		 * split across more than one mbuf.
		 */
		size_t n = 0;
		uint32_t header;

		m = cd->mpending;
		while (n < sizeof(uint32_t) && m) {
			n += m->m_len;
			m = m->m_next;
		}
		if (n < sizeof(uint32_t)) {
			/* Ask for exactly the missing marker bytes. */
			so->so_rcv.sb_lowat = sizeof(uint32_t) - n;
			return (FALSE);
		}
		m_copydata(cd->mpending, 0, sizeof(header),
		    (char *)&header);
		header = ntohl(header);
		/* High bit flags the last fragment; low 31 bits = length. */
		cd->eor = (header & 0x80000000) != 0;
		cd->resid = header & 0x7fffffff;
		m_adj(cd->mpending, sizeof(uint32_t));
	}

	/*
	 * Start pulling off mbufs from cd->mpending
	 * until we either have a complete record or
	 * we run out of data. We use m_split to pull
	 * data - it will pull as much as possible and
	 * split the last mbuf if necessary.
	 */
	while (cd->mpending && cd->resid) {
		m = cd->mpending;
		if (cd->mpending->m_next
		    || cd->mpending->m_len > cd->resid)
			cd->mpending = m_split(cd->mpending,
			    cd->resid, M_WAITOK);
		else
			cd->mpending = NULL;
		if (cd->mreq)
			m_last(cd->mreq)->m_next = m;
		else
			cd->mreq = m;
		while (m) {
			cd->resid -= m->m_len;
			m = m->m_next;
		}
	}

	/*
	 * Block receive upcalls if we have more data pending,
	 * otherwise report our need.
	 */
	if (cd->mpending)
		so->so_rcv.sb_lowat = INT_MAX;
	else
		so->so_rcv.sb_lowat =
		    imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2));
	return (TRUE);
}
733 
/*
 * Receive one RPC call message from a connection transport.  Loops
 * parsing buffered data and reading from the socket until a complete
 * record is available, decoding it into *msg with the body returned in
 * *mp.  Handles RPC-over-TLS records (including alert handling via the
 * rpctlssd daemon) and dispatch of backchannel REPLY messages to the
 * client code.  Returns TRUE with a decoded call, FALSE otherwise.
 */
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m, *ctrl;
	struct socket* so = xprt->xp_socket;
	XDR xdrs;
	int error, rcvflag;
	uint32_t reterr, xid_plus_direction[2];
	struct cmsghdr *cmsg;
	struct tls_get_record tgr;
	enum clnt_stat ret;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/* If we have no request ready, check pending queue. */
		while (cd->mpending &&
		    (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
			if (!svc_vc_process_pending(xprt))
				break;
		}

		/* Process and return complete request in cd->mreq. */
		if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {

			/*
			 * Now, check for a backchannel reply.
			 * The XID is in the first uint32_t of the reply
			 * and the message direction is the second one.
			 */
			if ((cd->mreq->m_len >= sizeof(xid_plus_direction) ||
			    m_length(cd->mreq, NULL) >=
			    sizeof(xid_plus_direction)) &&
			    xprt->xp_p2 != NULL) {
				m_copydata(cd->mreq, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == REPLY) {
					/* Hand replies to the client side. */
					clnt_bck_svccall(xprt->xp_p2,
					    cd->mreq,
					    xid_plus_direction[0]);
					cd->mreq = NULL;
					continue;
				}
			}

			xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
			cd->mreq = NULL;

			/* Check for next request in a pending queue. */
			svc_vc_process_pending(xprt);
			if (cd->mreq == NULL || cd->resid != 0) {
				SOCKBUF_LOCK(&so->so_rcv);
				if (!soreadable(so))
					xprt_inactive_self(xprt);
				SOCKBUF_UNLOCK(&so->so_rcv);
			}

			sx_xunlock(&xprt->xp_lock);

			if (! xdr_callmsg(&xdrs, msg)) {
				XDR_DESTROY(&xdrs);
				return (FALSE);
			}

			*addrp = NULL;
			*mp = xdrmbuf_getall(&xdrs);
			XDR_DESTROY(&xdrs);

			return (TRUE);
		}

		/*
		 * If receiving is disabled so that a TLS handshake can be
		 * done by the rpctlssd daemon, return FALSE here.
		 */
		rcvflag = MSG_DONTWAIT;
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0)
			rcvflag |= MSG_TLSAPPDATA;
tryagain:
		if (xprt->xp_dontrcv) {
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		ctrl = m = NULL;
		error = soreceive(so, NULL, &uio, &m, &ctrl, &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			SOCKBUF_LOCK(&so->so_rcv);
			if (!soreadable(so))
				xprt_inactive_self(xprt);
			SOCKBUF_UNLOCK(&so->so_rcv);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/*
		 * A return of ENXIO indicates that there is an
		 * alert record at the head of the
		 * socket's receive queue, for TLS connections.
		 * This record needs to be handled in userland
		 * via an SSL_read() call, so do an upcall to the daemon.
		 */
		KRPC_CURVNET_SET(so->so_vnet);
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0 &&
		    error == ENXIO) {
			KRPC_VNET(svc_vc_tls_alerts)++;
			KRPC_CURVNET_RESTORE();
			/* Disable reception. */
			xprt->xp_dontrcv = TRUE;
			sx_xunlock(&xprt->xp_lock);
			ret = rpctls_srv_handlerecord(xprt->xp_sslsec,
			    xprt->xp_sslusec, xprt->xp_sslrefno,
			    xprt->xp_sslproc, &reterr);
			sx_xlock(&xprt->xp_lock);
			xprt->xp_dontrcv = FALSE;
			if (ret != RPC_SUCCESS || reterr != RPCTLSERR_OK) {
				/*
				 * All we can do is soreceive() it and
				 * then toss it.
				 */
				rcvflag = MSG_DONTWAIT;
				goto tryagain;
			}
			sx_xunlock(&xprt->xp_lock);
			xprt_active(xprt);   /* Harmless if already active. */
			return (FALSE);
		}

		if (error) {
			/* Hard receive error: tear down the upcall and die. */
			KRPC_CURVNET_RESTORE();
			SOCKBUF_LOCK(&so->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(so, SO_RCV);
			}
			SOCKBUF_UNLOCK(&so->so_rcv);
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			KRPC_CURVNET_RESTORE();
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/* Process any record header(s). */
		if (ctrl != NULL) {
			cmsg = mtod(ctrl, struct cmsghdr *);
			if (cmsg->cmsg_type == TLS_GET_RECORD &&
			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
				/*
				 * TLS_RLTYPE_ALERT records should be handled
				 * since soreceive() would have returned
				 * ENXIO.  Just throw any other
				 * non-TLS_RLTYPE_APP records away.
				 */
				if (tgr.tls_type != TLS_RLTYPE_APP) {
					m_freem(m);
					m_free(ctrl);
					rcvflag = MSG_DONTWAIT | MSG_TLSAPPDATA;
					KRPC_CURVNET_RESTORE();
					goto tryagain;
				}
				KRPC_VNET(svc_vc_tls_rx_msgcnt)++;
				KRPC_VNET(svc_vc_tls_rx_msgbytes) +=
				    1000000000 - uio.uio_resid;
			}
			m_free(ctrl);
		} else {
			KRPC_VNET(svc_vc_rx_msgcnt)++;
			KRPC_VNET(svc_vc_rx_msgbytes) += 1000000000 -
			    uio.uio_resid;
		}
		KRPC_CURVNET_RESTORE();

		/* Append what we read and loop back to parse it. */
		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}
955 
/*
 * Receive one backchannel request: dequeue the first mbuf chain from
 * cd->mreq (linked via m_nextpkt, filled in by the clnt_vc code via
 * xp_p2) and XDR-decode it into *msg / *mp.
 */
static bool_t
svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct ct_data *ct;
	struct mbuf *m;
	XDR xdrs;

	sx_xlock(&xprt->xp_lock);
	ct = (struct ct_data *)xprt->xp_p2;
	if (ct == NULL) {
		/* The client side has gone away. */
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	/* ct_lock protects the request queue against the client side. */
	mtx_lock(&ct->ct_lock);
	m = cd->mreq;
	if (m == NULL) {
		/* Nothing queued; go inactive until more work arrives. */
		xprt_inactive_self(xprt);
		mtx_unlock(&ct->ct_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	cd->mreq = m->m_nextpkt;
	mtx_unlock(&ct->ct_lock);
	sx_xunlock(&xprt->xp_lock);

	/* Decode outside the locks. */
	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}
	*addrp = NULL;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);
	return (TRUE);
}
993 
/*
 * Send an RPC reply on a connection transport: XDR-encode the reply
 * header (appending the pre-encoded body m on success), prepend the
 * RFC 1831 record mark, convert to ext_pgs mbufs for KERN_TLS when the
 * connection uses RPC-over-TLS, and sosend() the result.  On success
 * *seq (if non-NULL) receives the send-counter value used by
 * svc_vc_ack().
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, len, maxextsiz;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			/* Success replies carry the caller-built body. */
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		len = mrep->m_pkthdr.len;
		/* High bit set: this is the final (only) fragment. */
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (len - sizeof(uint32_t)));

		/* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */
		KRPC_CURVNET_SET(xprt->xp_socket->so_vnet);
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) {
			/*
			 * Copy the mbuf chain to a chain of
			 * ext_pgs mbuf(s) as required by KERN_TLS.
			 */
			maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
			if (rpctls_getinfo(&maxlen, false, false))
				maxextsiz = min(maxextsiz, maxlen);
#endif
			mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz);
			KRPC_VNET(svc_vc_tls_tx_msgcnt)++;
			KRPC_VNET(svc_vc_tls_tx_msgbytes) += len;
		} else {
			KRPC_VNET(svc_vc_tx_msgcnt)++;
			KRPC_VNET(svc_vc_tx_msgbytes) += len;
		}
		KRPC_CURVNET_RESTORE();
		/* Bump the send counter before handing off the data. */
		atomic_add_32(&xprt->xp_snd_cnt, len);
		/*
		 * sosend consumes mreq.
		 */
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			atomic_add_rel_32(&xprt->xp_snt_cnt, len);
			if (seq)
				*seq = xprt->xp_snd_cnt;
			stat = TRUE;
		} else
			/* Undo the counter bump; nothing was sent. */
			atomic_subtract_32(&xprt->xp_snd_cnt, len);
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}
1076 
/*
 * Send a backchannel reply.  Like svc_vc_reply() but transmits on the
 * client connection's socket (ct->ct_socket via xp_p2) and does not
 * maintain the send/ack counters.
 */
static bool_t
svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	struct ct_data *ct;
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, maxextsiz;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			/* Success replies carry the caller-built body. */
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (mrep->m_pkthdr.len
				- sizeof(uint32_t)));

		/* For RPC-over-TLS, copy mrep to a chain of ext_pgs. */
		if ((xprt->xp_tls & RPCTLS_FLAGS_HANDSHAKE) != 0) {
			/*
			 * Copy the mbuf chain to a chain of
			 * ext_pgs mbuf(s) as required by KERN_TLS.
			 */
			maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
			if (rpctls_getinfo(&maxlen, false, false))
				maxextsiz = min(maxextsiz, maxlen);
#endif
			mrep = _rpc_copym_into_ext_pgs(mrep, maxextsiz);
		}
		/* xp_lock guards xp_p2 against the client disconnecting. */
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
			    0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}
1151 
1152 static bool_t
1153 svc_vc_null(void)
1154 {
1155 
1156 	return (FALSE);
1157 }
1158 
1159 static int
1160 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
1161 {
1162 	SVCXPRT *xprt = (SVCXPRT *) arg;
1163 
1164 	if (soreadable(xprt->xp_socket))
1165 		xprt_active(xprt);
1166 	return (SU_OK);
1167 }
1168 
1169 static int
1170 svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag)
1171 {
1172 	SVCXPRT *xprt = (SVCXPRT *) arg;
1173 
1174 	if (!TAILQ_EMPTY(&head->sol_comp))
1175 		xprt_active(xprt);
1176 	return (SU_OK);
1177 }
1178 
#if 0
/*
 * NOTE(review): compiled-out userland-era code (uses xp_fd and
 * getpeereid(), which are not available in this kernel context);
 * retained for reference only.
 */
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
1202