xref: /illumos-gate/usr/src/uts/common/fs/doorfs/door_sys.c (revision b35c6776bcf599e80d0bcf7e248313c3e5b4847a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * System call I/F to doors (outside of vnodes I/F) and misc support
31  * routines
32  */
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/door.h>
36 #include <sys/door_data.h>
37 #include <sys/proc.h>
38 #include <sys/thread.h>
39 #include <sys/class.h>
40 #include <sys/cred.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
49 #include <sys/vfs.h>
50 #include <sys/vfs_opreg.h>
51 #include <sys/sobject.h>
52 #include <sys/schedctl.h>
53 #include <sys/callb.h>
54 #include <sys/ucred.h>
55 
56 #include <sys/mman.h>
57 #include <sys/sysmacros.h>
58 #include <sys/vmsystm.h>
59 #include <vm/as.h>
60 #include <vm/hat.h>
61 #include <vm/page.h>
62 #include <vm/seg.h>
63 #include <vm/seg_vn.h>
64 #include <vm/seg_vn.h>
65 
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
70 
71 /*
72  * The maximum amount of data (in bytes) that will be transferred using
73  * an intermediate kernel buffer.  For sizes greater than this we map
74  * in the destination pages and perform a 1-copy transfer.
75  */
76 size_t	door_max_arg = 16 * 1024;
77 
78 /*
79  * Maximum amount of data that will be transferred in a reply to a
80  * door_upcall.  Need to guard against a process returning huge amounts
81  * of data and getting the kernel stuck in kmem_alloc.
82  */
83 size_t	door_max_upcall_reply = 1024 * 1024;
84 
85 /*
86  * Maximum number of descriptors allowed to be passed in a single
87  * door_call or door_return.  We need to allocate kernel memory
88  * for all of them at once, so we can't let it scale without limit.
89  */
90 uint_t door_max_desc = 1024;
91 
92 /*
93  * Definition of a door handle, used by other kernel subsystems when
94  * calling door functions.  This is really a file structure but we
95  * want to hide that fact.
96  */
97 struct __door_handle {
98 	file_t dh_file;
99 };
100 
101 #define	DHTOF(dh) ((file_t *)(dh))
102 #define	FTODH(fp) ((door_handle_t)(fp))
103 
104 static int doorfs(long, long, long, long, long, long);
105 
106 static struct sysent door_sysent = {
107 	6,
108 	SE_ARGC | SE_NOUNLOAD,
109 	(int (*)())doorfs,
110 };
111 
112 static struct modlsys modlsys = {
113 	&mod_syscallops, "doors", &door_sysent
114 };
115 
116 #ifdef _SYSCALL32_IMPL
117 
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120     int32_t arg5, int32_t subcode);
121 
122 static struct sysent door_sysent32 = {
123 	6,
124 	SE_ARGC | SE_NOUNLOAD,
125 	(int (*)())doorfs32,
126 };
127 
128 static struct modlsys modlsys32 = {
129 	&mod_syscallops32,
130 	"32-bit door syscalls",
131 	&door_sysent32
132 };
133 #endif
134 
135 static struct modlinkage modlinkage = {
136 	MODREV_1,
137 	&modlsys,
138 #ifdef _SYSCALL32_IMPL
139 	&modlsys32,
140 #endif
141 	NULL
142 };
143 
144 dev_t	doordev;
145 
146 extern	struct vfs door_vfs;
147 extern	struct vnodeops *door_vnodeops;
148 
149 int
150 _init(void)
151 {
152 	static const fs_operation_def_t door_vfsops_template[] = {
153 		NULL, NULL
154 	};
155 	extern const fs_operation_def_t door_vnodeops_template[];
156 	vfsops_t *door_vfsops;
157 	major_t major;
158 	int error;
159 
160 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
161 	if ((major = getudev()) == (major_t)-1)
162 		return (ENXIO);
163 	doordev = makedevice(major, 0);
164 
165 	/* Create a dummy vfs */
166 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
167 	if (error != 0) {
168 		cmn_err(CE_WARN, "door init: bad vfs ops");
169 		return (error);
170 	}
171 	VFS_INIT(&door_vfs, door_vfsops, NULL);
172 	door_vfs.vfs_flag = VFS_RDONLY;
173 	door_vfs.vfs_dev = doordev;
174 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
175 
176 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
177 	if (error != 0) {
178 		vfs_freevfsops(door_vfsops);
179 		cmn_err(CE_WARN, "door init: bad vnode ops");
180 		return (error);
181 	}
182 	return (mod_install(&modlinkage));
183 }
184 
185 int
186 _info(struct modinfo *modinfop)
187 {
188 	return (mod_info(&modlinkage, modinfop));
189 }
190 
191 /* system call functions */
192 static int door_call(int, void *);
193 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
194 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
195     uint_t), void *data_cookie, uint_t);
196 static int door_revoke(int);
197 static int door_info(int, struct door_info *);
198 static int door_ucred(struct ucred_s *);
199 static int door_bind(int);
200 static int door_unbind(void);
201 static int door_unref(void);
202 static int door_getparam(int, int, size_t *);
203 static int door_setparam(int, int, size_t);
204 
205 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
206 
207 /*
208  * System call wrapper for all door related system calls
209  */
210 static int
211 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
212 {
213 	switch (subcode) {
214 	case DOOR_CALL:
215 		return (door_call(arg1, (void *)arg2));
216 	case DOOR_RETURN: {
217 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
218 
219 		if (drdp != NULL) {
220 			door_return_desc_t drd;
221 			if (copyin(drdp, &drd, sizeof (drd)))
222 				return (EFAULT);
223 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
224 			    drd.desc_num, (caddr_t)arg4, arg5));
225 		}
226 		return (door_return((caddr_t)arg1, arg2, NULL,
227 		    0, (caddr_t)arg4, arg5));
228 	}
229 	case DOOR_RETURN_OLD:
230 		/*
231 		 * In order to support the S10 runtime environment, we
232 		 * still respond to the old syscall subcode for door_return.
233 		 * We treat it as having no stack limits.  This code should
234 		 * be removed when such support is no longer needed.
235 		 */
236 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
237 		    arg4, (caddr_t)arg5, 0));
238 	case DOOR_CREATE:
239 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
240 	case DOOR_REVOKE:
241 		return (door_revoke(arg1));
242 	case DOOR_INFO:
243 		return (door_info(arg1, (struct door_info *)arg2));
244 	case DOOR_BIND:
245 		return (door_bind(arg1));
246 	case DOOR_UNBIND:
247 		return (door_unbind());
248 	case DOOR_UNREFSYS:
249 		return (door_unref());
250 	case DOOR_UCRED:
251 		return (door_ucred((struct ucred_s *)arg1));
252 	case DOOR_GETPARAM:
253 		return (door_getparam(arg1, arg2, (size_t *)arg3));
254 	case DOOR_SETPARAM:
255 		return (door_setparam(arg1, arg2, arg3));
256 	default:
257 		return (set_errno(EINVAL));
258 	}
259 }
260 
261 #ifdef _SYSCALL32_IMPL
262 /*
263  * System call wrapper for all door related system calls from 32-bit programs.
264  * Needed at the moment because of the casts - they undo some damage
265  * that truss causes (sign-extending the stack pointer) when truss'ing
266  * a 32-bit program using doors.
267  */
268 static int
269 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
270     int32_t arg4, int32_t arg5, int32_t subcode)
271 {
272 	switch (subcode) {
273 	case DOOR_CALL:
274 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
275 	case DOOR_RETURN: {
276 		door_return_desc32_t *drdp =
277 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
278 		if (drdp != NULL) {
279 			door_return_desc32_t drd;
280 			if (copyin(drdp, &drd, sizeof (drd)))
281 				return (EFAULT);
282 			return (door_return(
283 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
284 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
285 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
286 			    (size_t)(uintptr_t)(size32_t)arg5));
287 		}
288 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
289 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
290 		    (size_t)(uintptr_t)(size32_t)arg5));
291 	}
292 	case DOOR_RETURN_OLD:
293 		/*
294 		 * In order to support the S10 runtime environment, we
295 		 * still respond to the old syscall subcode for door_return.
296 		 * We treat it as having no stack limits.  This code should
297 		 * be removed when such support is no longer needed.
298 		 */
299 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
300 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
301 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
302 	case DOOR_CREATE:
303 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
304 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
305 	case DOOR_REVOKE:
306 		return (door_revoke(arg1));
307 	case DOOR_INFO:
308 		return (door_info(arg1,
309 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
310 	case DOOR_BIND:
311 		return (door_bind(arg1));
312 	case DOOR_UNBIND:
313 		return (door_unbind());
314 	case DOOR_UNREFSYS:
315 		return (door_unref());
316 	case DOOR_UCRED:
317 		return (door_ucred(
318 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
319 	case DOOR_GETPARAM:
320 		return (door_getparam(arg1, arg2,
321 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
322 	case DOOR_SETPARAM:
323 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
324 
325 	default:
326 		return (set_errno(EINVAL));
327 	}
328 }
329 #endif
330 
331 void shuttle_resume(kthread_t *, kmutex_t *);
332 void shuttle_swtch(kmutex_t *);
333 void shuttle_sleep(kthread_t *);
334 
335 /*
336  * Support routines
337  */
338 static int door_create_common(void (*)(), void *, uint_t, int, int *,
339     file_t **);
340 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
341 static int door_args(kthread_t *, int);
342 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
344 static void	door_server_exit(proc_t *, kthread_t *);
345 static void	door_release_server(door_node_t *, kthread_t *);
346 static kthread_t	*door_get_server(door_node_t *);
347 static door_node_t	*door_lookup(int, file_t **);
348 static int	door_translate_in(void);
349 static int	door_translate_out(void);
350 static void	door_fd_rele(door_desc_t *, uint_t, int);
351 static void	door_list_insert(door_node_t *);
352 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
353 static int	door_release_fds(door_desc_t *, uint_t);
354 static void	door_fd_close(door_desc_t *, uint_t);
355 static void	door_fp_close(struct file **, uint_t);
356 
357 static door_data_t *
358 door_my_data(int create_if_missing)
359 {
360 	door_data_t *ddp;
361 
362 	ddp = curthread->t_door;
363 	if (create_if_missing && ddp == NULL)
364 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
365 
366 	return (ddp);
367 }
368 
369 static door_server_t *
370 door_my_server(int create_if_missing)
371 {
372 	door_data_t *ddp = door_my_data(create_if_missing);
373 
374 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
375 }
376 
377 static door_client_t *
378 door_my_client(int create_if_missing)
379 {
380 	door_data_t *ddp = door_my_data(create_if_missing);
381 
382 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
383 }
384 
385 /*
386  * System call to create a door
387  */
388 int
389 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
390 {
391 	int fd;
392 	int err;
393 
394 	if ((attributes & ~DOOR_CREATE_MASK) ||
395 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
396 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
397 		return (set_errno(EINVAL));
398 
399 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
400 	    &fd, NULL)) != 0)
401 		return (set_errno(err));
402 
403 	f_setfd(fd, FD_CLOEXEC);
404 	return (fd);
405 }
406 
407 /*
408  * Common code for creating user and kernel doors.  If a door was
409  * created, stores a file structure pointer in the location pointed
410  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
411  * pointer to a file descriptor is passed in as fdp, allocates a file
412  * descriptor representing the door.  If a door could not be created,
413  * returns an error.
414  */
415 static int
416 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
417     int from_kernel, int *fdp, file_t **fpp)
418 {
419 	door_node_t	*dp;
420 	vnode_t		*vp;
421 	struct file	*fp;
422 	static door_id_t index = 0;
423 	proc_t		*p = (from_kernel)? &p0 : curproc;
424 
425 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
426 
427 	dp->door_vnode = vn_alloc(KM_SLEEP);
428 	dp->door_target = p;
429 	dp->door_data = data_cookie;
430 	dp->door_pc = pc_cookie;
431 	dp->door_flags = attributes;
432 #ifdef _SYSCALL32_IMPL
433 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
434 		dp->door_data_max = UINT32_MAX;
435 	else
436 #endif
437 		dp->door_data_max = SIZE_MAX;
438 	dp->door_data_min = 0UL;
439 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
440 
441 	vp = DTOV(dp);
442 	vn_setops(vp, door_vnodeops);
443 	vp->v_type = VDOOR;
444 	vp->v_vfsp = &door_vfs;
445 	vp->v_data = (caddr_t)dp;
446 	mutex_enter(&door_knob);
447 	dp->door_index = index++;
448 	/* add to per-process door list */
449 	door_list_insert(dp);
450 	mutex_exit(&door_knob);
451 
452 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
453 		/*
454 		 * If the file table is full, remove the door from the
455 		 * per-process list, free the door, and return NULL.
456 		 */
457 		mutex_enter(&door_knob);
458 		door_list_delete(dp);
459 		mutex_exit(&door_knob);
460 		vn_free(vp);
461 		kmem_free(dp, sizeof (door_node_t));
462 		return (EMFILE);
463 	}
464 	vn_exists(vp);
465 	if (fdp != NULL)
466 		setf(*fdp, fp);
467 	mutex_exit(&fp->f_tlock);
468 
469 	if (fpp != NULL)
470 		*fpp = fp;
471 	return (0);
472 }
473 
474 static int
475 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
476 {
477 	ASSERT(MUTEX_HELD(&door_knob));
478 
479 	/* we allow unref upcalls through, despite any minimum */
480 	if (da->data_size < dp->door_data_min &&
481 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
482 		return (ENOBUFS);
483 
484 	if (da->data_size > dp->door_data_max)
485 		return (ENOBUFS);
486 
487 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
488 		return (ENOTSUP);
489 
490 	if (da->desc_num > dp->door_desc_max)
491 		return (ENFILE);
492 
493 	return (0);
494 }
495 
496 /*
497  * Door invocation.
498  */
499 int
500 door_call(int did, void *args)
501 {
502 	/* Locals */
503 	door_node_t	*dp;
504 	kthread_t	*server_thread;
505 	int		error = 0;
506 	klwp_t		*lwp;
507 	door_client_t	*ct;		/* curthread door_data */
508 	door_server_t	*st;		/* server thread door_data */
509 	door_desc_t	*start = NULL;
510 	uint_t		ncopied = 0;
511 	size_t		dsize;
512 	/* destructor for data returned by a kernel server */
513 	void		(*destfn)() = NULL;
514 	void		*destarg;
515 	model_t		datamodel;
516 	int		gotresults = 0;
517 	int		cancel_pending;
518 
519 	lwp = ttolwp(curthread);
520 	datamodel = lwp_getdatamodel(lwp);
521 
522 	ct = door_my_client(1);
523 
524 	/*
525 	 * Get the arguments
526 	 */
527 	if (args) {
528 		if (datamodel == DATAMODEL_NATIVE) {
529 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
530 				return (set_errno(EFAULT));
531 		} else {
532 			door_arg32_t    da32;
533 
534 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
535 				return (set_errno(EFAULT));
536 			ct->d_args.data_ptr =
537 			    (char *)(uintptr_t)da32.data_ptr;
538 			ct->d_args.data_size = da32.data_size;
539 			ct->d_args.desc_ptr =
540 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
541 			ct->d_args.desc_num = da32.desc_num;
542 			ct->d_args.rbuf =
543 			    (char *)(uintptr_t)da32.rbuf;
544 			ct->d_args.rsize = da32.rsize;
545 		}
546 	} else {
547 		/* No arguments, and no results allowed */
548 		ct->d_noresults = 1;
549 		ct->d_args.data_size = 0;
550 		ct->d_args.desc_num = 0;
551 		ct->d_args.rsize = 0;
552 	}
553 
554 	if ((dp = door_lookup(did, NULL)) == NULL)
555 		return (set_errno(EBADF));
556 
557 	/*
558 	 * We don't want to hold the door FD over the entire operation;
559 	 * instead, we put a hold on the door vnode and release the FD
560 	 * immediately
561 	 */
562 	VN_HOLD(DTOV(dp));
563 	releasef(did);
564 
565 	mutex_enter(&door_knob);
566 	if (DOOR_INVALID(dp)) {
567 		mutex_exit(&door_knob);
568 		error = EBADF;
569 		goto out;
570 	}
571 
572 	/*
573 	 * before we do anything, check that we are not overflowing the
574 	 * required limits.
575 	 */
576 	error = door_check_limits(dp, &ct->d_args, 0);
577 	if (error != 0) {
578 		mutex_exit(&door_knob);
579 		goto out;
580 	}
581 
582 	/*
583 	 * Check for in-kernel door server.
584 	 */
585 	if (dp->door_target == &p0) {
586 		caddr_t rbuf = ct->d_args.rbuf;
587 		size_t rsize = ct->d_args.rsize;
588 
589 		dp->door_active++;
590 		ct->d_kernel = 1;
591 		ct->d_error = DOOR_WAIT;
592 		mutex_exit(&door_knob);
593 		/* translate file descriptors to vnodes */
594 		if (ct->d_args.desc_num) {
595 			error = door_translate_in();
596 			if (error)
597 				goto out;
598 		}
599 		/*
600 		 * Call kernel door server.  Arguments are passed and
601 		 * returned as a door_arg pointer.  When called, data_ptr
602 		 * points to user data and desc_ptr points to a kernel list
603 		 * of door descriptors that have been converted to file
604 		 * structure pointers.  It's the server function's
605 		 * responsibility to copyin the data pointed to by data_ptr
606 		 * (this avoids extra copying in some cases).  On return,
607 		 * data_ptr points to a user buffer of data, and desc_ptr
608 		 * points to a kernel list of door descriptors representing
609 		 * files.  When a reference is passed to a kernel server,
610 		 * it is the server's responsibility to release the reference
611 		 * (by calling closef).  When the server includes a
612 		 * reference in its reply, it is released as part of the
613 		 * the call (the server must duplicate the reference if
614 		 * it wants to retain a copy).  The destfn, if set to
615 		 * non-NULL, is a destructor to be called when the returned
616 		 * kernel data (if any) is no longer needed (has all been
617 		 * translated and copied to user level).
618 		 */
619 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
620 		    &destfn, &destarg, &error);
621 		mutex_enter(&door_knob);
622 		/* not implemented yet */
623 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
624 			door_deliver_unref(dp);
625 		mutex_exit(&door_knob);
626 		if (error)
627 			goto out;
628 
629 		/* translate vnodes to files */
630 		if (ct->d_args.desc_num) {
631 			error = door_translate_out();
632 			if (error)
633 				goto out;
634 		}
635 		ct->d_buf = ct->d_args.rbuf;
636 		ct->d_bufsize = ct->d_args.rsize;
637 		if (rsize < (ct->d_args.data_size +
638 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
639 			/* handle overflow */
640 			error = door_overflow(curthread, ct->d_args.data_ptr,
641 			    ct->d_args.data_size, ct->d_args.desc_ptr,
642 			    ct->d_args.desc_num);
643 			if (error)
644 				goto out;
645 			/* door_overflow sets d_args rbuf and rsize */
646 		} else {
647 			ct->d_args.rbuf = rbuf;
648 			ct->d_args.rsize = rsize;
649 		}
650 		goto results;
651 	}
652 
653 	/*
654 	 * Get a server thread from the target domain
655 	 */
656 	if ((server_thread = door_get_server(dp)) == NULL) {
657 		if (DOOR_INVALID(dp))
658 			error = EBADF;
659 		else
660 			error = EAGAIN;
661 		mutex_exit(&door_knob);
662 		goto out;
663 	}
664 
665 	st = DOOR_SERVER(server_thread->t_door);
666 	if (ct->d_args.desc_num || ct->d_args.data_size) {
667 		int is_private = (dp->door_flags & DOOR_PRIVATE);
668 		/*
669 		 * Move data from client to server
670 		 */
671 		DOOR_T_HOLD(st);
672 		mutex_exit(&door_knob);
673 		error = door_args(server_thread, is_private);
674 		mutex_enter(&door_knob);
675 		DOOR_T_RELEASE(st);
676 		if (error) {
677 			/*
678 			 * We're not going to resume this thread after all
679 			 */
680 			door_release_server(dp, server_thread);
681 			shuttle_sleep(server_thread);
682 			mutex_exit(&door_knob);
683 			goto out;
684 		}
685 	}
686 
687 	dp->door_active++;
688 	ct->d_error = DOOR_WAIT;
689 	ct->d_args_done = 0;
690 	st->d_caller = curthread;
691 	st->d_active = dp;
692 
693 	shuttle_resume(server_thread, &door_knob);
694 
695 	mutex_enter(&door_knob);
696 shuttle_return:
697 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
698 		/*
699 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
700 		 */
701 		mutex_exit(&door_knob);		/* May block in ISSIG */
702 		cancel_pending = 0;
703 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
704 		    MUSTRETURN(curproc, curthread) ||
705 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
706 			/* Signal, forkall, ... */
707 			lwp->lwp_sysabort = 0;
708 			if (cancel_pending)
709 				schedctl_cancel_eintr();
710 			mutex_enter(&door_knob);
711 			error = EINTR;
712 			/*
713 			 * If the server has finished processing our call,
714 			 * or exited (calling door_slam()), then d_error
715 			 * will have changed.  If the server hasn't finished
716 			 * yet, d_error will still be DOOR_WAIT, and we
717 			 * let it know we are not interested in any
718 			 * results by sending a SIGCANCEL, unless the door
719 			 * is marked with DOOR_NO_CANCEL.
720 			 */
721 			if (ct->d_error == DOOR_WAIT &&
722 			    st->d_caller == curthread) {
723 				proc_t	*p = ttoproc(server_thread);
724 
725 				st->d_active = NULL;
726 				st->d_caller = NULL;
727 
728 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
729 					DOOR_T_HOLD(st);
730 					mutex_exit(&door_knob);
731 
732 					mutex_enter(&p->p_lock);
733 					sigtoproc(p, server_thread, SIGCANCEL);
734 					mutex_exit(&p->p_lock);
735 
736 					mutex_enter(&door_knob);
737 					DOOR_T_RELEASE(st);
738 				}
739 			}
740 		} else {
741 			/*
742 			 * Return from stop(), server exit...
743 			 *
744 			 * Note that the server could have done a
745 			 * door_return while the client was in stop state
746 			 * (ISSIG), in which case the error condition
747 			 * is updated by the server.
748 			 */
749 			mutex_enter(&door_knob);
750 			if (ct->d_error == DOOR_WAIT) {
751 				/* Still waiting for a reply */
752 				shuttle_swtch(&door_knob);
753 				mutex_enter(&door_knob);
754 				lwp->lwp_asleep = 0;
755 				goto	shuttle_return;
756 			} else if (ct->d_error == DOOR_EXIT) {
757 				/* Server exit */
758 				error = EINTR;
759 			} else {
760 				/* Server did a door_return during ISSIG */
761 				error = ct->d_error;
762 			}
763 		}
764 		/*
765 		 * Can't exit if the server is currently copying
766 		 * results for me.
767 		 */
768 		while (DOOR_T_HELD(ct))
769 			cv_wait(&ct->d_cv, &door_knob);
770 
771 		/*
772 		 * If the server has not processed our message, free the
773 		 * descriptors.
774 		 */
775 		if (!ct->d_args_done) {
776 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
777 			ct->d_args_done = 1;
778 		}
779 
780 		/*
781 		 * Find out if results were successfully copied.
782 		 */
783 		if (ct->d_error == 0)
784 			gotresults = 1;
785 	}
786 	ASSERT(ct->d_args_done);
787 	lwp->lwp_asleep = 0;		/* /proc */
788 	lwp->lwp_sysabort = 0;		/* /proc */
789 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
790 		door_deliver_unref(dp);
791 	mutex_exit(&door_knob);
792 
793 results:
794 	/*
795 	 * Move the results to userland (if any)
796 	 */
797 
798 	if (ct->d_noresults)
799 		goto out;
800 
801 	if (error) {
802 		/*
803 		 * If server returned results successfully, then we've
804 		 * been interrupted and may need to clean up.
805 		 */
806 		if (gotresults) {
807 			ASSERT(error == EINTR);
808 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
809 		}
810 		goto out;
811 	}
812 
813 	/*
814 	 * Copy back data if we haven't caused an overflow (already
815 	 * handled) and we are using a 2 copy transfer, or we are
816 	 * returning data from a kernel server.
817 	 */
818 	if (ct->d_args.data_size) {
819 		ct->d_args.data_ptr = ct->d_args.rbuf;
820 		if (ct->d_kernel || (!ct->d_overflow &&
821 		    ct->d_args.data_size <= door_max_arg)) {
822 			if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
823 			    ct->d_args.data_size)) {
824 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
825 				error = EFAULT;
826 				goto out;
827 			}
828 		}
829 	}
830 
831 	/*
832 	 * stuff returned doors into our proc, copyout the descriptors
833 	 */
834 	if (ct->d_args.desc_num) {
835 		struct file	**fpp;
836 		door_desc_t	*didpp;
837 		uint_t		n = ct->d_args.desc_num;
838 
839 		dsize = n * sizeof (door_desc_t);
840 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
841 		fpp = ct->d_fpp;
842 
843 		while (n--) {
844 			if (door_insert(*fpp, didpp) == -1) {
845 				/* Close remaining files */
846 				door_fp_close(fpp, n + 1);
847 				error = EMFILE;
848 				goto out;
849 			}
850 			fpp++; didpp++; ncopied++;
851 		}
852 
853 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
854 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
855 
856 		if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {
857 			error = EFAULT;
858 			goto out;
859 		}
860 	}
861 
862 	/*
863 	 * Return the results
864 	 */
865 	if (datamodel == DATAMODEL_NATIVE) {
866 		if (copyout_nowatch(&ct->d_args, args,
867 		    sizeof (door_arg_t)) != 0)
868 			error = EFAULT;
869 	} else {
870 		door_arg32_t    da32;
871 
872 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
873 		da32.data_size = ct->d_args.data_size;
874 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
875 		da32.desc_num = ct->d_args.desc_num;
876 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
877 		da32.rsize = ct->d_args.rsize;
878 		if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {
879 			error = EFAULT;
880 		}
881 	}
882 
883 out:
884 	ct->d_noresults = 0;
885 
886 	/* clean up the overflow buffer if an error occurred */
887 	if (error != 0 && ct->d_overflow) {
888 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
889 		    ct->d_args.rsize);
890 	}
891 	ct->d_overflow = 0;
892 
893 	/* call destructor */
894 	if (destfn) {
895 		ASSERT(ct->d_kernel);
896 		(*destfn)(dp->door_data, destarg);
897 		ct->d_buf = NULL;
898 		ct->d_bufsize = 0;
899 	}
900 
901 	if (dp)
902 		VN_RELE(DTOV(dp));
903 
904 	if (ct->d_buf) {
905 		ASSERT(!ct->d_kernel);
906 		kmem_free(ct->d_buf, ct->d_bufsize);
907 		ct->d_buf = NULL;
908 		ct->d_bufsize = 0;
909 	}
910 	ct->d_kernel = 0;
911 
912 	/* clean up the descriptor copyout buffer */
913 	if (start != NULL) {
914 		if (error != 0)
915 			door_fd_close(start, ncopied);
916 		kmem_free(start, dsize);
917 	}
918 
919 	if (ct->d_fpp) {
920 		kmem_free(ct->d_fpp, ct->d_fpp_size);
921 		ct->d_fpp = NULL;
922 		ct->d_fpp_size = 0;
923 	}
924 
925 	if (error)
926 		return (set_errno(error));
927 
928 	return (0);
929 }
930 
931 static int
932 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
933 {
934 	int error = 0;
935 
936 	mutex_enter(&door_knob);
937 
938 	if (DOOR_INVALID(dp)) {
939 		mutex_exit(&door_knob);
940 		return (EBADF);
941 	}
942 
943 	/*
944 	 * door_ki_setparam() can only affect kernel doors.
945 	 * door_setparam() can only affect doors attached to the current
946 	 * process.
947 	 */
948 	if ((from_kernel && dp->door_target != &p0) ||
949 	    (!from_kernel && dp->door_target != curproc)) {
950 		mutex_exit(&door_knob);
951 		return (EPERM);
952 	}
953 
954 	switch (type) {
955 	case DOOR_PARAM_DESC_MAX:
956 		if (val > INT_MAX)
957 			error = ERANGE;
958 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
959 			error = ENOTSUP;
960 		else
961 			dp->door_desc_max = (uint_t)val;
962 		break;
963 
964 	case DOOR_PARAM_DATA_MIN:
965 		if (val > dp->door_data_max)
966 			error = EINVAL;
967 		else
968 			dp->door_data_min = val;
969 		break;
970 
971 	case DOOR_PARAM_DATA_MAX:
972 		if (val < dp->door_data_min)
973 			error = EINVAL;
974 		else
975 			dp->door_data_max = val;
976 		break;
977 
978 	default:
979 		error = EINVAL;
980 		break;
981 	}
982 
983 	mutex_exit(&door_knob);
984 	return (error);
985 }
986 
987 static int
988 door_getparam_common(door_node_t *dp, int type, size_t *out)
989 {
990 	int error = 0;
991 
992 	mutex_enter(&door_knob);
993 	switch (type) {
994 	case DOOR_PARAM_DESC_MAX:
995 		*out = (size_t)dp->door_desc_max;
996 		break;
997 	case DOOR_PARAM_DATA_MIN:
998 		*out = dp->door_data_min;
999 		break;
1000 	case DOOR_PARAM_DATA_MAX:
1001 		*out = dp->door_data_max;
1002 		break;
1003 	default:
1004 		error = EINVAL;
1005 		break;
1006 	}
1007 	mutex_exit(&door_knob);
1008 	return (error);
1009 }
1010 
1011 int
1012 door_setparam(int did, int type, size_t val)
1013 {
1014 	door_node_t *dp;
1015 	int error = 0;
1016 
1017 	if ((dp = door_lookup(did, NULL)) == NULL)
1018 		return (set_errno(EBADF));
1019 
1020 	error = door_setparam_common(dp, 0, type, val);
1021 
1022 	releasef(did);
1023 
1024 	if (error)
1025 		return (set_errno(error));
1026 
1027 	return (0);
1028 }
1029 
1030 int
1031 door_getparam(int did, int type, size_t *out)
1032 {
1033 	door_node_t *dp;
1034 	size_t val = 0;
1035 	int error = 0;
1036 
1037 	if ((dp = door_lookup(did, NULL)) == NULL)
1038 		return (set_errno(EBADF));
1039 
1040 	error = door_getparam_common(dp, type, &val);
1041 
1042 	releasef(did);
1043 
1044 	if (error)
1045 		return (set_errno(error));
1046 
1047 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1048 		if (copyout(&val, out, sizeof (val)))
1049 			return (set_errno(EFAULT));
1050 #ifdef _SYSCALL32_IMPL
1051 	} else {
1052 		size32_t val32 = (size32_t)val;
1053 
1054 		if (val != val32)
1055 			return (set_errno(EOVERFLOW));
1056 
1057 		if (copyout(&val32, out, sizeof (val32)))
1058 			return (set_errno(EFAULT));
1059 #endif /* _SYSCALL32_IMPL */
1060 	}
1061 
1062 	return (0);
1063 }
1064 
1065 /*
1066  * A copyout() which proceeds from high addresses to low addresses.  This way,
1067  * stack guard pages are effective.
1068  *
1069  * Note that we use copyout_nowatch();  this is called while the client is
1070  * held.
1071  */
1072 static int
1073 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1074 {
1075 	const char *kbase = (const char *)kaddr;
1076 	uintptr_t ubase = (uintptr_t)uaddr;
1077 	size_t pgsize = PAGESIZE;
1078 
1079 	if (count <= pgsize)
1080 		return (copyout_nowatch(kaddr, uaddr, count));
1081 
1082 	while (count > 0) {
1083 		uintptr_t start, end, offset, amount;
1084 
1085 		end = ubase + count;
1086 		start = P2ALIGN(end - 1, pgsize);
1087 		if (P2ALIGN(ubase, pgsize) == start)
1088 			start = ubase;
1089 
1090 		offset = start - ubase;
1091 		amount = end - start;
1092 
1093 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1094 
1095 		if (copyout_nowatch(kbase + offset, (void *)start, amount))
1096 			return (1);
1097 		count -= amount;
1098 	}
1099 	return (0);
1100 }
1101 
1102 /*
1103  * Writes the stack layout for door_return() into the door_server_t of the
1104  * server thread.
1105  */
1106 static int
1107 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1108 {
1109 	door_server_t *st = DOOR_SERVER(tp->t_door);
1110 	door_layout_t *out = &st->d_layout;
1111 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1112 	size_t ssize = st->d_ssize;
1113 	size_t descsz;
1114 	uintptr_t descp, datap, infop, resultsp, finalsp;
1115 	size_t align = STACK_ALIGN;
1116 	size_t results_sz = sizeof (struct door_results);
1117 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1118 
1119 	ASSERT(!st->d_layout_done);
1120 
1121 #ifndef _STACK_GROWS_DOWNWARD
1122 #error stack does not grow downward, door_layout() must change
1123 #endif
1124 
1125 #ifdef _SYSCALL32_IMPL
1126 	if (datamodel != DATAMODEL_NATIVE) {
1127 		align = STACK_ALIGN32;
1128 		results_sz = sizeof (struct door_results32);
1129 	}
1130 #endif
1131 
1132 	descsz = ndesc * sizeof (door_desc_t);
1133 
1134 	/*
1135 	 * To speed up the overflow checking, we do an initial check
1136 	 * that the passed in data size won't cause us to wrap past
1137 	 * base_sp.  Since door_max_desc limits descsz, we can
1138 	 * safely use it here.  65535 is an arbitrary 'bigger than
1139 	 * we need, small enough to not cause trouble' constant;
1140 	 * the only constraint is that it must be > than:
1141 	 *
1142 	 *	5 * STACK_ALIGN +
1143 	 *	    sizeof (door_info_t) +
1144 	 *	    sizeof (door_results_t) +
1145 	 *	    (max adjustment from door_final_sp())
1146 	 *
1147 	 * After we compute the layout, we can safely do a "did we wrap
1148 	 * around" check, followed by a check against the recorded
1149 	 * stack size.
1150 	 */
1151 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1152 		return (E2BIG);		/* overflow */
1153 
1154 	descp = P2ALIGN(base_sp - descsz, align);
1155 	datap = P2ALIGN(descp - data_size, align);
1156 
1157 	if (info_needed)
1158 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1159 	else
1160 		infop = datap;
1161 
1162 	resultsp = P2ALIGN(infop - results_sz, align);
1163 	finalsp = door_final_sp(resultsp, align, datamodel);
1164 
1165 	if (finalsp > base_sp)
1166 		return (E2BIG);		/* overflow */
1167 
1168 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1169 		return (E2BIG);		/* doesn't fit in stack */
1170 
1171 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1172 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1173 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1174 	out->dl_resultsp = (caddr_t)resultsp;
1175 	out->dl_sp = (caddr_t)finalsp;
1176 
1177 	st->d_layout_done = 1;
1178 	return (0);
1179 }
1180 
1181 static int
1182 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1183 {
1184 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1185 	door_layout_t *layout = &st->d_layout;
1186 	int error = 0;
1187 
1188 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1189 
1190 	door_pool_t *pool = (is_private)? &dp->door_servers :
1191 	    &curproc->p_server_threads;
1192 
1193 	int empty_pool = (pool->dp_threads == NULL);
1194 
1195 	caddr_t infop = NULL;
1196 	char *datap = NULL;
1197 	size_t datasize = 0;
1198 	size_t descsize;
1199 
1200 	file_t **fpp = ct->d_fpp;
1201 	door_desc_t *start = NULL;
1202 	uint_t ndesc = 0;
1203 	uint_t ncopied = 0;
1204 
1205 	if (ct != NULL) {
1206 		datap = ct->d_args.data_ptr;
1207 		datasize = ct->d_args.data_size;
1208 		ndesc = ct->d_args.desc_num;
1209 	}
1210 
1211 	descsize = ndesc * sizeof (door_desc_t);
1212 
1213 	/*
1214 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1215 	 * to let unref notifications through, though.
1216 	 */
1217 	if (datap == DOOR_UNREF_DATA) {
1218 		if (ct->d_upcall)
1219 			datasize = 0;
1220 		else
1221 			datap = NULL;
1222 	} else if (datasize == 0) {
1223 		datap = NULL;
1224 	}
1225 
1226 	/*
1227 	 * Get the stack layout, if it hasn't already been done.
1228 	 */
1229 	if (!st->d_layout_done) {
1230 		error = door_layout(curthread, datasize, ndesc,
1231 		    (is_private && empty_pool));
1232 		if (error != 0)
1233 			goto fail;
1234 	}
1235 
1236 	/*
1237 	 * fill out the stack, starting from the top.  Layout was already
1238 	 * filled in by door_args() or door_translate_out().
1239 	 */
1240 	if (layout->dl_descp != NULL) {
1241 		ASSERT(ndesc != 0);
1242 		start = kmem_alloc(descsize, KM_SLEEP);
1243 
1244 		while (ndesc > 0) {
1245 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1246 				error = EMFILE;
1247 				goto fail;
1248 			}
1249 			ndesc--;
1250 			ncopied++;
1251 			fpp++;
1252 		}
1253 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1254 			error = E2BIG;
1255 			goto fail;
1256 		}
1257 	}
1258 	fpp = NULL;			/* finished processing */
1259 
1260 	if (layout->dl_datap != NULL) {
1261 		ASSERT(datasize != 0);
1262 		datap = layout->dl_datap;
1263 		if (ct->d_upcall || datasize <= door_max_arg) {
1264 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1265 				error = E2BIG;
1266 				goto fail;
1267 			}
1268 		}
1269 	}
1270 
1271 	if (is_private && empty_pool) {
1272 		door_info_t di;
1273 
1274 		infop = layout->dl_infop;
1275 		ASSERT(infop != NULL);
1276 
1277 		di.di_target = curproc->p_pid;
1278 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1279 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1280 		di.di_uniquifier = dp->door_index;
1281 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1282 		    DOOR_LOCAL;
1283 
1284 		if (door_stack_copyout(&di, infop, sizeof (di))) {
1285 			error = E2BIG;
1286 			goto fail;
1287 		}
1288 	}
1289 
1290 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1291 		struct door_results dr;
1292 
1293 		dr.cookie = dp->door_data;
1294 		dr.data_ptr = datap;
1295 		dr.data_size = datasize;
1296 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1297 		dr.desc_num = ncopied;
1298 		dr.pc = dp->door_pc;
1299 		dr.nservers = !empty_pool;
1300 		dr.door_info = (door_info_t *)infop;
1301 
1302 		if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1303 			error = E2BIG;
1304 			goto fail;
1305 		}
1306 #ifdef _SYSCALL32_IMPL
1307 	} else {
1308 		struct door_results32 dr32;
1309 
1310 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1311 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1312 		dr32.data_size = (size32_t)datasize;
1313 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1314 		dr32.desc_num = ncopied;
1315 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1316 		dr32.nservers = !empty_pool;
1317 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1318 
1319 		if (door_stack_copyout(&dr32, layout->dl_resultsp,
1320 		    sizeof (dr32))) {
1321 			error = E2BIG;
1322 			goto fail;
1323 		}
1324 #endif
1325 	}
1326 
1327 	error = door_finish_dispatch(layout->dl_sp);
1328 fail:
1329 	if (start != NULL) {
1330 		if (error != 0)
1331 			door_fd_close(start, ncopied);
1332 		kmem_free(start, descsize);
1333 	}
1334 	if (fpp != NULL)
1335 		door_fp_close(fpp, ndesc);
1336 
1337 	return (error);
1338 }
1339 
1340 /*
1341  * Return the results (if any) to the caller (if any) and wait for the
1342  * next invocation on a door.
1343  */
1344 int
1345 door_return(caddr_t data_ptr, size_t data_size,
1346     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1347 {
1348 	kthread_t	*caller;
1349 	klwp_t		*lwp;
1350 	int		error = 0;
1351 	door_node_t	*dp;
1352 	door_server_t	*st;		/* curthread door_data */
1353 	door_client_t	*ct;		/* caller door_data */
1354 	int		cancel_pending;
1355 
1356 	st = door_my_server(1);
1357 
1358 	/*
1359 	 * If thread was bound to a door that no longer exists, return
1360 	 * an error.  This can happen if a thread is bound to a door
1361 	 * before the process calls forkall(); in the child, the door
1362 	 * doesn't exist and door_fork() sets the d_invbound flag.
1363 	 */
1364 	if (st->d_invbound)
1365 		return (set_errno(EINVAL));
1366 
1367 	st->d_sp = sp;			/* Save base of stack. */
1368 	st->d_ssize = ssize;		/* and its size */
1369 
1370 	/*
1371 	 * before we release our stack to the whims of our next caller,
1372 	 * copy in the syscall arguments if we're being traced by /proc.
1373 	 */
1374 	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
1375 		(void) save_syscall_args();
1376 
1377 	/* Make sure the caller hasn't gone away */
1378 	mutex_enter(&door_knob);
1379 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1380 		if (desc_num != 0) {
1381 			/* close any DOOR_RELEASE descriptors */
1382 			mutex_exit(&door_knob);
1383 			error = door_release_fds(desc_ptr, desc_num);
1384 			if (error)
1385 				return (set_errno(error));
1386 			mutex_enter(&door_knob);
1387 		}
1388 		goto out;
1389 	}
1390 	ct = DOOR_CLIENT(caller->t_door);
1391 
1392 	ct->d_args.data_size = data_size;
1393 	ct->d_args.desc_num = desc_num;
1394 	/*
1395 	 * Transfer results, if any, to the client
1396 	 */
1397 	if (data_size != 0 || desc_num != 0) {
1398 		/*
1399 		 * Prevent the client from exiting until we have finished
1400 		 * moving results.
1401 		 */
1402 		DOOR_T_HOLD(ct);
1403 		mutex_exit(&door_knob);
1404 		error = door_results(caller, data_ptr, data_size,
1405 		    desc_ptr, desc_num);
1406 		mutex_enter(&door_knob);
1407 		DOOR_T_RELEASE(ct);
1408 		/*
1409 		 * Pass EOVERFLOW errors back to the client
1410 		 */
1411 		if (error && error != EOVERFLOW) {
1412 			mutex_exit(&door_knob);
1413 			return (set_errno(error));
1414 		}
1415 	}
1416 out:
1417 	/* Put ourselves on the available server thread list */
1418 	door_release_server(st->d_pool, curthread);
1419 
1420 	/*
1421 	 * Make sure the caller is still waiting to be resumed
1422 	 */
1423 	if (caller) {
1424 		disp_lock_t *tlp;
1425 
1426 		thread_lock(caller);
1427 		ct->d_error = error;		/* Return any errors */
1428 		if (caller->t_state == TS_SLEEP &&
1429 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1430 			cpu_t *cp = CPU;
1431 
1432 			tlp = caller->t_lockp;
1433 			/*
1434 			 * Setting t_disp_queue prevents erroneous preemptions
1435 			 * if this thread is still in execution on another
1436 			 * processor
1437 			 */
1438 			caller->t_disp_queue = cp->cpu_disp;
1439 			CL_ACTIVE(caller);
1440 			/*
1441 			 * We are calling thread_onproc() instead of
1442 			 * THREAD_ONPROC() because compiler can reorder
1443 			 * the two stores of t_state and t_lockp in
1444 			 * THREAD_ONPROC().
1445 			 */
1446 			thread_onproc(caller, cp);
1447 			disp_lock_exit_high(tlp);
1448 			shuttle_resume(caller, &door_knob);
1449 		} else {
1450 			/* May have been setrun or in stop state */
1451 			thread_unlock(caller);
1452 			shuttle_swtch(&door_knob);
1453 		}
1454 	} else {
1455 		shuttle_swtch(&door_knob);
1456 	}
1457 
1458 	/*
1459 	 * We've sprung to life. Determine if we are part of a door
1460 	 * invocation, or just interrupted
1461 	 */
1462 	lwp = ttolwp(curthread);
1463 	mutex_enter(&door_knob);
1464 	if ((dp = st->d_active) != NULL) {
1465 		/*
1466 		 * Normal door invocation. Return any error condition
1467 		 * encountered while trying to pass args to the server
1468 		 * thread.
1469 		 */
1470 		lwp->lwp_asleep = 0;
1471 		/*
1472 		 * Prevent the caller from leaving us while we
1473 		 * are copying out the arguments from it's buffer.
1474 		 */
1475 		ASSERT(st->d_caller != NULL);
1476 		ct = DOOR_CLIENT(st->d_caller->t_door);
1477 
1478 		DOOR_T_HOLD(ct);
1479 		mutex_exit(&door_knob);
1480 		error = door_server_dispatch(ct, dp);
1481 		mutex_enter(&door_knob);
1482 		DOOR_T_RELEASE(ct);
1483 
1484 		/* let the client know we have processed his message */
1485 		ct->d_args_done = 1;
1486 
1487 		if (error) {
1488 			caller = st->d_caller;
1489 			if (caller)
1490 				ct = DOOR_CLIENT(caller->t_door);
1491 			else
1492 				ct = NULL;
1493 			goto out;
1494 		}
1495 		mutex_exit(&door_knob);
1496 		return (0);
1497 	} else {
1498 		/*
1499 		 * We are not involved in a door_invocation.
1500 		 * Check for /proc related activity...
1501 		 */
1502 		st->d_caller = NULL;
1503 		door_server_exit(curproc, curthread);
1504 		mutex_exit(&door_knob);
1505 		cancel_pending = 0;
1506 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1507 		    MUSTRETURN(curproc, curthread) ||
1508 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
1509 			if (cancel_pending)
1510 				schedctl_cancel_eintr();
1511 			lwp->lwp_asleep = 0;
1512 			lwp->lwp_sysabort = 0;
1513 			return (set_errno(EINTR));
1514 		}
1515 		/* Go back and wait for another request */
1516 		lwp->lwp_asleep = 0;
1517 		mutex_enter(&door_knob);
1518 		caller = NULL;
1519 		goto out;
1520 	}
1521 }
1522 
1523 /*
1524  * Revoke any future invocations on this door
1525  */
1526 int
1527 door_revoke(int did)
1528 {
1529 	door_node_t	*d;
1530 	int		error;
1531 
1532 	if ((d = door_lookup(did, NULL)) == NULL)
1533 		return (set_errno(EBADF));
1534 
1535 	mutex_enter(&door_knob);
1536 	if (d->door_target != curproc) {
1537 		mutex_exit(&door_knob);
1538 		releasef(did);
1539 		return (set_errno(EPERM));
1540 	}
1541 	d->door_flags |= DOOR_REVOKED;
1542 	if (d->door_flags & DOOR_PRIVATE)
1543 		cv_broadcast(&d->door_servers.dp_cv);
1544 	else
1545 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1546 	mutex_exit(&door_knob);
1547 	releasef(did);
1548 	/* Invalidate the descriptor */
1549 	if ((error = closeandsetf(did, NULL)) != 0)
1550 		return (set_errno(error));
1551 	return (0);
1552 }
1553 
1554 int
1555 door_info(int did, struct door_info *d_info)
1556 {
1557 	door_node_t	*dp;
1558 	door_info_t	di;
1559 	door_server_t	*st;
1560 	file_t		*fp = NULL;
1561 
1562 	if (did == DOOR_QUERY) {
1563 		/* Get information on door current thread is bound to */
1564 		if ((st = door_my_server(0)) == NULL ||
1565 		    (dp = st->d_pool) == NULL)
1566 			/* Thread isn't bound to a door */
1567 			return (set_errno(EBADF));
1568 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1569 		/* Not a door */
1570 		return (set_errno(EBADF));
1571 	}
1572 
1573 	door_info_common(dp, &di, fp);
1574 
1575 	if (did != DOOR_QUERY)
1576 		releasef(did);
1577 
1578 	if (copyout(&di, d_info, sizeof (struct door_info)))
1579 		return (set_errno(EFAULT));
1580 	return (0);
1581 }
1582 
1583 /*
1584  * Common code for getting information about a door either via the
1585  * door_info system call or the door_ki_info kernel call.
1586  */
1587 void
1588 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1589 {
1590 	int unref_count;
1591 
1592 	bzero(dip, sizeof (door_info_t));
1593 
1594 	mutex_enter(&door_knob);
1595 	if (dp->door_target == NULL)
1596 		dip->di_target = -1;
1597 	else
1598 		dip->di_target = dp->door_target->p_pid;
1599 
1600 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1601 	if (dp->door_target == curproc)
1602 		dip->di_attributes |= DOOR_LOCAL;
1603 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1604 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1605 	dip->di_uniquifier = dp->door_index;
1606 	/*
1607 	 * If this door is in the middle of having an unreferenced
1608 	 * notification delivered, don't count the VN_HOLD by
1609 	 * door_deliver_unref in determining if it is unreferenced.
1610 	 * This handles the case where door_info is called from the
1611 	 * thread delivering the unref notification.
1612 	 */
1613 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1614 		unref_count = 2;
1615 	else
1616 		unref_count = 1;
1617 	mutex_exit(&door_knob);
1618 
1619 	if (fp == NULL) {
1620 		/*
1621 		 * If this thread is bound to the door, then we can just
1622 		 * check the vnode; a ref count of 1 (or 2 if this is
1623 		 * handling an unref notification) means that the hold
1624 		 * from the door_bind is the only reference to the door
1625 		 * (no file descriptor refers to it).
1626 		 */
1627 		if (DTOV(dp)->v_count == unref_count)
1628 			dip->di_attributes |= DOOR_IS_UNREF;
1629 	} else {
1630 		/*
1631 		 * If we're working from a file descriptor or door handle
1632 		 * we need to look at the file structure count.  We don't
1633 		 * need to hold the vnode lock since this is just a snapshot.
1634 		 */
1635 		mutex_enter(&fp->f_tlock);
1636 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1637 			dip->di_attributes |= DOOR_IS_UNREF;
1638 		mutex_exit(&fp->f_tlock);
1639 	}
1640 }
1641 
1642 /*
1643  * Return credentials of the door caller (if any) for this invocation
1644  */
1645 int
1646 door_ucred(struct ucred_s *uch)
1647 {
1648 	kthread_t	*caller;
1649 	door_server_t	*st;
1650 	door_client_t	*ct;
1651 	struct proc	*p;
1652 	struct ucred_s	*res;
1653 	int		err;
1654 
1655 	mutex_enter(&door_knob);
1656 	if ((st = door_my_server(0)) == NULL ||
1657 	    (caller = st->d_caller) == NULL) {
1658 		mutex_exit(&door_knob);
1659 		return (set_errno(EINVAL));
1660 	}
1661 
1662 	ASSERT(caller->t_door != NULL);
1663 	ct = DOOR_CLIENT(caller->t_door);
1664 
1665 	/* Prevent caller from exiting while we examine the cred */
1666 	DOOR_T_HOLD(ct);
1667 	mutex_exit(&door_knob);
1668 
1669 	p = ttoproc(caller);
1670 
1671 	/*
1672 	 * If the credentials are not specified by the client, get the one
1673 	 * associated with the calling process.
1674 	 */
1675 	if (ct->d_cred == NULL) {
1676 		res = pgetucred(p);
1677 	} else {
1678 		res = cred2ucred(ct->d_cred, ct->d_upcall ?
1679 		    p0.p_pid : p->p_pid, NULL, CRED());
1680 	}
1681 
1682 	mutex_enter(&door_knob);
1683 	DOOR_T_RELEASE(ct);
1684 	mutex_exit(&door_knob);
1685 
1686 	err = copyout(res, uch, res->uc_size);
1687 
1688 	kmem_free(res, res->uc_size);
1689 
1690 	if (err != 0)
1691 		return (set_errno(EFAULT));
1692 
1693 	return (0);
1694 }
1695 
1696 /*
1697  * Bind the current lwp to the server thread pool associated with 'did'
1698  */
1699 int
1700 door_bind(int did)
1701 {
1702 	door_node_t	*dp;
1703 	door_server_t	*st;
1704 
1705 	if ((dp = door_lookup(did, NULL)) == NULL) {
1706 		/* Not a door */
1707 		return (set_errno(EBADF));
1708 	}
1709 
1710 	/*
1711 	 * Can't bind to a non-private door, and can't bind to a door
1712 	 * served by another process.
1713 	 */
1714 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1715 	    dp->door_target != curproc) {
1716 		releasef(did);
1717 		return (set_errno(EINVAL));
1718 	}
1719 
1720 	st = door_my_server(1);
1721 	if (st->d_pool)
1722 		door_unbind_thread(st->d_pool);
1723 	st->d_pool = dp;
1724 	st->d_invbound = 0;
1725 	door_bind_thread(dp);
1726 	releasef(did);
1727 
1728 	return (0);
1729 }
1730 
1731 /*
1732  * Unbind the current lwp from it's server thread pool
1733  */
1734 int
1735 door_unbind(void)
1736 {
1737 	door_server_t *st;
1738 
1739 	if ((st = door_my_server(0)) == NULL)
1740 		return (set_errno(EBADF));
1741 
1742 	if (st->d_invbound) {
1743 		ASSERT(st->d_pool == NULL);
1744 		st->d_invbound = 0;
1745 		return (0);
1746 	}
1747 	if (st->d_pool == NULL)
1748 		return (set_errno(EBADF));
1749 	door_unbind_thread(st->d_pool);
1750 	st->d_pool = NULL;
1751 	return (0);
1752 }
1753 
1754 /*
1755  * Create a descriptor for the associated file and fill in the
1756  * attributes associated with it.
1757  *
1758  * Return 0 for success, -1 otherwise;
1759  */
1760 int
1761 door_insert(struct file *fp, door_desc_t *dp)
1762 {
1763 	struct vnode *vp;
1764 	int	fd;
1765 	door_attr_t attributes = DOOR_DESCRIPTOR;
1766 
1767 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1768 	if ((fd = ufalloc(0)) == -1)
1769 		return (-1);
1770 	setf(fd, fp);
1771 	dp->d_data.d_desc.d_descriptor = fd;
1772 
1773 	/* Fill in the attributes */
1774 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1775 		vp = fp->f_vnode;
1776 	if (vp && vp->v_type == VDOOR) {
1777 		if (VTOD(vp)->door_target == curproc)
1778 			attributes |= DOOR_LOCAL;
1779 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1780 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1781 	}
1782 	dp->d_attributes = attributes;
1783 	return (0);
1784 }
1785 
1786 /*
1787  * Return an available thread for this server.  A NULL return value indicates
1788  * that either:
1789  *	The door has been revoked, or
1790  *	a signal was received.
1791  * The two conditions can be differentiated using DOOR_INVALID(dp).
1792  */
1793 static kthread_t *
1794 door_get_server(door_node_t *dp)
1795 {
1796 	kthread_t **ktp;
1797 	kthread_t *server_t;
1798 	door_pool_t *pool;
1799 	door_server_t *st;
1800 	int signalled;
1801 
1802 	disp_lock_t *tlp;
1803 	cpu_t *cp;
1804 
1805 	ASSERT(MUTEX_HELD(&door_knob));
1806 
1807 	if (dp->door_flags & DOOR_PRIVATE)
1808 		pool = &dp->door_servers;
1809 	else
1810 		pool = &dp->door_target->p_server_threads;
1811 
1812 	for (;;) {
1813 		/*
1814 		 * We search the thread pool, looking for a server thread
1815 		 * ready to take an invocation (i.e. one which is still
1816 		 * sleeping on a shuttle object).  If none are available,
1817 		 * we sleep on the pool's CV, and will be signaled when a
1818 		 * thread is added to the pool.
1819 		 *
1820 		 * This relies on the fact that once a thread in the thread
1821 		 * pool wakes up, it *must* remove and add itself to the pool
1822 		 * before it can receive door calls.
1823 		 */
1824 		if (DOOR_INVALID(dp))
1825 			return (NULL);	/* Target has become invalid */
1826 
1827 		for (ktp = &pool->dp_threads;
1828 		    (server_t = *ktp) != NULL;
1829 		    ktp = &st->d_servers) {
1830 			st = DOOR_SERVER(server_t->t_door);
1831 
1832 			thread_lock(server_t);
1833 			if (server_t->t_state == TS_SLEEP &&
1834 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1835 				break;
1836 			thread_unlock(server_t);
1837 		}
1838 		if (server_t != NULL)
1839 			break;		/* we've got a live one! */
1840 
1841 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1842 		    &signalled)) {
1843 			/*
1844 			 * If we were signaled and the door is still
1845 			 * valid, pass the signal on to another waiter.
1846 			 */
1847 			if (signalled && !DOOR_INVALID(dp))
1848 				cv_signal(&pool->dp_cv);
1849 			return (NULL);	/* Got a signal */
1850 		}
1851 	}
1852 
1853 	/*
1854 	 * We've got a thread_lock()ed thread which is still on the
1855 	 * shuttle.  Take it off the list of available server threads
1856 	 * and mark it as ONPROC.  We are committed to resuming this
1857 	 * thread now.
1858 	 */
1859 	tlp = server_t->t_lockp;
1860 	cp = CPU;
1861 
1862 	*ktp = st->d_servers;
1863 	st->d_servers = NULL;
1864 	/*
1865 	 * Setting t_disp_queue prevents erroneous preemptions
1866 	 * if this thread is still in execution on another processor
1867 	 */
1868 	server_t->t_disp_queue = cp->cpu_disp;
1869 	CL_ACTIVE(server_t);
1870 	/*
1871 	 * We are calling thread_onproc() instead of
1872 	 * THREAD_ONPROC() because compiler can reorder
1873 	 * the two stores of t_state and t_lockp in
1874 	 * THREAD_ONPROC().
1875 	 */
1876 	thread_onproc(server_t, cp);
1877 	disp_lock_exit(tlp);
1878 	return (server_t);
1879 }
1880 
1881 /*
1882  * Put a server thread back in the pool.
1883  */
1884 static void
1885 door_release_server(door_node_t *dp, kthread_t *t)
1886 {
1887 	door_server_t *st = DOOR_SERVER(t->t_door);
1888 	door_pool_t *pool;
1889 
1890 	ASSERT(MUTEX_HELD(&door_knob));
1891 	st->d_active = NULL;
1892 	st->d_caller = NULL;
1893 	st->d_layout_done = 0;
1894 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1895 		ASSERT(dp->door_target == NULL ||
1896 		    dp->door_target == ttoproc(t));
1897 		pool = &dp->door_servers;
1898 	} else {
1899 		pool = &ttoproc(t)->p_server_threads;
1900 	}
1901 
1902 	st->d_servers = pool->dp_threads;
1903 	pool->dp_threads = t;
1904 
1905 	/* If someone is waiting for a server thread, wake him up */
1906 	cv_signal(&pool->dp_cv);
1907 }
1908 
1909 /*
1910  * Remove a server thread from the pool if present.
1911  */
1912 static void
1913 door_server_exit(proc_t *p, kthread_t *t)
1914 {
1915 	door_pool_t *pool;
1916 	kthread_t **next;
1917 	door_server_t *st = DOOR_SERVER(t->t_door);
1918 
1919 	ASSERT(MUTEX_HELD(&door_knob));
1920 	if (st->d_pool != NULL) {
1921 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1922 		pool = &st->d_pool->door_servers;
1923 	} else {
1924 		pool = &p->p_server_threads;
1925 	}
1926 
1927 	next = &pool->dp_threads;
1928 	while (*next != NULL) {
1929 		if (*next == t) {
1930 			*next = DOOR_SERVER(t->t_door)->d_servers;
1931 			return;
1932 		}
1933 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1934 	}
1935 }
1936 
1937 /*
1938  * Lookup the door descriptor. Caller must call releasef when finished
1939  * with associated door.
1940  */
1941 static door_node_t *
1942 door_lookup(int did, file_t **fpp)
1943 {
1944 	vnode_t	*vp;
1945 	file_t *fp;
1946 
1947 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1948 	if ((fp = getf(did)) == NULL)
1949 		return (NULL);
1950 	/*
1951 	 * Use the underlying vnode (we may be namefs mounted)
1952 	 */
1953 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1954 		vp = fp->f_vnode;
1955 
1956 	if (vp == NULL || vp->v_type != VDOOR) {
1957 		releasef(did);
1958 		return (NULL);
1959 	}
1960 
1961 	if (fpp)
1962 		*fpp = fp;
1963 
1964 	return (VTOD(vp));
1965 }
1966 
1967 /*
1968  * The current thread is exiting, so clean up any pending
1969  * invocation details
1970  */
1971 void
1972 door_slam(void)
1973 {
1974 	door_node_t *dp;
1975 	door_data_t *dt;
1976 	door_client_t *ct;
1977 	door_server_t *st;
1978 
1979 	/*
1980 	 * If we are an active door server, notify our
1981 	 * client that we are exiting and revoke our door.
1982 	 */
1983 	if ((dt = door_my_data(0)) == NULL)
1984 		return;
1985 	ct = DOOR_CLIENT(dt);
1986 	st = DOOR_SERVER(dt);
1987 
1988 	mutex_enter(&door_knob);
1989 	for (;;) {
1990 		if (DOOR_T_HELD(ct))
1991 			cv_wait(&ct->d_cv, &door_knob);
1992 		else if (DOOR_T_HELD(st))
1993 			cv_wait(&st->d_cv, &door_knob);
1994 		else
1995 			break;			/* neither flag is set */
1996 	}
1997 	curthread->t_door = NULL;
1998 	if ((dp = st->d_active) != NULL) {
1999 		kthread_t *t = st->d_caller;
2000 		proc_t *p = curproc;
2001 
2002 		/* Revoke our door if the process is exiting */
2003 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
2004 			door_list_delete(dp);
2005 			dp->door_target = NULL;
2006 			dp->door_flags |= DOOR_REVOKED;
2007 			if (dp->door_flags & DOOR_PRIVATE)
2008 				cv_broadcast(&dp->door_servers.dp_cv);
2009 			else
2010 				cv_broadcast(&p->p_server_threads.dp_cv);
2011 		}
2012 
2013 		if (t != NULL) {
2014 			/*
2015 			 * Let the caller know we are gone
2016 			 */
2017 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
2018 			thread_lock(t);
2019 			if (t->t_state == TS_SLEEP &&
2020 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
2021 				setrun_locked(t);
2022 			thread_unlock(t);
2023 		}
2024 	}
2025 	mutex_exit(&door_knob);
2026 	if (st->d_pool)
2027 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
2028 	kmem_free(dt, sizeof (door_data_t));
2029 }
2030 
2031 /*
2032  * Set DOOR_REVOKED for all doors of the current process. This is called
2033  * on exit before all lwp's are being terminated so that door calls will
2034  * return with an error.
2035  */
2036 void
2037 door_revoke_all()
2038 {
2039 	door_node_t *dp;
2040 	proc_t *p = ttoproc(curthread);
2041 
2042 	mutex_enter(&door_knob);
2043 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2044 		ASSERT(dp->door_target == p);
2045 		dp->door_flags |= DOOR_REVOKED;
2046 		if (dp->door_flags & DOOR_PRIVATE)
2047 			cv_broadcast(&dp->door_servers.dp_cv);
2048 	}
2049 	cv_broadcast(&p->p_server_threads.dp_cv);
2050 	mutex_exit(&door_knob);
2051 }
2052 
2053 /*
2054  * The process is exiting, and all doors it created need to be revoked.
2055  */
2056 void
2057 door_exit(void)
2058 {
2059 	door_node_t *dp;
2060 	proc_t *p = ttoproc(curthread);
2061 
2062 	ASSERT(p->p_lwpcnt == 1);
2063 	/*
2064 	 * Walk the list of active doors created by this process and
2065 	 * revoke them all.
2066 	 */
2067 	mutex_enter(&door_knob);
2068 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2069 		dp->door_target = NULL;
2070 		dp->door_flags |= DOOR_REVOKED;
2071 		if (dp->door_flags & DOOR_PRIVATE)
2072 			cv_broadcast(&dp->door_servers.dp_cv);
2073 	}
2074 	cv_broadcast(&p->p_server_threads.dp_cv);
2075 	/* Clear the list */
2076 	p->p_door_list = NULL;
2077 
2078 	/* Clean up the unref list */
2079 	while ((dp = p->p_unref_list) != NULL) {
2080 		p->p_unref_list = dp->door_ulist;
2081 		dp->door_ulist = NULL;
2082 		mutex_exit(&door_knob);
2083 		VN_RELE(DTOV(dp));
2084 		mutex_enter(&door_knob);
2085 	}
2086 	mutex_exit(&door_knob);
2087 }
2088 
2089 
2090 /*
2091  * The process is executing forkall(), and we need to flag threads that
2092  * are bound to a door in the child.  This will make the child threads
2093  * return an error to door_return unless they call door_unbind first.
2094  */
2095 void
2096 door_fork(kthread_t *parent, kthread_t *child)
2097 {
2098 	door_data_t *pt = parent->t_door;
2099 	door_server_t *st = DOOR_SERVER(pt);
2100 	door_data_t *dt;
2101 
2102 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2103 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2104 		/* parent thread is bound to a door */
2105 		dt = child->t_door =
2106 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2107 		DOOR_SERVER(dt)->d_invbound = 1;
2108 	}
2109 }
2110 
2111 /*
2112  * Deliver queued unrefs to appropriate door server.
2113  */
2114 static int
2115 door_unref(void)
2116 {
2117 	door_node_t	*dp;
2118 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2119 	proc_t *p = ttoproc(curthread);
2120 
2121 	/* make sure there's only one unref thread per process */
2122 	mutex_enter(&door_knob);
2123 	if (p->p_unref_thread) {
2124 		mutex_exit(&door_knob);
2125 		return (set_errno(EALREADY));
2126 	}
2127 	p->p_unref_thread = 1;
2128 	mutex_exit(&door_knob);
2129 
2130 	(void) door_my_data(1);			/* create info, if necessary */
2131 
2132 	for (;;) {
2133 		mutex_enter(&door_knob);
2134 
2135 		/* Grab a queued request */
2136 		while ((dp = p->p_unref_list) == NULL) {
2137 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2138 				/*
2139 				 * Interrupted.
2140 				 * Return so we can finish forkall() or exit().
2141 				 */
2142 				p->p_unref_thread = 0;
2143 				mutex_exit(&door_knob);
2144 				return (set_errno(EINTR));
2145 			}
2146 		}
2147 		p->p_unref_list = dp->door_ulist;
2148 		dp->door_ulist = NULL;
2149 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2150 		mutex_exit(&door_knob);
2151 
2152 		(void) door_upcall(DTOV(dp), &unref_args, NULL);
2153 
2154 		mutex_enter(&door_knob);
2155 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2156 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2157 		mutex_exit(&door_knob);
2158 		VN_RELE(DTOV(dp));
2159 	}
2160 }
2161 
2162 
2163 /*
2164  * Deliver queued unrefs to kernel door server.
2165  */
2166 /* ARGSUSED */
2167 static void
2168 door_unref_kernel(caddr_t arg)
2169 {
2170 	door_node_t	*dp;
2171 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2172 	proc_t *p = ttoproc(curthread);
2173 	callb_cpr_t cprinfo;
2174 
2175 	/* should only be one of these */
2176 	mutex_enter(&door_knob);
2177 	if (p->p_unref_thread) {
2178 		mutex_exit(&door_knob);
2179 		return;
2180 	}
2181 	p->p_unref_thread = 1;
2182 	mutex_exit(&door_knob);
2183 
2184 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2185 
2186 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2187 	for (;;) {
2188 		mutex_enter(&door_knob);
2189 		/* Grab a queued request */
2190 		while ((dp = p->p_unref_list) == NULL) {
2191 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2192 			cv_wait(&p->p_unref_cv, &door_knob);
2193 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2194 		}
2195 		p->p_unref_list = dp->door_ulist;
2196 		dp->door_ulist = NULL;
2197 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2198 		mutex_exit(&door_knob);
2199 
2200 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2201 
2202 		mutex_enter(&door_knob);
2203 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2204 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2205 		mutex_exit(&door_knob);
2206 		VN_RELE(DTOV(dp));
2207 	}
2208 }
2209 
2210 
2211 /*
2212  * Queue an unref invocation for processing for the current process
2213  * The door may or may not be revoked at this point.
2214  */
2215 void
2216 door_deliver_unref(door_node_t *d)
2217 {
2218 	struct proc *server = d->door_target;
2219 
2220 	ASSERT(MUTEX_HELD(&door_knob));
2221 	ASSERT(d->door_active == 0);
2222 
2223 	if (server == NULL)
2224 		return;
2225 	/*
2226 	 * Create a lwp to deliver unref calls if one isn't already running.
2227 	 *
2228 	 * A separate thread is used to deliver unrefs since the current
2229 	 * thread may be holding resources (e.g. locks) in user land that
2230 	 * may be needed by the unref processing. This would cause a
2231 	 * deadlock.
2232 	 */
2233 	if (d->door_flags & DOOR_UNREF_MULTI) {
2234 		/* multiple unrefs */
2235 		d->door_flags &= ~DOOR_DELAY;
2236 	} else {
2237 		/* Only 1 unref per door */
2238 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2239 	}
2240 	mutex_exit(&door_knob);
2241 
2242 	/*
2243 	 * Need to bump the vnode count before putting the door on the
2244 	 * list so it doesn't get prematurely released by door_unref.
2245 	 */
2246 	VN_HOLD(DTOV(d));
2247 
2248 	mutex_enter(&door_knob);
2249 	/* is this door already on the unref list? */
2250 	if (d->door_flags & DOOR_UNREF_MULTI) {
2251 		door_node_t *dp;
2252 		for (dp = server->p_unref_list; dp != NULL;
2253 		    dp = dp->door_ulist) {
2254 			if (d == dp) {
2255 				/* already there, don't need to add another */
2256 				mutex_exit(&door_knob);
2257 				VN_RELE(DTOV(d));
2258 				mutex_enter(&door_knob);
2259 				return;
2260 			}
2261 		}
2262 	}
2263 	ASSERT(d->door_ulist == NULL);
2264 	d->door_ulist = server->p_unref_list;
2265 	server->p_unref_list = d;
2266 	cv_broadcast(&server->p_unref_cv);
2267 }
2268 
2269 /*
2270  * The callers buffer isn't big enough for all of the data/fd's. Allocate
2271  * space in the callers address space for the results and copy the data
2272  * there.
2273  *
2274  * For EOVERFLOW, we must clean up the server's door descriptors.
2275  */
2276 static int
2277 door_overflow(
2278 	kthread_t	*caller,
2279 	caddr_t		data_ptr,	/* data location */
2280 	size_t		data_size,	/* data size */
2281 	door_desc_t	*desc_ptr,	/* descriptor location */
2282 	uint_t		desc_num)	/* descriptor size */
2283 {
2284 	proc_t *callerp = ttoproc(caller);
2285 	struct as *as = callerp->p_as;
2286 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2287 	caddr_t	addr;			/* Resulting address in target */
2288 	size_t	rlen;			/* Rounded len */
2289 	size_t	len;
2290 	uint_t	i;
2291 	size_t	ds = desc_num * sizeof (door_desc_t);
2292 
2293 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2294 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2295 
2296 	/* Do initial overflow check */
2297 	if (!ufcanalloc(callerp, desc_num))
2298 		return (EMFILE);
2299 
2300 	/*
2301 	 * Allocate space for this stuff in the callers address space
2302 	 */
2303 	rlen = roundup(data_size + ds, PAGESIZE);
2304 	as_rangelock(as);
2305 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2306 	if (addr == NULL ||
2307 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2308 		/* No virtual memory available, or anon mapping failed */
2309 		as_rangeunlock(as);
2310 		if (!ct->d_kernel && desc_num > 0) {
2311 			int error = door_release_fds(desc_ptr, desc_num);
2312 			if (error)
2313 				return (error);
2314 		}
2315 		return (EOVERFLOW);
2316 	}
2317 	as_rangeunlock(as);
2318 
2319 	if (ct->d_kernel)
2320 		goto out;
2321 
2322 	if (data_size != 0) {
2323 		caddr_t	src = data_ptr;
2324 		caddr_t saddr = addr;
2325 
2326 		/* Copy any data */
2327 		len = data_size;
2328 		while (len != 0) {
2329 			int	amount;
2330 			int	error;
2331 
2332 			amount = len > PAGESIZE ? PAGESIZE : len;
2333 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2334 				(void) as_unmap(as, addr, rlen);
2335 				return (error);
2336 			}
2337 			saddr += amount;
2338 			src += amount;
2339 			len -= amount;
2340 		}
2341 	}
2342 	/* Copy any fd's */
2343 	if (desc_num != 0) {
2344 		door_desc_t	*didpp, *start;
2345 		struct file	**fpp;
2346 		int		fpp_size;
2347 
2348 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2349 		if (copyin_nowatch(desc_ptr, didpp, ds)) {
2350 			kmem_free(start, ds);
2351 			(void) as_unmap(as, addr, rlen);
2352 			return (EFAULT);
2353 		}
2354 
2355 		fpp_size = desc_num * sizeof (struct file *);
2356 		if (fpp_size > ct->d_fpp_size) {
2357 			/* make more space */
2358 			if (ct->d_fpp_size)
2359 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2360 			ct->d_fpp_size = fpp_size;
2361 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2362 		}
2363 		fpp = ct->d_fpp;
2364 
2365 		for (i = 0; i < desc_num; i++) {
2366 			struct file *fp;
2367 			int fd = didpp->d_data.d_desc.d_descriptor;
2368 
2369 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2370 			    (fp = getf(fd)) == NULL) {
2371 				/* close translated references */
2372 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2373 				/* close untranslated references */
2374 				door_fd_rele(didpp, desc_num - i, 0);
2375 				kmem_free(start, ds);
2376 				(void) as_unmap(as, addr, rlen);
2377 				return (EINVAL);
2378 			}
2379 			mutex_enter(&fp->f_tlock);
2380 			fp->f_count++;
2381 			mutex_exit(&fp->f_tlock);
2382 
2383 			*fpp = fp;
2384 			releasef(fd);
2385 
2386 			if (didpp->d_attributes & DOOR_RELEASE) {
2387 				/* release passed reference */
2388 				(void) closeandsetf(fd, NULL);
2389 			}
2390 
2391 			fpp++; didpp++;
2392 		}
2393 		kmem_free(start, ds);
2394 	}
2395 
2396 out:
2397 	ct->d_overflow = 1;
2398 	ct->d_args.rbuf = addr;
2399 	ct->d_args.rsize = rlen;
2400 	return (0);
2401 }
2402 
2403 /*
2404  * Transfer arguments from the client to the server.
2405  */
2406 static int
2407 door_args(kthread_t *server, int is_private)
2408 {
2409 	door_server_t *st = DOOR_SERVER(server->t_door);
2410 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2411 	uint_t	ndid;
2412 	size_t	dsize;
2413 	int	error;
2414 
2415 	ASSERT(DOOR_T_HELD(st));
2416 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2417 
2418 	ndid = ct->d_args.desc_num;
2419 	if (ndid > door_max_desc)
2420 		return (E2BIG);
2421 
2422 	/*
2423 	 * Get the stack layout, and fail now if it won't fit.
2424 	 */
2425 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2426 	if (error != 0)
2427 		return (error);
2428 
2429 	dsize = ndid * sizeof (door_desc_t);
2430 	if (ct->d_args.data_size != 0) {
2431 		if (ct->d_args.data_size <= door_max_arg) {
2432 			/*
2433 			 * Use a 2 copy method for small amounts of data
2434 			 *
2435 			 * Allocate a little more than we need for the
2436 			 * args, in the hope that the results will fit
2437 			 * without having to reallocate a buffer
2438 			 */
2439 			ASSERT(ct->d_buf == NULL);
2440 			ct->d_bufsize = roundup(ct->d_args.data_size,
2441 			    DOOR_ROUND);
2442 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2443 			if (copyin_nowatch(ct->d_args.data_ptr,
2444 			    ct->d_buf, ct->d_args.data_size) != 0) {
2445 				kmem_free(ct->d_buf, ct->d_bufsize);
2446 				ct->d_buf = NULL;
2447 				ct->d_bufsize = 0;
2448 				return (EFAULT);
2449 			}
2450 		} else {
2451 			struct as	*as;
2452 			caddr_t		src;
2453 			caddr_t		dest;
2454 			size_t		len = ct->d_args.data_size;
2455 			uintptr_t	base;
2456 
2457 			/*
2458 			 * Use a 1 copy method
2459 			 */
2460 			as = ttoproc(server)->p_as;
2461 			src = ct->d_args.data_ptr;
2462 
2463 			dest = st->d_layout.dl_datap;
2464 			base = (uintptr_t)dest;
2465 
2466 			/*
2467 			 * Copy data directly into server.  We proceed
2468 			 * downward from the top of the stack, to mimic
2469 			 * normal stack usage. This allows the guard page
2470 			 * to stop us before we corrupt anything.
2471 			 */
2472 			while (len != 0) {
2473 				uintptr_t start;
2474 				uintptr_t end;
2475 				uintptr_t offset;
2476 				size_t	amount;
2477 
2478 				/*
2479 				 * Locate the next part to copy.
2480 				 */
2481 				end = base + len;
2482 				start = P2ALIGN(end - 1, PAGESIZE);
2483 
2484 				/*
2485 				 * if we are on the final (first) page, fix
2486 				 * up the start position.
2487 				 */
2488 				if (P2ALIGN(base, PAGESIZE) == start)
2489 					start = base;
2490 
2491 				offset = start - base;	/* the copy offset */
2492 				amount = end - start;	/* # bytes to copy */
2493 
2494 				ASSERT(amount > 0 && amount <= len &&
2495 				    amount <= PAGESIZE);
2496 
2497 				error = door_copy(as, src + offset,
2498 				    dest + offset, amount);
2499 				if (error != 0)
2500 					return (error);
2501 				len -= amount;
2502 			}
2503 		}
2504 	}
2505 	/*
2506 	 * Copyin the door args and translate them into files
2507 	 */
2508 	if (ndid != 0) {
2509 		door_desc_t	*didpp;
2510 		door_desc_t	*start;
2511 		struct file	**fpp;
2512 
2513 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2514 
2515 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2516 			kmem_free(start, dsize);
2517 			return (EFAULT);
2518 		}
2519 		ct->d_fpp_size = ndid * sizeof (struct file *);
2520 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2521 		fpp = ct->d_fpp;
2522 		while (ndid--) {
2523 			struct file *fp;
2524 			int fd = didpp->d_data.d_desc.d_descriptor;
2525 
2526 			/* We only understand file descriptors as passed objs */
2527 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2528 			    (fp = getf(fd)) == NULL) {
2529 				/* close translated references */
2530 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2531 				/* close untranslated references */
2532 				door_fd_rele(didpp, ndid + 1, 0);
2533 				kmem_free(start, dsize);
2534 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2535 				ct->d_fpp = NULL;
2536 				ct->d_fpp_size = 0;
2537 				return (EINVAL);
2538 			}
2539 			/* Hold the fp */
2540 			mutex_enter(&fp->f_tlock);
2541 			fp->f_count++;
2542 			mutex_exit(&fp->f_tlock);
2543 
2544 			*fpp = fp;
2545 			releasef(fd);
2546 
2547 			if (didpp->d_attributes & DOOR_RELEASE) {
2548 				/* release passed reference */
2549 				(void) closeandsetf(fd, NULL);
2550 			}
2551 
2552 			fpp++; didpp++;
2553 		}
2554 		kmem_free(start, dsize);
2555 	}
2556 	return (0);
2557 }
2558 
2559 /*
2560  * Transfer arguments from a user client to a kernel server.  This copies in
2561  * descriptors and translates them into door handles.  It doesn't touch the
2562  * other data, letting the kernel server deal with that (to avoid needing
2563  * to copy the data twice).
2564  */
2565 static int
2566 door_translate_in(void)
2567 {
2568 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2569 	uint_t	ndid;
2570 
2571 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2572 	ndid = ct->d_args.desc_num;
2573 	if (ndid > door_max_desc)
2574 		return (E2BIG);
2575 	/*
2576 	 * Copyin the door args and translate them into door handles.
2577 	 */
2578 	if (ndid != 0) {
2579 		door_desc_t	*didpp;
2580 		door_desc_t	*start;
2581 		size_t		dsize = ndid * sizeof (door_desc_t);
2582 		struct file	*fp;
2583 
2584 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2585 
2586 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2587 			kmem_free(start, dsize);
2588 			return (EFAULT);
2589 		}
2590 		while (ndid--) {
2591 			vnode_t	*vp;
2592 			int fd = didpp->d_data.d_desc.d_descriptor;
2593 
2594 			/*
2595 			 * We only understand file descriptors as passed objs
2596 			 */
2597 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2598 			    (fp = getf(fd)) != NULL) {
2599 				didpp->d_data.d_handle = FTODH(fp);
2600 				/* Hold the door */
2601 				door_ki_hold(didpp->d_data.d_handle);
2602 
2603 				releasef(fd);
2604 
2605 				if (didpp->d_attributes & DOOR_RELEASE) {
2606 					/* release passed reference */
2607 					(void) closeandsetf(fd, NULL);
2608 				}
2609 
2610 				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
2611 					vp = fp->f_vnode;
2612 
2613 				/* Set attributes */
2614 				didpp->d_attributes = DOOR_HANDLE |
2615 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2616 			} else {
2617 				/* close translated references */
2618 				door_fd_close(start, didpp - start);
2619 				/* close untranslated references */
2620 				door_fd_rele(didpp, ndid + 1, 0);
2621 				kmem_free(start, dsize);
2622 				return (EINVAL);
2623 			}
2624 			didpp++;
2625 		}
2626 		ct->d_args.desc_ptr = start;
2627 	}
2628 	return (0);
2629 }
2630 
2631 /*
2632  * Translate door arguments from kernel to user.  This copies the passed
2633  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2634  * and for data returned by a door_call to a kernel server.
2635  */
2636 static int
2637 door_translate_out(void)
2638 {
2639 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2640 	uint_t	ndid;
2641 
2642 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2643 	ndid = ct->d_args.desc_num;
2644 	if (ndid > door_max_desc) {
2645 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2646 		return (E2BIG);
2647 	}
2648 	/*
2649 	 * Translate the door args into files
2650 	 */
2651 	if (ndid != 0) {
2652 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2653 		struct file	**fpp;
2654 
2655 		ct->d_fpp_size = ndid * sizeof (struct file *);
2656 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2657 		while (ndid--) {
2658 			struct file *fp = NULL;
2659 			int fd = -1;
2660 
2661 			/*
2662 			 * We understand file descriptors and door
2663 			 * handles as passed objs.
2664 			 */
2665 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2666 				fd = didpp->d_data.d_desc.d_descriptor;
2667 				fp = getf(fd);
2668 			} else if (didpp->d_attributes & DOOR_HANDLE)
2669 				fp = DHTOF(didpp->d_data.d_handle);
2670 			if (fp != NULL) {
2671 				/* Hold the fp */
2672 				mutex_enter(&fp->f_tlock);
2673 				fp->f_count++;
2674 				mutex_exit(&fp->f_tlock);
2675 
2676 				*fpp = fp;
2677 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2678 					releasef(fd);
2679 				if (didpp->d_attributes & DOOR_RELEASE) {
2680 					/* release passed reference */
2681 					if (fd >= 0)
2682 						(void) closeandsetf(fd, NULL);
2683 					else
2684 						(void) closef(fp);
2685 				}
2686 			} else {
2687 				/* close translated references */
2688 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2689 				/* close untranslated references */
2690 				door_fd_rele(didpp, ndid + 1, 1);
2691 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2692 				ct->d_fpp = NULL;
2693 				ct->d_fpp_size = 0;
2694 				return (EINVAL);
2695 			}
2696 			fpp++; didpp++;
2697 		}
2698 	}
2699 	return (0);
2700 }
2701 
2702 /*
2703  * Move the results from the server to the client
2704  */
2705 static int
2706 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2707 		door_desc_t *desc_ptr, uint_t desc_num)
2708 {
2709 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2710 	size_t		dsize;
2711 	size_t		rlen;
2712 	size_t		result_size;
2713 
2714 	ASSERT(DOOR_T_HELD(ct));
2715 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2716 
2717 	if (ct->d_noresults)
2718 		return (E2BIG);		/* No results expected */
2719 
2720 	if (desc_num > door_max_desc)
2721 		return (E2BIG);		/* Too many descriptors */
2722 
2723 	dsize = desc_num * sizeof (door_desc_t);
2724 	/*
2725 	 * Check if the results are bigger than the clients buffer
2726 	 */
2727 	if (dsize)
2728 		rlen = roundup(data_size, sizeof (door_desc_t));
2729 	else
2730 		rlen = data_size;
2731 	if ((result_size = rlen + dsize) == 0)
2732 		return (0);
2733 
2734 	if (ct->d_upcall) {
2735 		/*
2736 		 * Handle upcalls
2737 		 */
2738 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2739 			/*
2740 			 * If there's no return buffer or the buffer is too
2741 			 * small, allocate a new one.  The old buffer (if it
2742 			 * exists) will be freed by the upcall client.
2743 			 */
2744 			if (result_size > door_max_upcall_reply)
2745 				return (E2BIG);
2746 			ct->d_args.rsize = result_size;
2747 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2748 		}
2749 		ct->d_args.data_ptr = ct->d_args.rbuf;
2750 		if (data_size != 0 &&
2751 		    copyin_nowatch(data_ptr, ct->d_args.data_ptr,
2752 		    data_size) != 0)
2753 			return (EFAULT);
2754 	} else if (result_size > ct->d_args.rsize) {
2755 		return (door_overflow(caller, data_ptr, data_size,
2756 		    desc_ptr, desc_num));
2757 	} else if (data_size != 0) {
2758 		if (data_size <= door_max_arg) {
2759 			/*
2760 			 * Use a 2 copy method for small amounts of data
2761 			 */
2762 			if (ct->d_buf == NULL) {
2763 				ct->d_bufsize = data_size;
2764 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2765 			} else if (ct->d_bufsize < data_size) {
2766 				kmem_free(ct->d_buf, ct->d_bufsize);
2767 				ct->d_bufsize = data_size;
2768 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2769 			}
2770 			if (copyin_nowatch(data_ptr, ct->d_buf, data_size) != 0)
2771 				return (EFAULT);
2772 		} else {
2773 			struct as *as = ttoproc(caller)->p_as;
2774 			caddr_t	dest = ct->d_args.rbuf;
2775 			caddr_t	src = data_ptr;
2776 			size_t	len = data_size;
2777 
2778 			/* Copy data directly into client */
2779 			while (len != 0) {
2780 				uint_t	amount;
2781 				uint_t	max;
2782 				uint_t	off;
2783 				int	error;
2784 
2785 				off = (uintptr_t)dest & PAGEOFFSET;
2786 				if (off)
2787 					max = PAGESIZE - off;
2788 				else
2789 					max = PAGESIZE;
2790 				amount = len > max ? max : len;
2791 				error = door_copy(as, src, dest, amount);
2792 				if (error != 0)
2793 					return (error);
2794 				dest += amount;
2795 				src += amount;
2796 				len -= amount;
2797 			}
2798 		}
2799 	}
2800 
2801 	/*
2802 	 * Copyin the returned door ids and translate them into door_node_t
2803 	 */
2804 	if (desc_num != 0) {
2805 		door_desc_t *start;
2806 		door_desc_t *didpp;
2807 		struct file **fpp;
2808 		size_t	fpp_size;
2809 		uint_t	i;
2810 
2811 		/* First, check if we would overflow client */
2812 		if (!ufcanalloc(ttoproc(caller), desc_num))
2813 			return (EMFILE);
2814 
2815 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2816 		if (copyin_nowatch(desc_ptr, didpp, dsize)) {
2817 			kmem_free(start, dsize);
2818 			return (EFAULT);
2819 		}
2820 		fpp_size = desc_num * sizeof (struct file *);
2821 		if (fpp_size > ct->d_fpp_size) {
2822 			/* make more space */
2823 			if (ct->d_fpp_size)
2824 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2825 			ct->d_fpp_size = fpp_size;
2826 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2827 		}
2828 		fpp = ct->d_fpp;
2829 
2830 		for (i = 0; i < desc_num; i++) {
2831 			struct file *fp;
2832 			int fd = didpp->d_data.d_desc.d_descriptor;
2833 
2834 			/* Only understand file descriptor results */
2835 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2836 			    (fp = getf(fd)) == NULL) {
2837 				/* close translated references */
2838 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2839 				/* close untranslated references */
2840 				door_fd_rele(didpp, desc_num - i, 0);
2841 				kmem_free(start, dsize);
2842 				return (EINVAL);
2843 			}
2844 
2845 			mutex_enter(&fp->f_tlock);
2846 			fp->f_count++;
2847 			mutex_exit(&fp->f_tlock);
2848 
2849 			*fpp = fp;
2850 			releasef(fd);
2851 
2852 			if (didpp->d_attributes & DOOR_RELEASE) {
2853 				/* release passed reference */
2854 				(void) closeandsetf(fd, NULL);
2855 			}
2856 
2857 			fpp++; didpp++;
2858 		}
2859 		kmem_free(start, dsize);
2860 	}
2861 	return (0);
2862 }
2863 
2864 /*
2865  * Close all the descriptors.
2866  */
2867 static void
2868 door_fd_close(door_desc_t *d, uint_t n)
2869 {
2870 	uint_t	i;
2871 
2872 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2873 	for (i = 0; i < n; i++) {
2874 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2875 			(void) closeandsetf(
2876 			    d->d_data.d_desc.d_descriptor, NULL);
2877 		} else if (d->d_attributes & DOOR_HANDLE) {
2878 			door_ki_rele(d->d_data.d_handle);
2879 		}
2880 		d++;
2881 	}
2882 }
2883 
2884 /*
2885  * Close descriptors that have the DOOR_RELEASE attribute set.
2886  */
2887 void
2888 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2889 {
2890 	uint_t	i;
2891 
2892 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2893 	for (i = 0; i < n; i++) {
2894 		if (d->d_attributes & DOOR_RELEASE) {
2895 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2896 				(void) closeandsetf(
2897 				    d->d_data.d_desc.d_descriptor, NULL);
2898 			} else if (from_kernel &&
2899 			    (d->d_attributes & DOOR_HANDLE)) {
2900 				door_ki_rele(d->d_data.d_handle);
2901 			}
2902 		}
2903 		d++;
2904 	}
2905 }
2906 
2907 /*
2908  * Copy descriptors into the kernel so we can release any marked
2909  * DOOR_RELEASE.
2910  */
2911 int
2912 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2913 {
2914 	size_t dsize;
2915 	door_desc_t *didpp;
2916 	uint_t desc_num;
2917 
2918 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2919 	ASSERT(ndesc != 0);
2920 
2921 	desc_num = MIN(ndesc, door_max_desc);
2922 
2923 	dsize = desc_num * sizeof (door_desc_t);
2924 	didpp = kmem_alloc(dsize, KM_SLEEP);
2925 
2926 	while (ndesc > 0) {
2927 		uint_t count = MIN(ndesc, desc_num);
2928 
2929 		if (copyin_nowatch(desc_ptr, didpp,
2930 		    count * sizeof (door_desc_t))) {
2931 			kmem_free(didpp, dsize);
2932 			return (EFAULT);
2933 		}
2934 		door_fd_rele(didpp, count, 0);
2935 
2936 		ndesc -= count;
2937 		desc_ptr += count;
2938 	}
2939 	kmem_free(didpp, dsize);
2940 	return (0);
2941 }
2942 
2943 /*
2944  * Decrement ref count on all the files passed
2945  */
2946 static void
2947 door_fp_close(struct file **fp, uint_t n)
2948 {
2949 	uint_t	i;
2950 
2951 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2952 
2953 	for (i = 0; i < n; i++)
2954 		(void) closef(fp[i]);
2955 }
2956 
2957 /*
2958  * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
2959  * bytes.
2960  *
2961  * Performs this using 1 mapin and 1 copy operation.
2962  *
2963  * We really should do more than 1 page at a time to improve
2964  * performance, but for now this is treated as an anomalous condition.
2965  */
2966 static int
2967 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2968 {
2969 	caddr_t	kaddr;
2970 	caddr_t	rdest;
2971 	uint_t	off;
2972 	page_t	**pplist;
2973 	page_t	*pp = NULL;
2974 	int	error = 0;
2975 
2976 	ASSERT(len <= PAGESIZE);
2977 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
2978 	rdest = (caddr_t)((uintptr_t)dest &
2979 	    (uintptr_t)PAGEMASK);	/* Page boundary */
2980 	ASSERT(off + len <= PAGESIZE);
2981 
2982 	/*
2983 	 * Lock down destination page.
2984 	 */
2985 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
2986 		return (E2BIG);
2987 	/*
2988 	 * Check if we have a shadow page list from as_pagelock. If not,
2989 	 * we took the slow path and have to find our page struct the hard
2990 	 * way.
2991 	 */
2992 	if (pplist == NULL) {
2993 		pfn_t	pfnum;
2994 
2995 		/* MMU mapping is already locked down */
2996 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2997 		pfnum = hat_getpfnum(as->a_hat, rdest);
2998 		AS_LOCK_EXIT(as, &as->a_lock);
2999 
3000 		/*
3001 		 * TODO: The pfn step should not be necessary - need
3002 		 * a hat_getpp() function.
3003 		 */
3004 		if (pf_is_memory(pfnum)) {
3005 			pp = page_numtopp_nolock(pfnum);
3006 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
3007 		} else
3008 			pp = NULL;
3009 		if (pp == NULL) {
3010 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3011 			return (E2BIG);
3012 		}
3013 	} else {
3014 		pp = *pplist;
3015 	}
3016 	/*
3017 	 * Map destination page into kernel address
3018 	 */
3019 	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
3020 
3021 	/*
3022 	 * Copy from src to dest
3023 	 */
3024 	if (copyin_nowatch(src, kaddr + off, len) != 0)
3025 		error = EFAULT;
3026 	/*
3027 	 * Unmap destination page from kernel
3028 	 */
3029 	ppmapout(kaddr);
3030 	/*
3031 	 * Unlock destination page
3032 	 */
3033 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3034 	return (error);
3035 }
3036 
3037 /*
3038  * General kernel upcall using doors
3039  *	Returns 0 on success, errno for failures.
3040  *	Caller must have a hold on the door based vnode, and on any
3041  *	references passed in desc_ptr.  The references are released
3042  *	in the event of an error, and passed without duplication
3043  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
3044  *	a 64-bit kernel, since it may be used to store door descriptors
3045  *	if they are returned by the server.  The caller is responsible
3046  *	for holding a reference to the cred passed in.
3047  */
3048 int
3049 door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred)
3050 {
3051 	/* Locals */
3052 	door_node_t	*dp;
3053 	kthread_t	*server_thread;
3054 	int		error = 0;
3055 	klwp_t		*lwp;
3056 	door_client_t	*ct;		/* curthread door_data */
3057 	door_server_t	*st;		/* server thread door_data */
3058 	int		gotresults = 0;
3059 	int		cancel_pending;
3060 
3061 	if (vp->v_type != VDOOR) {
3062 		if (param->desc_num)
3063 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3064 		return (EINVAL);
3065 	}
3066 
3067 	lwp = ttolwp(curthread);
3068 	ct = door_my_client(1);
3069 	dp = VTOD(vp);	/* Convert to a door_node_t */
3070 
3071 	mutex_enter(&door_knob);
3072 	if (DOOR_INVALID(dp)) {
3073 		mutex_exit(&door_knob);
3074 		if (param->desc_num)
3075 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3076 		error = EBADF;
3077 		goto out;
3078 	}
3079 
3080 	if (dp->door_target == &p0) {
3081 		/* Can't do an upcall to a kernel server */
3082 		mutex_exit(&door_knob);
3083 		if (param->desc_num)
3084 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3085 		error = EINVAL;
3086 		goto out;
3087 	}
3088 
3089 	error = door_check_limits(dp, param, 1);
3090 	if (error != 0) {
3091 		mutex_exit(&door_knob);
3092 		if (param->desc_num)
3093 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3094 		goto out;
3095 	}
3096 
3097 	/*
3098 	 * Get a server thread from the target domain
3099 	 */
3100 	if ((server_thread = door_get_server(dp)) == NULL) {
3101 		if (DOOR_INVALID(dp))
3102 			error = EBADF;
3103 		else
3104 			error = EAGAIN;
3105 		mutex_exit(&door_knob);
3106 		if (param->desc_num)
3107 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3108 		goto out;
3109 	}
3110 
3111 	st = DOOR_SERVER(server_thread->t_door);
3112 	ct->d_buf = param->data_ptr;
3113 	ct->d_bufsize = param->data_size;
3114 	ct->d_args = *param;	/* structure assignment */
3115 
3116 	if (ct->d_args.desc_num) {
3117 		/*
3118 		 * Move data from client to server
3119 		 */
3120 		DOOR_T_HOLD(st);
3121 		mutex_exit(&door_knob);
3122 		error = door_translate_out();
3123 		mutex_enter(&door_knob);
3124 		DOOR_T_RELEASE(st);
3125 		if (error) {
3126 			/*
3127 			 * We're not going to resume this thread after all
3128 			 */
3129 			door_release_server(dp, server_thread);
3130 			shuttle_sleep(server_thread);
3131 			mutex_exit(&door_knob);
3132 			goto out;
3133 		}
3134 	}
3135 
3136 	ct->d_upcall = 1;
3137 	ct->d_cred = cred;
3138 	if (param->rsize == 0)
3139 		ct->d_noresults = 1;
3140 	else
3141 		ct->d_noresults = 0;
3142 
3143 	dp->door_active++;
3144 
3145 	ct->d_error = DOOR_WAIT;
3146 	st->d_caller = curthread;
3147 	st->d_active = dp;
3148 
3149 	shuttle_resume(server_thread, &door_knob);
3150 
3151 	mutex_enter(&door_knob);
3152 shuttle_return:
3153 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3154 		/*
3155 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3156 		 */
3157 		mutex_exit(&door_knob);		/* May block in ISSIG */
3158 		cancel_pending = 0;
3159 		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3160 		    MUSTRETURN(curproc, curthread) ||
3161 		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
3162 			/* Signal, forkall, ... */
3163 			if (cancel_pending)
3164 				schedctl_cancel_eintr();
3165 			lwp->lwp_sysabort = 0;
3166 			mutex_enter(&door_knob);
3167 			error = EINTR;
3168 			/*
3169 			 * If the server has finished processing our call,
3170 			 * or exited (calling door_slam()), then d_error
3171 			 * will have changed.  If the server hasn't finished
3172 			 * yet, d_error will still be DOOR_WAIT, and we
3173 			 * let it know we are not interested in any
3174 			 * results by sending a SIGCANCEL, unless the door
3175 			 * is marked with DOOR_NO_CANCEL.
3176 			 */
3177 			if (ct->d_error == DOOR_WAIT &&
3178 			    st->d_caller == curthread) {
3179 				proc_t	*p = ttoproc(server_thread);
3180 
3181 				st->d_active = NULL;
3182 				st->d_caller = NULL;
3183 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3184 					DOOR_T_HOLD(st);
3185 					mutex_exit(&door_knob);
3186 
3187 					mutex_enter(&p->p_lock);
3188 					sigtoproc(p, server_thread, SIGCANCEL);
3189 					mutex_exit(&p->p_lock);
3190 
3191 					mutex_enter(&door_knob);
3192 					DOOR_T_RELEASE(st);
3193 				}
3194 			}
3195 		} else {
3196 			/*
3197 			 * Return from stop(), server exit...
3198 			 *
3199 			 * Note that the server could have done a
3200 			 * door_return while the client was in stop state
3201 			 * (ISSIG), in which case the error condition
3202 			 * is updated by the server.
3203 			 */
3204 			mutex_enter(&door_knob);
3205 			if (ct->d_error == DOOR_WAIT) {
3206 				/* Still waiting for a reply */
3207 				shuttle_swtch(&door_knob);
3208 				mutex_enter(&door_knob);
3209 				if (lwp)
3210 					lwp->lwp_asleep = 0;
3211 				goto	shuttle_return;
3212 			} else if (ct->d_error == DOOR_EXIT) {
3213 				/* Server exit */
3214 				error = EINTR;
3215 			} else {
3216 				/* Server did a door_return during ISSIG */
3217 				error = ct->d_error;
3218 			}
3219 		}
3220 		/*
3221 		 * Can't exit if the server is currently copying
3222 		 * results for me
3223 		 */
3224 		while (DOOR_T_HELD(ct))
3225 			cv_wait(&ct->d_cv, &door_knob);
3226 
3227 		/*
3228 		 * Find out if results were successfully copied.
3229 		 */
3230 		if (ct->d_error == 0)
3231 			gotresults = 1;
3232 	}
3233 	if (lwp) {
3234 		lwp->lwp_asleep = 0;		/* /proc */
3235 		lwp->lwp_sysabort = 0;		/* /proc */
3236 	}
3237 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3238 		door_deliver_unref(dp);
3239 	mutex_exit(&door_knob);
3240 
3241 	/*
3242 	 * Translate returned doors (if any)
3243 	 */
3244 
3245 	if (ct->d_noresults)
3246 		goto out;
3247 
3248 	if (error) {
3249 		/*
3250 		 * If server returned results successfully, then we've
3251 		 * been interrupted and may need to clean up.
3252 		 */
3253 		if (gotresults) {
3254 			ASSERT(error == EINTR);
3255 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3256 		}
3257 		goto out;
3258 	}
3259 
3260 	if (ct->d_args.desc_num) {
3261 		struct file	**fpp;
3262 		door_desc_t	*didpp;
3263 		vnode_t		*vp;
3264 		uint_t		n = ct->d_args.desc_num;
3265 
3266 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3267 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3268 		fpp = ct->d_fpp;
3269 
3270 		while (n--) {
3271 			struct file *fp;
3272 
3273 			fp = *fpp;
3274 			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3275 				vp = fp->f_vnode;
3276 
3277 			didpp->d_attributes = DOOR_HANDLE |
3278 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3279 			didpp->d_data.d_handle = FTODH(fp);
3280 
3281 			fpp++; didpp++;
3282 		}
3283 	}
3284 
3285 	/* on return data is in rbuf */
3286 	*param = ct->d_args;		/* structure assignment */
3287 
3288 out:
3289 	if (ct->d_fpp) {
3290 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3291 		ct->d_fpp = NULL;
3292 		ct->d_fpp_size = 0;
3293 	}
3294 
3295 	ct->d_cred = NULL;
3296 	ct->d_upcall = 0;
3297 	ct->d_noresults = 0;
3298 	ct->d_buf = NULL;
3299 	ct->d_bufsize = 0;
3300 	return (error);
3301 }
3302 
3303 /*
3304  * Add a door to the per-process list of active doors for which the
3305  * process is a server.
3306  */
3307 static void
3308 door_list_insert(door_node_t *dp)
3309 {
3310 	proc_t *p = dp->door_target;
3311 
3312 	ASSERT(MUTEX_HELD(&door_knob));
3313 	dp->door_list = p->p_door_list;
3314 	p->p_door_list = dp;
3315 }
3316 
3317 /*
3318  * Remove a door from the per-process list of active doors.
3319  */
3320 void
3321 door_list_delete(door_node_t *dp)
3322 {
3323 	door_node_t **pp;
3324 
3325 	ASSERT(MUTEX_HELD(&door_knob));
3326 	/*
3327 	 * Find the door in the list.  If the door belongs to another process,
3328 	 * it's OK to use p_door_list since that process can't exit until all
3329 	 * doors have been taken off the list (see door_exit).
3330 	 */
3331 	pp = &(dp->door_target->p_door_list);
3332 	while (*pp != dp)
3333 		pp = &((*pp)->door_list);
3334 
3335 	/* found it, take it off the list */
3336 	*pp = dp->door_list;
3337 }
3338 
3339 
3340 /*
3341  * External kernel interfaces for doors.  These functions are available
3342  * outside the doorfs module for use in creating and using doors from
3343  * within the kernel.
3344  */
3345 
3346 /*
3347  * door_ki_upcall invokes a user-level door server from the kernel, with
3348  * the credentials associated with curthread.
3349  */
3350 int
3351 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3352 {
3353 	return (door_ki_upcall_cred(dh, param, NULL));
3354 }
3355 
3356 /*
3357  * door_ki_upcall_cred invokes a user-level door server from the kernel with
3358  * the given credentials. If the "cred" argument is NULL, uses the credentials
3359  * associated with current thread.
3360  */
3361 int
3362 door_ki_upcall_cred(door_handle_t dh, door_arg_t *param, struct cred *cred)
3363 {
3364 	file_t *fp = DHTOF(dh);
3365 	vnode_t *realvp;
3366 
3367 	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
3368 		realvp = fp->f_vnode;
3369 	return (door_upcall(realvp, param, cred));
3370 }
3371 
3372 /*
3373  * Function call to create a "kernel" door server.  A kernel door
3374  * server provides a way for a user-level process to invoke a function
3375  * in the kernel through a door_call.  From the caller's point of
3376  * view, a kernel door server looks the same as a user-level one
3377  * (except the server pid is 0).  Unlike normal door calls, the
3378  * kernel door function is invoked via a normal function call in the
3379  * same thread and context as the caller.
3380  */
3381 int
3382 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3383     door_handle_t *dhp)
3384 {
3385 	int err;
3386 	file_t *fp;
3387 
3388 	/* no DOOR_PRIVATE */
3389 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3390 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3391 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3392 		return (EINVAL);
3393 
3394 	err = door_create_common(pc_cookie, data_cookie, attributes,
3395 	    1, NULL, &fp);
3396 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3397 	    p0.p_unref_thread == 0) {
3398 		/* need to create unref thread for process 0 */
3399 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3400 		    TS_RUN, minclsyspri);
3401 	}
3402 	if (err == 0) {
3403 		*dhp = FTODH(fp);
3404 	}
3405 	return (err);
3406 }
3407 
3408 void
3409 door_ki_hold(door_handle_t dh)
3410 {
3411 	file_t *fp = DHTOF(dh);
3412 
3413 	mutex_enter(&fp->f_tlock);
3414 	fp->f_count++;
3415 	mutex_exit(&fp->f_tlock);
3416 }
3417 
3418 void
3419 door_ki_rele(door_handle_t dh)
3420 {
3421 	file_t *fp = DHTOF(dh);
3422 
3423 	(void) closef(fp);
3424 }
3425 
3426 int
3427 door_ki_open(char *pathname, door_handle_t *dhp)
3428 {
3429 	file_t *fp;
3430 	vnode_t *vp;
3431 	int err;
3432 
3433 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3434 		return (err);
3435 	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
3436 		VN_RELE(vp);
3437 		return (err);
3438 	}
3439 	if (vp->v_type != VDOOR) {
3440 		VN_RELE(vp);
3441 		return (EINVAL);
3442 	}
3443 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3444 		VN_RELE(vp);
3445 		return (err);
3446 	}
3447 	/* falloc returns with f_tlock held on success */
3448 	mutex_exit(&fp->f_tlock);
3449 	*dhp = FTODH(fp);
3450 	return (0);
3451 }
3452 
3453 int
3454 door_ki_info(door_handle_t dh, struct door_info *dip)
3455 {
3456 	file_t *fp = DHTOF(dh);
3457 	vnode_t *vp;
3458 
3459 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3460 		vp = fp->f_vnode;
3461 	if (vp->v_type != VDOOR)
3462 		return (EINVAL);
3463 	door_info_common(VTOD(vp), dip, fp);
3464 	return (0);
3465 }
3466 
3467 door_handle_t
3468 door_ki_lookup(int did)
3469 {
3470 	file_t *fp;
3471 	door_handle_t dh;
3472 
3473 	/* is the descriptor really a door? */
3474 	if (door_lookup(did, &fp) == NULL)
3475 		return (NULL);
3476 	/* got the door, put a hold on it and release the fd */
3477 	dh = FTODH(fp);
3478 	door_ki_hold(dh);
3479 	releasef(did);
3480 	return (dh);
3481 }
3482 
3483 int
3484 door_ki_setparam(door_handle_t dh, int type, size_t val)
3485 {
3486 	file_t *fp = DHTOF(dh);
3487 	vnode_t *vp;
3488 
3489 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3490 		vp = fp->f_vnode;
3491 	if (vp->v_type != VDOOR)
3492 		return (EINVAL);
3493 	return (door_setparam_common(VTOD(vp), 1, type, val));
3494 }
3495 
3496 int
3497 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3498 {
3499 	file_t *fp = DHTOF(dh);
3500 	vnode_t *vp;
3501 
3502 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3503 		vp = fp->f_vnode;
3504 	if (vp->v_type != VDOOR)
3505 		return (EINVAL);
3506 	return (door_getparam_common(VTOD(vp), type, out));
3507 }
3508