xref: /illumos-gate/usr/src/uts/common/fs/doorfs/door_sys.c (revision b7b97454b9b1f6625e7e655e9651e744a8dee09d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * System call I/F to doors (outside of vnodes I/F) and misc support
31  * routines
32  */
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/door.h>
36 #include <sys/door_data.h>
37 #include <sys/proc.h>
38 #include <sys/thread.h>
39 #include <sys/class.h>
40 #include <sys/cred.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
49 #include <sys/vfs.h>
50 #include <sys/vfs_opreg.h>
51 #include <sys/sobject.h>
52 #include <sys/schedctl.h>
53 #include <sys/callb.h>
54 #include <sys/ucred.h>
55 
56 #include <sys/mman.h>
57 #include <sys/sysmacros.h>
58 #include <sys/vmsystm.h>
59 #include <vm/as.h>
60 #include <vm/hat.h>
61 #include <vm/page.h>
62 #include <vm/seg.h>
63 #include <vm/seg_vn.h>
65 
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
70 
71 /*
72  * The maximum amount of data (in bytes) that will be transferred using
73  * an intermediate kernel buffer.  For sizes greater than this we map
74  * in the destination pages and perform a 1-copy transfer.
75  */
76 size_t	door_max_arg = 16 * 1024;
77 
78 /*
79  * Maximum amount of data that will be transferred in a reply to a
80  * door_upcall.  Need to guard against a process returning huge amounts
81  * of data and getting the kernel stuck in kmem_alloc.
82  */
83 size_t	door_max_upcall_reply = 1024 * 1024;
84 
85 /*
86  * Maximum number of descriptors allowed to be passed in a single
87  * door_call or door_return.  We need to allocate kernel memory
88  * for all of them at once, so we can't let it scale without limit.
89  */
90 uint_t door_max_desc = 1024;
91 
92 /*
93  * Definition of a door handle, used by other kernel subsystems when
94  * calling door functions.  This is really a file structure but we
95  * want to hide that fact.
96  */
97 struct __door_handle {
98 	file_t dh_file;
99 };
100 
101 #define	DHTOF(dh) ((file_t *)(dh))
102 #define	FTODH(fp) ((door_handle_t)(fp))
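/*
 * Illustrative sketch only (not part of this file's logic): other kernel
 * subsystems normally obtain and use a door_handle_t through the
 * door_ki_*() interfaces rather than touching the underlying file_t.
 * Prototypes live in <sys/door.h>; the names and target path below are
 * made up, and error handling is omitted:
 *
 *	door_handle_t dh;
 *	door_arg_t da;
 *
 *	if (door_ki_open("/var/run/example_door", &dh) == 0) {
 *		da.data_ptr = (char *)&req;
 *		da.data_size = sizeof (req);
 *		da.desc_ptr = NULL;
 *		da.desc_num = 0;
 *		da.rbuf = (char *)&reply;
 *		da.rsize = sizeof (reply);
 *		(void) door_ki_upcall(dh, &da);
 *		door_ki_rele(dh);
 *	}
 */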
103 
104 static int doorfs(long, long, long, long, long, long);
105 
106 static struct sysent door_sysent = {
107 	6,
108 	SE_ARGC | SE_NOUNLOAD,
109 	(int (*)())doorfs,
110 };
111 
112 static struct modlsys modlsys = {
113 	&mod_syscallops, "doors", &door_sysent
114 };
115 
116 #ifdef _SYSCALL32_IMPL
117 
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120     int32_t arg5, int32_t subcode);
121 
122 static struct sysent door_sysent32 = {
123 	6,
124 	SE_ARGC | SE_NOUNLOAD,
125 	(int (*)())doorfs32,
126 };
127 
128 static struct modlsys modlsys32 = {
129 	&mod_syscallops32,
130 	"32-bit door syscalls",
131 	&door_sysent32
132 };
133 #endif
134 
135 static struct modlinkage modlinkage = {
136 	MODREV_1,
137 	&modlsys,
138 #ifdef _SYSCALL32_IMPL
139 	&modlsys32,
140 #endif
141 	NULL
142 };
143 
144 dev_t	doordev;
145 
146 extern	struct vfs door_vfs;
147 extern	struct vnodeops *door_vnodeops;
148 
149 int
150 _init(void)
151 {
152 	static const fs_operation_def_t door_vfsops_template[] = {
153 		NULL, NULL
154 	};
155 	extern const fs_operation_def_t door_vnodeops_template[];
156 	vfsops_t *door_vfsops;
157 	major_t major;
158 	int error;
159 
160 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
161 	if ((major = getudev()) == (major_t)-1)
162 		return (ENXIO);
163 	doordev = makedevice(major, 0);
164 
165 	/* Create a dummy vfs */
166 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
167 	if (error != 0) {
168 		cmn_err(CE_WARN, "door init: bad vfs ops");
169 		return (error);
170 	}
171 	VFS_INIT(&door_vfs, door_vfsops, NULL);
172 	door_vfs.vfs_flag = VFS_RDONLY;
173 	door_vfs.vfs_dev = doordev;
174 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
175 
176 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
177 	if (error != 0) {
178 		vfs_freevfsops(door_vfsops);
179 		cmn_err(CE_WARN, "door init: bad vnode ops");
180 		return (error);
181 	}
182 	return (mod_install(&modlinkage));
183 }
184 
185 int
186 _info(struct modinfo *modinfop)
187 {
188 	return (mod_info(&modlinkage, modinfop));
189 }
190 
191 /* system call functions */
192 static int door_call(int, void *);
193 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
194 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
195     uint_t), void *data_cookie, uint_t);
196 static int door_revoke(int);
197 static int door_info(int, struct door_info *);
198 static int door_ucred(struct ucred_s *);
199 static int door_bind(int);
200 static int door_unbind(void);
201 static int door_unref(void);
202 static int door_getparam(int, int, size_t *);
203 static int door_setparam(int, int, size_t);
204 
205 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
206 
207 /*
208  * System call wrapper for all door related system calls
209  */
210 static int
211 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
212 {
213 	switch (subcode) {
214 	case DOOR_CALL:
215 		return (door_call(arg1, (void *)arg2));
216 	case DOOR_RETURN: {
217 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
218 
219 		if (drdp != NULL) {
220 			door_return_desc_t drd;
221 			if (copyin(drdp, &drd, sizeof (drd)))
222 				return (EFAULT);
223 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
224 			    drd.desc_num, (caddr_t)arg4, arg5));
225 		}
226 		return (door_return((caddr_t)arg1, arg2, NULL,
227 		    0, (caddr_t)arg4, arg5));
228 	}
229 	case DOOR_RETURN_OLD:
230 		/*
231 		 * In order to support the S10 runtime environment, we
232 		 * still respond to the old syscall subcode for door_return.
233 		 * We treat it as having no stack limits.  This code should
234 		 * be removed when such support is no longer needed.
235 		 */
236 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
237 		    arg4, (caddr_t)arg5, 0));
238 	case DOOR_CREATE:
239 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
240 	case DOOR_REVOKE:
241 		return (door_revoke(arg1));
242 	case DOOR_INFO:
243 		return (door_info(arg1, (struct door_info *)arg2));
244 	case DOOR_BIND:
245 		return (door_bind(arg1));
246 	case DOOR_UNBIND:
247 		return (door_unbind());
248 	case DOOR_UNREFSYS:
249 		return (door_unref());
250 	case DOOR_UCRED:
251 		return (door_ucred((struct ucred_s *)arg1));
252 	case DOOR_GETPARAM:
253 		return (door_getparam(arg1, arg2, (size_t *)arg3));
254 	case DOOR_SETPARAM:
255 		return (door_setparam(arg1, arg2, arg3));
256 	default:
257 		return (set_errno(EINVAL));
258 	}
259 }
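/*
 * For reference, user processes reach the wrapper above through the doors
 * library, which issues the doorfs trap with one of the subcodes handled
 * here.  A minimal user-level sketch (see door_create(3C), door_call(3C)
 * and door_return(3C); identifiers are illustrative):
 *
 *	void
 *	server_proc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		... handle the request ...
 *		(void) door_return(rbuf, rlen, NULL, 0);
 *	}
 *
 *	int d = door_create(server_proc, cookie, 0);	(server, DOOR_CREATE)
 *
 *	door_arg_t da = { argp, arg_size, NULL, 0, rbuf, rsize };
 *	(void) door_call(d, &da);			(client, DOOR_CALL)
 */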
260 
261 #ifdef _SYSCALL32_IMPL
262 /*
263  * System call wrapper for all door related system calls from 32-bit programs.
264  * Needed at the moment because of the casts - they undo some damage
265  * that truss causes (sign-extending the stack pointer) when truss'ing
266  * a 32-bit program using doors.
267  */
268 static int
269 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
270     int32_t arg4, int32_t arg5, int32_t subcode)
271 {
272 	switch (subcode) {
273 	case DOOR_CALL:
274 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
275 	case DOOR_RETURN: {
276 		door_return_desc32_t *drdp =
277 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
278 		if (drdp != NULL) {
279 			door_return_desc32_t drd;
280 			if (copyin(drdp, &drd, sizeof (drd)))
281 				return (EFAULT);
282 			return (door_return(
283 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
284 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
285 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
286 			    (size_t)(uintptr_t)(size32_t)arg5));
287 		}
288 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
289 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
290 		    (size_t)(uintptr_t)(size32_t)arg5));
291 	}
292 	case DOOR_RETURN_OLD:
293 		/*
294 		 * In order to support the S10 runtime environment, we
295 		 * still respond to the old syscall subcode for door_return.
296 		 * We treat it as having no stack limits.  This code should
297 		 * be removed when such support is no longer needed.
298 		 */
299 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
300 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
301 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
302 	case DOOR_CREATE:
303 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
304 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
305 	case DOOR_REVOKE:
306 		return (door_revoke(arg1));
307 	case DOOR_INFO:
308 		return (door_info(arg1,
309 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
310 	case DOOR_BIND:
311 		return (door_bind(arg1));
312 	case DOOR_UNBIND:
313 		return (door_unbind());
314 	case DOOR_UNREFSYS:
315 		return (door_unref());
316 	case DOOR_UCRED:
317 		return (door_ucred(
318 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
319 	case DOOR_GETPARAM:
320 		return (door_getparam(arg1, arg2,
321 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
322 	case DOOR_SETPARAM:
323 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
324 
325 	default:
326 		return (set_errno(EINVAL));
327 	}
328 }
329 #endif
330 
331 void shuttle_resume(kthread_t *, kmutex_t *);
332 void shuttle_swtch(kmutex_t *);
333 void shuttle_sleep(kthread_t *);
334 
335 /*
336  * Support routines
337  */
338 static int door_create_common(void (*)(), void *, uint_t, int, int *,
339     file_t **);
340 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
341 static int door_args(kthread_t *, int);
342 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
344 static void	door_server_exit(proc_t *, kthread_t *);
345 static void	door_release_server(door_node_t *, kthread_t *);
346 static kthread_t	*door_get_server(door_node_t *);
347 static door_node_t	*door_lookup(int, file_t **);
348 static int	door_translate_in(void);
349 static int	door_translate_out(void);
350 static void	door_fd_rele(door_desc_t *, uint_t, int);
351 static void	door_list_insert(door_node_t *);
352 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
353 static int	door_release_fds(door_desc_t *, uint_t);
354 static void	door_fd_close(door_desc_t *, uint_t);
355 static void	door_fp_close(struct file **, uint_t);
356 
357 static door_data_t *
358 door_my_data(int create_if_missing)
359 {
360 	door_data_t *ddp;
361 
362 	ddp = curthread->t_door;
363 	if (create_if_missing && ddp == NULL)
364 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
365 
366 	return (ddp);
367 }
368 
369 static door_server_t *
370 door_my_server(int create_if_missing)
371 {
372 	door_data_t *ddp = door_my_data(create_if_missing);
373 
374 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
375 }
376 
377 static door_client_t *
378 door_my_client(int create_if_missing)
379 {
380 	door_data_t *ddp = door_my_data(create_if_missing);
381 
382 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
383 }
384 
385 /*
386  * System call to create a door
387  */
388 int
389 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
390 {
391 	int fd;
392 	int err;
393 
394 	if ((attributes & ~DOOR_CREATE_MASK) ||
395 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
396 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
397 		return (set_errno(EINVAL));
398 
399 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
400 	    &fd, NULL)) != 0)
401 		return (set_errno(err));
402 
403 	f_setfd(fd, FD_CLOEXEC);
404 	return (fd);
405 }
406 
407 /*
408  * Common code for creating user and kernel doors.  If a door was
409  * created, stores a file structure pointer in the location pointed
410  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
411  * pointer to a file descriptor is passed in as fdp, allocates a file
412  * descriptor representing the door.  If a door could not be created,
413  * returns an error.
414  */
415 static int
416 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
417     int from_kernel, int *fdp, file_t **fpp)
418 {
419 	door_node_t	*dp;
420 	vnode_t		*vp;
421 	struct file	*fp;
422 	static door_id_t index = 0;
423 	proc_t		*p = (from_kernel)? &p0 : curproc;
424 
425 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
426 
427 	dp->door_vnode = vn_alloc(KM_SLEEP);
428 	dp->door_target = p;
429 	dp->door_data = data_cookie;
430 	dp->door_pc = pc_cookie;
431 	dp->door_flags = attributes;
432 #ifdef _SYSCALL32_IMPL
433 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
434 		dp->door_data_max = UINT32_MAX;
435 	else
436 #endif
437 		dp->door_data_max = SIZE_MAX;
438 	dp->door_data_min = 0UL;
439 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
440 
441 	vp = DTOV(dp);
442 	vn_setops(vp, door_vnodeops);
443 	vp->v_type = VDOOR;
444 	vp->v_vfsp = &door_vfs;
445 	vp->v_data = (caddr_t)dp;
446 	mutex_enter(&door_knob);
447 	dp->door_index = index++;
448 	/* add to per-process door list */
449 	door_list_insert(dp);
450 	mutex_exit(&door_knob);
451 
452 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
453 		/*
454 		 * If the file table is full, remove the door from the
455 		 * per-process list, free the door, and return EMFILE.
456 		 */
457 		mutex_enter(&door_knob);
458 		door_list_delete(dp);
459 		mutex_exit(&door_knob);
460 		vn_free(vp);
461 		kmem_free(dp, sizeof (door_node_t));
462 		return (EMFILE);
463 	}
464 	vn_exists(vp);
465 	if (fdp != NULL)
466 		setf(*fdp, fp);
467 	mutex_exit(&fp->f_tlock);
468 
469 	if (fpp != NULL)
470 		*fpp = fp;
471 	return (0);
472 }
473 
474 static int
475 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
476 {
477 	ASSERT(MUTEX_HELD(&door_knob));
478 
479 	/* we allow unref upcalls through, despite any minimum */
480 	if (da->data_size < dp->door_data_min &&
481 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
482 		return (ENOBUFS);
483 
484 	if (da->data_size > dp->door_data_max)
485 		return (ENOBUFS);
486 
487 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
488 		return (ENOTSUP);
489 
490 	if (da->desc_num > dp->door_desc_max)
491 		return (ENFILE);
492 
493 	return (0);
494 }
495 
496 /*
497  * Door invocation.
498  */
499 int
500 door_call(int did, void *args)
501 {
502 	/* Locals */
503 	door_node_t	*dp;
504 	kthread_t	*server_thread;
505 	int		error = 0;
506 	klwp_t		*lwp;
507 	door_client_t	*ct;		/* curthread door_data */
508 	door_server_t	*st;		/* server thread door_data */
509 	door_desc_t	*start = NULL;
510 	uint_t		ncopied = 0;
511 	size_t		dsize;
512 	/* destructor for data returned by a kernel server */
513 	void		(*destfn)() = NULL;
514 	void		*destarg;
515 	model_t		datamodel;
516 	int		gotresults = 0;
517 	int		cancel_pending;
518 
519 	lwp = ttolwp(curthread);
520 	datamodel = lwp_getdatamodel(lwp);
521 
522 	ct = door_my_client(1);
523 
524 	/*
525 	 * Get the arguments
526 	 */
527 	if (args) {
528 		if (datamodel == DATAMODEL_NATIVE) {
529 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
530 				return (set_errno(EFAULT));
531 		} else {
532 			door_arg32_t    da32;
533 
534 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
535 				return (set_errno(EFAULT));
536 			ct->d_args.data_ptr =
537 			    (char *)(uintptr_t)da32.data_ptr;
538 			ct->d_args.data_size = da32.data_size;
539 			ct->d_args.desc_ptr =
540 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
541 			ct->d_args.desc_num = da32.desc_num;
542 			ct->d_args.rbuf =
543 			    (char *)(uintptr_t)da32.rbuf;
544 			ct->d_args.rsize = da32.rsize;
545 		}
546 	} else {
547 		/* No arguments, and no results allowed */
548 		ct->d_noresults = 1;
549 		ct->d_args.data_size = 0;
550 		ct->d_args.desc_num = 0;
551 		ct->d_args.rsize = 0;
552 	}
553 
554 	if ((dp = door_lookup(did, NULL)) == NULL)
555 		return (set_errno(EBADF));
556 
557 	mutex_enter(&door_knob);
558 	if (DOOR_INVALID(dp)) {
559 		mutex_exit(&door_knob);
560 		error = EBADF;
561 		goto out;
562 	}
563 
564 	/*
565 	 * Before we do anything, check that we are not exceeding the
566 	 * door's configured limits.
567 	 */
568 	error = door_check_limits(dp, &ct->d_args, 0);
569 	if (error != 0) {
570 		mutex_exit(&door_knob);
571 		goto out;
572 	}
573 
574 	/*
575 	 * Check for in-kernel door server.
576 	 */
577 	if (dp->door_target == &p0) {
578 		caddr_t rbuf = ct->d_args.rbuf;
579 		size_t rsize = ct->d_args.rsize;
580 
581 		dp->door_active++;
582 		ct->d_kernel = 1;
583 		ct->d_error = DOOR_WAIT;
584 		mutex_exit(&door_knob);
585 		/* translate file descriptors to vnodes */
586 		if (ct->d_args.desc_num) {
587 			error = door_translate_in();
588 			if (error)
589 				goto out;
590 		}
591 		/*
592 		 * Call kernel door server.  Arguments are passed and
593 		 * returned as a door_arg pointer.  When called, data_ptr
594 		 * points to user data and desc_ptr points to a kernel list
595 		 * of door descriptors that have been converted to file
596 		 * structure pointers.  It's the server function's
597 		 * responsibility to copyin the data pointed to by data_ptr
598 		 * (this avoids extra copying in some cases).  On return,
599 		 * data_ptr points to a user buffer of data, and desc_ptr
600 		 * points to a kernel list of door descriptors representing
601 		 * files.  When a reference is passed to a kernel server,
602 		 * it is the server's responsibility to release the reference
603 		 * (by calling closef).  When the server includes a
604 		 * reference in its reply, it is released as part of the
605 		 * call (the server must duplicate the reference if
606 		 * it wants to retain a copy).  The destfn, if set to
607 		 * non-NULL, is a destructor to be called when the returned
608 		 * kernel data (if any) is no longer needed (has all been
609 		 * translated and copied to user level).
610 		 */
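		/*
		 * A sketch of the prototype implied by the indirect call
		 * below (identifier names are illustrative only):
		 *
		 *	void
		 *	kserver(void *cookie, door_arg_t *args,
		 *	    void (**destfnp)(void *, void *),
		 *	    void **destargp, int *errp);
		 *
		 * If the server sets *destfnp, (*destfnp)(cookie, *destargp)
		 * is invoked once the results have been copied out (see the
		 * destructor call near the end of door_call()).
		 */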
611 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
612 		    &destfn, &destarg, &error);
613 		mutex_enter(&door_knob);
614 		/* not implemented yet */
615 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
616 			door_deliver_unref(dp);
617 		mutex_exit(&door_knob);
618 		if (error)
619 			goto out;
620 
621 		/* translate vnodes to files */
622 		if (ct->d_args.desc_num) {
623 			error = door_translate_out();
624 			if (error)
625 				goto out;
626 		}
627 		ct->d_buf = ct->d_args.rbuf;
628 		ct->d_bufsize = ct->d_args.rsize;
629 		if (rsize < (ct->d_args.data_size +
630 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
631 			/* handle overflow */
632 			error = door_overflow(curthread, ct->d_args.data_ptr,
633 			    ct->d_args.data_size, ct->d_args.desc_ptr,
634 			    ct->d_args.desc_num);
635 			if (error)
636 				goto out;
637 			/* door_overflow sets d_args rbuf and rsize */
638 		} else {
639 			ct->d_args.rbuf = rbuf;
640 			ct->d_args.rsize = rsize;
641 		}
642 		goto results;
643 	}
644 
645 	/*
646 	 * Get a server thread from the target domain
647 	 */
648 	if ((server_thread = door_get_server(dp)) == NULL) {
649 		if (DOOR_INVALID(dp))
650 			error = EBADF;
651 		else
652 			error = EAGAIN;
653 		mutex_exit(&door_knob);
654 		goto out;
655 	}
656 
657 	st = DOOR_SERVER(server_thread->t_door);
658 	if (ct->d_args.desc_num || ct->d_args.data_size) {
659 		int is_private = (dp->door_flags & DOOR_PRIVATE);
660 		/*
661 		 * Move data from client to server
662 		 */
663 		DOOR_T_HOLD(st);
664 		mutex_exit(&door_knob);
665 		error = door_args(server_thread, is_private);
666 		mutex_enter(&door_knob);
667 		DOOR_T_RELEASE(st);
668 		if (error) {
669 			/*
670 			 * We're not going to resume this thread after all
671 			 */
672 			door_release_server(dp, server_thread);
673 			shuttle_sleep(server_thread);
674 			mutex_exit(&door_knob);
675 			goto out;
676 		}
677 	}
678 
679 	dp->door_active++;
680 	ct->d_error = DOOR_WAIT;
681 	st->d_caller = curthread;
682 	st->d_active = dp;
683 
684 	shuttle_resume(server_thread, &door_knob);
685 
686 	mutex_enter(&door_knob);
687 shuttle_return:
688 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
689 		/*
690 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
691 		 */
692 		mutex_exit(&door_knob);		/* May block in ISSIG */
693 		cancel_pending = 0;
694 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
695 		    MUSTRETURN(curproc, curthread) ||
696 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
697 			/* Signal, forkall, ... */
698 			lwp->lwp_sysabort = 0;
699 			if (cancel_pending)
700 				schedctl_cancel_eintr();
701 			mutex_enter(&door_knob);
702 			error = EINTR;
703 			/*
704 			 * If the server has finished processing our call,
705 			 * or exited (calling door_slam()), then d_error
706 			 * will have changed.  If the server hasn't finished
707 			 * yet, d_error will still be DOOR_WAIT, and we
708 			 * let it know we are not interested in any
709 			 * results by sending a SIGCANCEL, unless the door
710 			 * is marked with DOOR_NO_CANCEL.
711 			 */
712 			if (ct->d_error == DOOR_WAIT &&
713 			    st->d_caller == curthread) {
714 				proc_t	*p = ttoproc(server_thread);
715 
716 				st->d_active = NULL;
717 				st->d_caller = NULL;
718 
719 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
720 					DOOR_T_HOLD(st);
721 					mutex_exit(&door_knob);
722 
723 					mutex_enter(&p->p_lock);
724 					sigtoproc(p, server_thread, SIGCANCEL);
725 					mutex_exit(&p->p_lock);
726 
727 					mutex_enter(&door_knob);
728 					DOOR_T_RELEASE(st);
729 				}
730 			}
731 		} else {
732 			/*
733 			 * Return from stop(), server exit...
734 			 *
735 			 * Note that the server could have done a
736 			 * door_return while the client was in stop state
737 			 * (ISSIG), in which case the error condition
738 			 * is updated by the server.
739 			 */
740 			mutex_enter(&door_knob);
741 			if (ct->d_error == DOOR_WAIT) {
742 				/* Still waiting for a reply */
743 				shuttle_swtch(&door_knob);
744 				mutex_enter(&door_knob);
745 				lwp->lwp_asleep = 0;
746 				goto	shuttle_return;
747 			} else if (ct->d_error == DOOR_EXIT) {
748 				/* Server exit */
749 				error = EINTR;
750 			} else {
751 				/* Server did a door_return during ISSIG */
752 				error = ct->d_error;
753 			}
754 		}
755 		/*
756 		 * Can't exit if the server is currently copying
757 		 * results for me.
758 		 */
759 		while (DOOR_T_HELD(ct))
760 			cv_wait(&ct->d_cv, &door_knob);
761 
762 		/*
763 		 * Find out if results were successfully copied.
764 		 */
765 		if (ct->d_error == 0)
766 			gotresults = 1;
767 	}
768 	lwp->lwp_asleep = 0;		/* /proc */
769 	lwp->lwp_sysabort = 0;		/* /proc */
770 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
771 		door_deliver_unref(dp);
772 	mutex_exit(&door_knob);
773 
774 results:
775 	/*
776 	 * Move the results to userland (if any)
777 	 */
778 
779 	if (ct->d_noresults)
780 		goto out;
781 
782 	if (error) {
783 		/*
784 		 * If server returned results successfully, then we've
785 		 * been interrupted and may need to clean up.
786 		 */
787 		if (gotresults) {
788 			ASSERT(error == EINTR);
789 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
790 		}
791 		goto out;
792 	}
793 
794 	/*
795 	 * Copy back data if we haven't caused an overflow (already
796 	 * handled) and we are using a 2-copy transfer, or we are
797 	 * returning data from a kernel server.
798 	 */
799 	if (ct->d_args.data_size) {
800 		ct->d_args.data_ptr = ct->d_args.rbuf;
801 		if (ct->d_kernel || (!ct->d_overflow &&
802 		    ct->d_args.data_size <= door_max_arg)) {
803 			if (copyout(ct->d_buf, ct->d_args.rbuf,
804 			    ct->d_args.data_size)) {
805 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
806 				error = EFAULT;
807 				goto out;
808 			}
809 		}
810 	}
811 
812 	/*
813 	 * stuff returned doors into our proc, copyout the descriptors
814 	 */
815 	if (ct->d_args.desc_num) {
816 		struct file	**fpp;
817 		door_desc_t	*didpp;
818 		uint_t		n = ct->d_args.desc_num;
819 
820 		dsize = n * sizeof (door_desc_t);
821 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
822 		fpp = ct->d_fpp;
823 
824 		while (n--) {
825 			if (door_insert(*fpp, didpp) == -1) {
826 				/* Close remaining files */
827 				door_fp_close(fpp, n + 1);
828 				error = EMFILE;
829 				goto out;
830 			}
831 			fpp++; didpp++; ncopied++;
832 		}
833 
834 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
835 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
836 
837 		if (copyout(start, ct->d_args.desc_ptr, dsize)) {
838 			error = EFAULT;
839 			goto out;
840 		}
841 	}
842 
843 	/*
844 	 * Return the results
845 	 */
846 	if (datamodel == DATAMODEL_NATIVE) {
847 		if (copyout(&ct->d_args, args, sizeof (door_arg_t)) != 0)
848 			error = EFAULT;
849 	} else {
850 		door_arg32_t    da32;
851 
852 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
853 		da32.data_size = ct->d_args.data_size;
854 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
855 		da32.desc_num = ct->d_args.desc_num;
856 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
857 		da32.rsize = ct->d_args.rsize;
858 		if (copyout(&da32, args, sizeof (door_arg32_t)) != 0) {
859 			error = EFAULT;
860 		}
861 	}
862 
863 out:
864 	ct->d_noresults = 0;
865 
866 	/* clean up the overflow buffer if an error occurred */
867 	if (error != 0 && ct->d_overflow) {
868 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
869 		    ct->d_args.rsize);
870 	}
871 	ct->d_overflow = 0;
872 
873 	/* call destructor */
874 	if (destfn) {
875 		ASSERT(ct->d_kernel);
876 		(*destfn)(dp->door_data, destarg);
877 		ct->d_buf = NULL;
878 		ct->d_bufsize = 0;
879 	}
880 
881 	if (dp)
882 		releasef(did);
883 
884 	if (ct->d_buf) {
885 		ASSERT(!ct->d_kernel);
886 		kmem_free(ct->d_buf, ct->d_bufsize);
887 		ct->d_buf = NULL;
888 		ct->d_bufsize = 0;
889 	}
890 	ct->d_kernel = 0;
891 
892 	/* clean up the descriptor copyout buffer */
893 	if (start != NULL) {
894 		if (error != 0)
895 			door_fd_close(start, ncopied);
896 		kmem_free(start, dsize);
897 	}
898 
899 	if (ct->d_fpp) {
900 		kmem_free(ct->d_fpp, ct->d_fpp_size);
901 		ct->d_fpp = NULL;
902 		ct->d_fpp_size = 0;
903 	}
904 
905 	if (error)
906 		return (set_errno(error));
907 
908 	return (0);
909 }
910 
911 static int
912 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
913 {
914 	int error = 0;
915 
916 	mutex_enter(&door_knob);
917 
918 	if (DOOR_INVALID(dp)) {
919 		mutex_exit(&door_knob);
920 		return (EBADF);
921 	}
922 
923 	/*
924 	 * door_ki_setparam() can only affect kernel doors.
925 	 * door_setparam() can only affect doors attached to the current
926 	 * process.
927 	 */
928 	if ((from_kernel && dp->door_target != &p0) ||
929 	    (!from_kernel && dp->door_target != curproc)) {
930 		mutex_exit(&door_knob);
931 		return (EPERM);
932 	}
933 
934 	switch (type) {
935 	case DOOR_PARAM_DESC_MAX:
936 		if (val > INT_MAX)
937 			error = ERANGE;
938 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
939 			error = ENOTSUP;
940 		else
941 			dp->door_desc_max = (uint_t)val;
942 		break;
943 
944 	case DOOR_PARAM_DATA_MIN:
945 		if (val > dp->door_data_max)
946 			error = EINVAL;
947 		else
948 			dp->door_data_min = val;
949 		break;
950 
951 	case DOOR_PARAM_DATA_MAX:
952 		if (val < dp->door_data_min)
953 			error = EINVAL;
954 		else
955 			dp->door_data_max = val;
956 		break;
957 
958 	default:
959 		error = EINVAL;
960 		break;
961 	}
962 
963 	mutex_exit(&door_knob);
964 	return (error);
965 }
966 
967 static int
968 door_getparam_common(door_node_t *dp, int type, size_t *out)
969 {
970 	int error = 0;
971 
972 	mutex_enter(&door_knob);
973 	switch (type) {
974 	case DOOR_PARAM_DESC_MAX:
975 		*out = (size_t)dp->door_desc_max;
976 		break;
977 	case DOOR_PARAM_DATA_MIN:
978 		*out = dp->door_data_min;
979 		break;
980 	case DOOR_PARAM_DATA_MAX:
981 		*out = dp->door_data_max;
982 		break;
983 	default:
984 		error = EINVAL;
985 		break;
986 	}
987 	mutex_exit(&door_knob);
988 	return (error);
989 }
990 
991 int
992 door_setparam(int did, int type, size_t val)
993 {
994 	door_node_t *dp;
995 	int error = 0;
996 
997 	if ((dp = door_lookup(did, NULL)) == NULL)
998 		return (set_errno(EBADF));
999 
1000 	error = door_setparam_common(dp, 0, type, val);
1001 
1002 	releasef(did);
1003 
1004 	if (error)
1005 		return (set_errno(error));
1006 
1007 	return (0);
1008 }
1009 
1010 int
1011 door_getparam(int did, int type, size_t *out)
1012 {
1013 	door_node_t *dp;
1014 	size_t val = 0;
1015 	int error = 0;
1016 
1017 	if ((dp = door_lookup(did, NULL)) == NULL)
1018 		return (set_errno(EBADF));
1019 
1020 	error = door_getparam_common(dp, type, &val);
1021 
1022 	releasef(did);
1023 
1024 	if (error)
1025 		return (set_errno(error));
1026 
1027 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1028 		if (copyout(&val, out, sizeof (val)))
1029 			return (set_errno(EFAULT));
1030 #ifdef _SYSCALL32_IMPL
1031 	} else {
1032 		size32_t val32 = (size32_t)val;
1033 
1034 		if (val != val32)
1035 			return (set_errno(EOVERFLOW));
1036 
1037 		if (copyout(&val32, out, sizeof (val32)))
1038 			return (set_errno(EFAULT));
1039 #endif /* _SYSCALL32_IMPL */
1040 	}
1041 
1042 	return (0);
1043 }
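/*
 * Illustrative only: a door server can use these parameters (via
 * door_setparam(3C)/door_getparam(3C)) to constrain what callers may pass.
 * For example, to accept only a fixed-size argument and no descriptors
 * (req_t is a made-up request type):
 *
 *	(void) door_setparam(d, DOOR_PARAM_DATA_MIN, sizeof (req_t));
 *	(void) door_setparam(d, DOOR_PARAM_DATA_MAX, sizeof (req_t));
 *	(void) door_setparam(d, DOOR_PARAM_DESC_MAX, 0);
 *
 * Calls that violate the limits fail in door_check_limits() above with
 * ENOBUFS (data size) or ENFILE/ENOTSUP (descriptors).
 */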
1044 
1045 /*
1046  * A copyout() which proceeds from high addresses to low addresses.  This way,
1047  * stack guard pages are effective.
1048  */
1049 static int
1050 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1051 {
1052 	const char *kbase = (const char *)kaddr;
1053 	uintptr_t ubase = (uintptr_t)uaddr;
1054 	size_t pgsize = PAGESIZE;
1055 
1056 	if (count <= pgsize)
1057 		return (copyout(kaddr, uaddr, count));
1058 
1059 	while (count > 0) {
1060 		uintptr_t start, end, offset, amount;
1061 
1062 		end = ubase + count;
1063 		start = P2ALIGN(end - 1, pgsize);
1064 		if (P2ALIGN(ubase, pgsize) == start)
1065 			start = ubase;
1066 
1067 		offset = start - ubase;
1068 		amount = end - start;
1069 
1070 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1071 
1072 		if (copyout(kbase + offset, (void *)start, amount))
1073 			return (1);
1074 		count -= amount;
1075 	}
1076 	return (0);
1077 }
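/*
 * Worked example of the copy order (assuming 4K pages): for
 * uaddr == 0x1000 and count == 0x2800, the loop copies [0x3000, 0x3800),
 * then [0x2000, 0x3000), then [0x1000, 0x2000) -- the destination pages
 * are touched from the highest address downward, the same order in which
 * a growing stack would touch them.
 */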
1078 
1079 /*
1080  * Writes the stack layout for door_return() into the door_server_t of the
1081  * server thread.
1082  */
1083 static int
1084 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1085 {
1086 	door_server_t *st = DOOR_SERVER(tp->t_door);
1087 	door_layout_t *out = &st->d_layout;
1088 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1089 	size_t ssize = st->d_ssize;
1090 	size_t descsz;
1091 	uintptr_t descp, datap, infop, resultsp, finalsp;
1092 	size_t align = STACK_ALIGN;
1093 	size_t results_sz = sizeof (struct door_results);
1094 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1095 
1096 	ASSERT(!st->d_layout_done);
1097 
1098 #ifndef _STACK_GROWS_DOWNWARD
1099 #error stack does not grow downward, door_layout() must change
1100 #endif
1101 
1102 #ifdef _SYSCALL32_IMPL
1103 	if (datamodel != DATAMODEL_NATIVE) {
1104 		align = STACK_ALIGN32;
1105 		results_sz = sizeof (struct door_results32);
1106 	}
1107 #endif
1108 
1109 	descsz = ndesc * sizeof (door_desc_t);
1110 
1111 	/*
1112 	 * To speed up the overflow checking, we do an initial check
1113 	 * that the passed in data size won't cause us to wrap past
1114 	 * base_sp.  Since door_max_desc limits descsz, we can
1115 	 * safely use it here.  65535 is an arbitrary 'bigger than
1116 	 * we need, small enough to not cause trouble' constant;
1117 	 * the only constraint is that it must be > than:
1118 	 *
1119 	 *	5 * STACK_ALIGN +
1120 	 *	    sizeof (door_info_t) +
1121 	 *	    sizeof (door_results_t) +
1122 	 *	    (max adjustment from door_final_sp())
1123 	 *
1124 	 * After we compute the layout, we can safely do a "did we wrap
1125 	 * around" check, followed by a check against the recorded
1126 	 * stack size.
1127 	 */
1128 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1129 		return (E2BIG);		/* overflow */
1130 
1131 	descp = P2ALIGN(base_sp - descsz, align);
1132 	datap = P2ALIGN(descp - data_size, align);
1133 
1134 	if (info_needed)
1135 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1136 	else
1137 		infop = datap;
1138 
1139 	resultsp = P2ALIGN(infop - results_sz, align);
1140 	finalsp = door_final_sp(resultsp, align, datamodel);
1141 
1142 	if (finalsp > base_sp)
1143 		return (E2BIG);		/* overflow */
1144 
1145 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1146 		return (E2BIG);		/* doesn't fit in stack */
1147 
1148 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1149 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1150 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1151 	out->dl_resultsp = (caddr_t)resultsp;
1152 	out->dl_sp = (caddr_t)finalsp;
1153 
1154 	st->d_layout_done = 1;
1155 	return (0);
1156 }
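/*
 * A sketch of the layout computed above, from the caller-supplied stack
 * base downward (each boundary aligned as computed in door_layout()):
 *
 *	base_sp		+---------------------------------+
 *			| door_desc_t array (if ndesc)     |
 *	descp		+---------------------------------+
 *			| argument data (if data_size)     |
 *	datap		+---------------------------------+
 *			| door_info_t (if info_needed)     |
 *	infop		+---------------------------------+
 *			| struct door_results / _results32 |
 *	resultsp	+---------------------------------+
 *	finalsp		(resultsp adjusted by door_final_sp())
 */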
1157 
1158 static int
1159 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1160 {
1161 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1162 	door_layout_t *layout = &st->d_layout;
1163 	int error = 0;
1164 
1165 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1166 
1167 	door_pool_t *pool = (is_private)? &dp->door_servers :
1168 	    &curproc->p_server_threads;
1169 
1170 	int empty_pool = (pool->dp_threads == NULL);
1171 
1172 	caddr_t infop = NULL;
1173 	char *datap = NULL;
1174 	size_t datasize = 0;
1175 	size_t descsize;
1176 
1177 	file_t **fpp = ct->d_fpp;
1178 	door_desc_t *start = NULL;
1179 	uint_t ndesc = 0;
1180 	uint_t ncopied = 0;
1181 
1182 	if (ct != NULL) {
1183 		datap = ct->d_args.data_ptr;
1184 		datasize = ct->d_args.data_size;
1185 		ndesc = ct->d_args.desc_num;
1186 	}
1187 
1188 	descsize = ndesc * sizeof (door_desc_t);
1189 
1190 	/*
1191 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1192 	 * to let unref notifications through, though.
1193 	 */
1194 	if (datap == DOOR_UNREF_DATA) {
1195 		if (ct->d_upcall)
1196 			datasize = 0;
1197 		else
1198 			datap = NULL;
1199 	} else if (datasize == 0) {
1200 		datap = NULL;
1201 	}
1202 
1203 	/*
1204 	 * Get the stack layout, if it hasn't already been done.
1205 	 */
1206 	if (!st->d_layout_done) {
1207 		error = door_layout(curthread, datasize, ndesc,
1208 		    (is_private && empty_pool));
1209 		if (error != 0)
1210 			goto fail;
1211 	}
1212 
1213 	/*
1214 	 * fill out the stack, starting from the top.  Layout was already
1215 	 * filled in by door_args() or door_translate_out().
1216 	 */
1217 	if (layout->dl_descp != NULL) {
1218 		ASSERT(ndesc != 0);
1219 		start = kmem_alloc(descsize, KM_SLEEP);
1220 
1221 		while (ndesc > 0) {
1222 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1223 				error = EMFILE;
1224 				goto fail;
1225 			}
1226 			ndesc--;
1227 			ncopied++;
1228 			fpp++;
1229 		}
1230 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1231 			error = E2BIG;
1232 			goto fail;
1233 		}
1234 	}
1235 	fpp = NULL;			/* finished processing */
1236 
1237 	if (layout->dl_datap != NULL) {
1238 		ASSERT(datasize != 0);
1239 		datap = layout->dl_datap;
1240 		if (ct->d_upcall || datasize <= door_max_arg) {
1241 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1242 				error = E2BIG;
1243 				goto fail;
1244 			}
1245 		}
1246 	}
1247 
1248 	if (is_private && empty_pool) {
1249 		door_info_t di;
1250 
1251 		infop = layout->dl_infop;
1252 		ASSERT(infop != NULL);
1253 
1254 		di.di_target = curproc->p_pid;
1255 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1256 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1257 		di.di_uniquifier = dp->door_index;
1258 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1259 		    DOOR_LOCAL;
1260 
1261 		if (copyout(&di, infop, sizeof (di))) {
1262 			error = E2BIG;
1263 			goto fail;
1264 		}
1265 	}
1266 
1267 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1268 		struct door_results dr;
1269 
1270 		dr.cookie = dp->door_data;
1271 		dr.data_ptr = datap;
1272 		dr.data_size = datasize;
1273 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1274 		dr.desc_num = ncopied;
1275 		dr.pc = dp->door_pc;
1276 		dr.nservers = !empty_pool;
1277 		dr.door_info = (door_info_t *)infop;
1278 
1279 		if (copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1280 			error = E2BIG;
1281 			goto fail;
1282 		}
1283 #ifdef _SYSCALL32_IMPL
1284 	} else {
1285 		struct door_results32 dr32;
1286 
1287 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1288 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1289 		dr32.data_size = (size32_t)datasize;
1290 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1291 		dr32.desc_num = ncopied;
1292 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1293 		dr32.nservers = !empty_pool;
1294 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1295 
1296 		if (copyout(&dr32, layout->dl_resultsp, sizeof (dr32))) {
1297 			error = E2BIG;
1298 			goto fail;
1299 		}
1300 #endif
1301 	}
1302 
1303 	error = door_finish_dispatch(layout->dl_sp);
1304 fail:
1305 	if (start != NULL) {
1306 		if (error != 0)
1307 			door_fd_close(start, ncopied);
1308 		kmem_free(start, descsize);
1309 	}
1310 	if (fpp != NULL)
1311 		door_fp_close(fpp, ndesc);
1312 
1313 	return (error);
1314 }
1315 
1316 /*
1317  * Return the results (if any) to the caller (if any) and wait for the
1318  * next invocation on a door.
1319  */
1320 int
1321 door_return(caddr_t data_ptr, size_t data_size,
1322     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1323 {
1324 	kthread_t	*caller;
1325 	klwp_t		*lwp;
1326 	int		error = 0;
1327 	door_node_t	*dp;
1328 	door_server_t	*st;		/* curthread door_data */
1329 	door_client_t	*ct;		/* caller door_data */
1330 	int		cancel_pending;
1331 
1332 	st = door_my_server(1);
1333 
1334 	/*
1335 	 * If thread was bound to a door that no longer exists, return
1336 	 * an error.  This can happen if a thread is bound to a door
1337 	 * before the process calls forkall(); in the child, the door
1338 	 * doesn't exist and door_fork() sets the d_invbound flag.
1339 	 */
1340 	if (st->d_invbound)
1341 		return (set_errno(EINVAL));
1342 
1343 	st->d_sp = sp;			/* Save base of stack. */
1344 	st->d_ssize = ssize;		/* and its size */
1345 
1346 	/*
1347 	 * before we release our stack to the whims of our next caller,
1348 	 * copy in the syscall arguments if we're being traced by /proc.
1349 	 */
1350 	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
1351 		(void) save_syscall_args();
1352 
1353 	/* Make sure the caller hasn't gone away */
1354 	mutex_enter(&door_knob);
1355 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1356 		if (desc_num != 0) {
1357 			/* close any DOOR_RELEASE descriptors */
1358 			mutex_exit(&door_knob);
1359 			error = door_release_fds(desc_ptr, desc_num);
1360 			if (error)
1361 				return (set_errno(error));
1362 			mutex_enter(&door_knob);
1363 		}
1364 		goto out;
1365 	}
1366 	ct = DOOR_CLIENT(caller->t_door);
1367 
1368 	ct->d_args.data_size = data_size;
1369 	ct->d_args.desc_num = desc_num;
1370 	/*
1371 	 * Transfer results, if any, to the client
1372 	 */
1373 	if (data_size != 0 || desc_num != 0) {
1374 		/*
1375 		 * Prevent the client from exiting until we have finished
1376 		 * moving results.
1377 		 */
1378 		DOOR_T_HOLD(ct);
1379 		mutex_exit(&door_knob);
1380 		error = door_results(caller, data_ptr, data_size,
1381 		    desc_ptr, desc_num);
1382 		mutex_enter(&door_knob);
1383 		DOOR_T_RELEASE(ct);
1384 		/*
1385 		 * Pass EOVERFLOW errors back to the client
1386 		 */
1387 		if (error && error != EOVERFLOW) {
1388 			mutex_exit(&door_knob);
1389 			return (set_errno(error));
1390 		}
1391 	}
1392 out:
1393 	/* Put ourselves on the available server thread list */
1394 	door_release_server(st->d_pool, curthread);
1395 
1396 	/*
1397 	 * Make sure the caller is still waiting to be resumed
1398 	 */
1399 	if (caller) {
1400 		disp_lock_t *tlp;
1401 
1402 		thread_lock(caller);
1403 		ct->d_error = error;		/* Return any errors */
1404 		if (caller->t_state == TS_SLEEP &&
1405 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1406 			cpu_t *cp = CPU;
1407 
1408 			tlp = caller->t_lockp;
1409 			/*
1410 			 * Setting t_disp_queue prevents erroneous preemptions
1411 			 * if this thread is still in execution on another
1412 			 * processor
1413 			 */
1414 			caller->t_disp_queue = cp->cpu_disp;
1415 			CL_ACTIVE(caller);
1416 			/*
1417 			 * THREAD_ONPROC() because the compiler can reorder
1418 			 * THREAD_ONPROC() because compiler can reorder
1419 			 * the two stores of t_state and t_lockp in
1420 			 * THREAD_ONPROC().
1421 			 */
1422 			thread_onproc(caller, cp);
1423 			disp_lock_exit_high(tlp);
1424 			shuttle_resume(caller, &door_knob);
1425 		} else {
1426 			/* May have been setrun or in stop state */
1427 			thread_unlock(caller);
1428 			shuttle_swtch(&door_knob);
1429 		}
1430 	} else {
1431 		shuttle_swtch(&door_knob);
1432 	}
1433 
1434 	/*
1435 	 * We've sprung to life. Determine if we are part of a door
1436 	 * invocation, or just interrupted
1437 	 */
1438 	lwp = ttolwp(curthread);
1439 	mutex_enter(&door_knob);
1440 	if ((dp = st->d_active) != NULL) {
1441 		/*
1442 		 * Normal door invocation. Return any error condition
1443 		 * encountered while trying to pass args to the server
1444 		 * thread.
1445 		 */
1446 		lwp->lwp_asleep = 0;
1447 		/*
1448 		 * Prevent the caller from leaving us while we
1449 		 * are copying out the arguments from its buffer.
1450 		 */
1451 		ASSERT(st->d_caller != NULL);
1452 		ct = DOOR_CLIENT(st->d_caller->t_door);
1453 
1454 		DOOR_T_HOLD(ct);
1455 		mutex_exit(&door_knob);
1456 		error = door_server_dispatch(ct, dp);
1457 		mutex_enter(&door_knob);
1458 		DOOR_T_RELEASE(ct);
1459 
1460 		if (error) {
1461 			caller = st->d_caller;
1462 			if (caller)
1463 				ct = DOOR_CLIENT(caller->t_door);
1464 			else
1465 				ct = NULL;
1466 			goto out;
1467 		}
1468 		mutex_exit(&door_knob);
1469 		return (0);
1470 	} else {
1471 		/*
1472 		 * We are not involved in a door invocation.
1473 		 * Check for /proc related activity...
1474 		 */
1475 		st->d_caller = NULL;
1476 		door_server_exit(curproc, curthread);
1477 		mutex_exit(&door_knob);
1478 		cancel_pending = 0;
1479 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1480 		    MUSTRETURN(curproc, curthread) ||
1481 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
1482 			if (cancel_pending)
1483 				schedctl_cancel_eintr();
1484 			lwp->lwp_asleep = 0;
1485 			lwp->lwp_sysabort = 0;
1486 			return (set_errno(EINTR));
1487 		}
1488 		/* Go back and wait for another request */
1489 		lwp->lwp_asleep = 0;
1490 		mutex_enter(&door_knob);
1491 		caller = NULL;
1492 		goto out;
1493 	}
1494 }
1495 
1496 /*
1497  * Revoke any future invocations on this door
1498  */
1499 int
1500 door_revoke(int did)
1501 {
1502 	door_node_t	*d;
1503 	int		error;
1504 
1505 	if ((d = door_lookup(did, NULL)) == NULL)
1506 		return (set_errno(EBADF));
1507 
1508 	mutex_enter(&door_knob);
1509 	if (d->door_target != curproc) {
1510 		mutex_exit(&door_knob);
1511 		releasef(did);
1512 		return (set_errno(EPERM));
1513 	}
1514 	d->door_flags |= DOOR_REVOKED;
1515 	if (d->door_flags & DOOR_PRIVATE)
1516 		cv_broadcast(&d->door_servers.dp_cv);
1517 	else
1518 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1519 	mutex_exit(&door_knob);
1520 	releasef(did);
1521 	/* Invalidate the descriptor */
1522 	if ((error = closeandsetf(did, NULL)) != 0)
1523 		return (set_errno(error));
1524 	return (0);
1525 }
1526 
1527 int
1528 door_info(int did, struct door_info *d_info)
1529 {
1530 	door_node_t	*dp;
1531 	door_info_t	di;
1532 	door_server_t	*st;
1533 	file_t		*fp = NULL;
1534 
1535 	if (did == DOOR_QUERY) {
1536 		/* Get information on door current thread is bound to */
1537 		if ((st = door_my_server(0)) == NULL ||
1538 		    (dp = st->d_pool) == NULL)
1539 			/* Thread isn't bound to a door */
1540 			return (set_errno(EBADF));
1541 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1542 		/* Not a door */
1543 		return (set_errno(EBADF));
1544 	}
1545 
1546 	door_info_common(dp, &di, fp);
1547 
1548 	if (did != DOOR_QUERY)
1549 		releasef(did);
1550 
1551 	if (copyout(&di, d_info, sizeof (struct door_info)))
1552 		return (set_errno(EFAULT));
1553 	return (0);
1554 }
1555 
1556 /*
1557  * Common code for getting information about a door either via the
1558  * door_info system call or the door_ki_info kernel call.
1559  */
1560 void
1561 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1562 {
1563 	int unref_count;
1564 
1565 	bzero(dip, sizeof (door_info_t));
1566 
1567 	mutex_enter(&door_knob);
1568 	if (dp->door_target == NULL)
1569 		dip->di_target = -1;
1570 	else
1571 		dip->di_target = dp->door_target->p_pid;
1572 
1573 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1574 	if (dp->door_target == curproc)
1575 		dip->di_attributes |= DOOR_LOCAL;
1576 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1577 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1578 	dip->di_uniquifier = dp->door_index;
1579 	/*
1580 	 * If this door is in the middle of having an unreferenced
1581 	 * notification delivered, don't count the VN_HOLD by
1582 	 * door_deliver_unref in determining if it is unreferenced.
1583 	 * This handles the case where door_info is called from the
1584 	 * thread delivering the unref notification.
1585 	 */
1586 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1587 		unref_count = 2;
1588 	else
1589 		unref_count = 1;
1590 	mutex_exit(&door_knob);
1591 
1592 	if (fp == NULL) {
1593 		/*
1594 		 * If this thread is bound to the door, then we can just
1595 		 * check the vnode; a ref count of 1 (or 2 if this is
1596 		 * handling an unref notification) means that the hold
1597 		 * from the door_bind is the only reference to the door
1598 		 * (no file descriptor refers to it).
1599 		 */
1600 		if (DTOV(dp)->v_count == unref_count)
1601 			dip->di_attributes |= DOOR_IS_UNREF;
1602 	} else {
1603 		/*
1604 		 * If we're working from a file descriptor or door handle
1605 		 * we need to look at the file structure count.  We don't
1606 		 * need to hold the vnode lock since this is just a snapshot.
1607 		 */
1608 		mutex_enter(&fp->f_tlock);
1609 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1610 			dip->di_attributes |= DOOR_IS_UNREF;
1611 		mutex_exit(&fp->f_tlock);
1612 	}
1613 }
1614 
1615 /*
1616  * Return credentials of the door caller (if any) for this invocation
1617  */
1618 int
1619 door_ucred(struct ucred_s *uch)
1620 {
1621 	kthread_t	*caller;
1622 	door_server_t	*st;
1623 	door_client_t	*ct;
1624 	struct proc	*p;
1625 	struct ucred_s	*res;
1626 	int		err;
1627 
1628 	mutex_enter(&door_knob);
1629 	if ((st = door_my_server(0)) == NULL ||
1630 	    (caller = st->d_caller) == NULL) {
1631 		mutex_exit(&door_knob);
1632 		return (set_errno(EINVAL));
1633 	}
1634 
1635 	ASSERT(caller->t_door != NULL);
1636 	ct = DOOR_CLIENT(caller->t_door);
1637 
1638 	/* Prevent caller from exiting while we examine the cred */
1639 	DOOR_T_HOLD(ct);
1640 	mutex_exit(&door_knob);
1641 
1642 	/* Get the credentials of the calling process */
1643 	p = ttoproc(caller);
1644 
1645 	res = pgetucred(p);
1646 
1647 	mutex_enter(&door_knob);
1648 	DOOR_T_RELEASE(ct);
1649 	mutex_exit(&door_knob);
1650 
1651 	err = copyout(res, uch, res->uc_size);
1652 
1653 	kmem_free(res, res->uc_size);
1654 
1655 	if (err != 0)
1656 		return (set_errno(EFAULT));
1657 
1658 	return (0);
1659 }
1660 
1661 /*
1662  * Bind the current lwp to the server thread pool associated with 'did'
1663  */
1664 int
1665 door_bind(int did)
1666 {
1667 	door_node_t	*dp;
1668 	door_server_t	*st;
1669 
1670 	if ((dp = door_lookup(did, NULL)) == NULL) {
1671 		/* Not a door */
1672 		return (set_errno(EBADF));
1673 	}
1674 
1675 	/*
1676 	 * Can't bind to a non-private door, and can't bind to a door
1677 	 * served by another process.
1678 	 */
1679 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1680 	    dp->door_target != curproc) {
1681 		releasef(did);
1682 		return (set_errno(EINVAL));
1683 	}
1684 
1685 	st = door_my_server(1);
1686 	if (st->d_pool)
1687 		door_unbind_thread(st->d_pool);
1688 	st->d_pool = dp;
1689 	st->d_invbound = 0;
1690 	door_bind_thread(dp);
1691 	releasef(did);
1692 
1693 	return (0);
1694 }
1695 
1696 /*
1697  * Unbind the current lwp from its server thread pool
1698  */
1699 int
1700 door_unbind(void)
1701 {
1702 	door_server_t *st;
1703 
1704 	if ((st = door_my_server(0)) == NULL)
1705 		return (set_errno(EBADF));
1706 
1707 	if (st->d_invbound) {
1708 		ASSERT(st->d_pool == NULL);
1709 		st->d_invbound = 0;
1710 		return (0);
1711 	}
1712 	if (st->d_pool == NULL)
1713 		return (set_errno(EBADF));
1714 	door_unbind_thread(st->d_pool);
1715 	st->d_pool = NULL;
1716 	return (0);
1717 }
1718 
1719 /*
1720  * Create a descriptor for the associated file and fill in the
1721  * attributes associated with it.
1722  *
1723  * Return 0 for success, -1 otherwise.
1724  */
1725 int
1726 door_insert(struct file *fp, door_desc_t *dp)
1727 {
1728 	struct vnode *vp;
1729 	int	fd;
1730 	door_attr_t attributes = DOOR_DESCRIPTOR;
1731 
1732 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1733 	if ((fd = ufalloc(0)) == -1)
1734 		return (-1);
1735 	setf(fd, fp);
1736 	dp->d_data.d_desc.d_descriptor = fd;
1737 
1738 	/* Fill in the attributes */
1739 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1740 		vp = fp->f_vnode;
1741 	if (vp && vp->v_type == VDOOR) {
1742 		if (VTOD(vp)->door_target == curproc)
1743 			attributes |= DOOR_LOCAL;
1744 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1745 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1746 	}
1747 	dp->d_attributes = attributes;
1748 	return (0);
1749 }
1750 
1751 /*
1752  * Return an available thread for this server.  A NULL return value indicates
1753  * that either:
1754  *	The door has been revoked, or
1755  *	a signal was received.
1756  * The two conditions can be differentiated using DOOR_INVALID(dp).
1757  */
1758 static kthread_t *
1759 door_get_server(door_node_t *dp)
1760 {
1761 	kthread_t **ktp;
1762 	kthread_t *server_t;
1763 	door_pool_t *pool;
1764 	door_server_t *st;
1765 	int signalled;
1766 
1767 	disp_lock_t *tlp;
1768 	cpu_t *cp;
1769 
1770 	ASSERT(MUTEX_HELD(&door_knob));
1771 
1772 	if (dp->door_flags & DOOR_PRIVATE)
1773 		pool = &dp->door_servers;
1774 	else
1775 		pool = &dp->door_target->p_server_threads;
1776 
1777 	for (;;) {
1778 		/*
1779 		 * We search the thread pool, looking for a server thread
1780 		 * ready to take an invocation (i.e. one which is still
1781 		 * sleeping on a shuttle object).  If none are available,
1782 		 * we sleep on the pool's CV, and will be signaled when a
1783 		 * thread is added to the pool.
1784 		 *
1785 		 * This relies on the fact that once a thread in the thread
1786 		 * pool wakes up, it *must* remove and add itself to the pool
1787 		 * before it can receive door calls.
1788 		 */
1789 		if (DOOR_INVALID(dp))
1790 			return (NULL);	/* Target has become invalid */
1791 
1792 		for (ktp = &pool->dp_threads;
1793 		    (server_t = *ktp) != NULL;
1794 		    ktp = &st->d_servers) {
1795 			st = DOOR_SERVER(server_t->t_door);
1796 
1797 			thread_lock(server_t);
1798 			if (server_t->t_state == TS_SLEEP &&
1799 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1800 				break;
1801 			thread_unlock(server_t);
1802 		}
1803 		if (server_t != NULL)
1804 			break;		/* we've got a live one! */
1805 
1806 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1807 		    &signalled)) {
1808 			/*
1809 			 * If we were signaled and the door is still
1810 			 * valid, pass the signal on to another waiter.
1811 			 */
1812 			if (signalled && !DOOR_INVALID(dp))
1813 				cv_signal(&pool->dp_cv);
1814 			return (NULL);	/* Got a signal */
1815 		}
1816 	}
1817 
1818 	/*
1819 	 * We've got a thread_lock()ed thread which is still on the
1820 	 * shuttle.  Take it off the list of available server threads
1821 	 * and mark it as ONPROC.  We are committed to resuming this
1822 	 * thread now.
1823 	 */
1824 	tlp = server_t->t_lockp;
1825 	cp = CPU;
1826 
1827 	*ktp = st->d_servers;
1828 	st->d_servers = NULL;
1829 	/*
1830 	 * Setting t_disp_queue prevents erroneous preemptions
1831 	 * if this thread is still in execution on another processor
1832 	 */
1833 	server_t->t_disp_queue = cp->cpu_disp;
1834 	CL_ACTIVE(server_t);
1835 	/*
1836 	 * We are calling thread_onproc() instead of
1837 	 * THREAD_ONPROC() because the compiler can reorder
1838 	 * the two stores of t_state and t_lockp in
1839 	 * THREAD_ONPROC().
1840 	 */
1841 	thread_onproc(server_t, cp);
1842 	disp_lock_exit(tlp);
1843 	return (server_t);
1844 }
1845 
1846 /*
1847  * Put a server thread back in the pool.
1848  */
1849 static void
1850 door_release_server(door_node_t *dp, kthread_t *t)
1851 {
1852 	door_server_t *st = DOOR_SERVER(t->t_door);
1853 	door_pool_t *pool;
1854 
1855 	ASSERT(MUTEX_HELD(&door_knob));
1856 	st->d_active = NULL;
1857 	st->d_caller = NULL;
1858 	st->d_layout_done = 0;
1859 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1860 		ASSERT(dp->door_target == NULL ||
1861 		    dp->door_target == ttoproc(t));
1862 		pool = &dp->door_servers;
1863 	} else {
1864 		pool = &ttoproc(t)->p_server_threads;
1865 	}
1866 
1867 	st->d_servers = pool->dp_threads;
1868 	pool->dp_threads = t;
1869 
1870 	/* If someone is waiting for a server thread, wake him up */
1871 	cv_signal(&pool->dp_cv);
1872 }
1873 
1874 /*
1875  * Remove a server thread from the pool if present.
1876  */
1877 static void
1878 door_server_exit(proc_t *p, kthread_t *t)
1879 {
1880 	door_pool_t *pool;
1881 	kthread_t **next;
1882 	door_server_t *st = DOOR_SERVER(t->t_door);
1883 
1884 	ASSERT(MUTEX_HELD(&door_knob));
1885 	if (st->d_pool != NULL) {
1886 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1887 		pool = &st->d_pool->door_servers;
1888 	} else {
1889 		pool = &p->p_server_threads;
1890 	}
1891 
1892 	next = &pool->dp_threads;
1893 	while (*next != NULL) {
1894 		if (*next == t) {
1895 			*next = DOOR_SERVER(t->t_door)->d_servers;
1896 			return;
1897 		}
1898 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1899 	}
1900 }
1901 
1902 /*
1903  * Lookup the door descriptor. Caller must call releasef when finished
1904  * with associated door.
1905  */
1906 static door_node_t *
1907 door_lookup(int did, file_t **fpp)
1908 {
1909 	vnode_t	*vp;
1910 	file_t *fp;
1911 
1912 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1913 	if ((fp = getf(did)) == NULL)
1914 		return (NULL);
1915 	/*
1916 	 * Use the underlying vnode (we may be namefs mounted)
1917 	 */
1918 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1919 		vp = fp->f_vnode;
1920 
1921 	if (vp == NULL || vp->v_type != VDOOR) {
1922 		releasef(did);
1923 		return (NULL);
1924 	}
1925 
1926 	if (fpp)
1927 		*fpp = fp;
1928 
1929 	return (VTOD(vp));
1930 }
1931 
1932 /*
1933  * The current thread is exiting, so clean up any pending
1934  * invocation details
1935  */
1936 void
1937 door_slam(void)
1938 {
1939 	door_node_t *dp;
1940 	door_data_t *dt;
1941 	door_client_t *ct;
1942 	door_server_t *st;
1943 
1944 	/*
1945 	 * If we are an active door server, notify our
1946 	 * client that we are exiting and revoke our door.
1947 	 */
1948 	if ((dt = door_my_data(0)) == NULL)
1949 		return;
1950 	ct = DOOR_CLIENT(dt);
1951 	st = DOOR_SERVER(dt);
1952 
1953 	mutex_enter(&door_knob);
1954 	for (;;) {
1955 		if (DOOR_T_HELD(ct))
1956 			cv_wait(&ct->d_cv, &door_knob);
1957 		else if (DOOR_T_HELD(st))
1958 			cv_wait(&st->d_cv, &door_knob);
1959 		else
1960 			break;			/* neither flag is set */
1961 	}
1962 	curthread->t_door = NULL;
1963 	if ((dp = st->d_active) != NULL) {
1964 		kthread_t *t = st->d_caller;
1965 		proc_t *p = curproc;
1966 
1967 		/* Revoke our door if the process is exiting */
1968 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
1969 			door_list_delete(dp);
1970 			dp->door_target = NULL;
1971 			dp->door_flags |= DOOR_REVOKED;
1972 			if (dp->door_flags & DOOR_PRIVATE)
1973 				cv_broadcast(&dp->door_servers.dp_cv);
1974 			else
1975 				cv_broadcast(&p->p_server_threads.dp_cv);
1976 		}
1977 
1978 		if (t != NULL) {
1979 			/*
1980 			 * Let the caller know we are gone
1981 			 */
1982 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
1983 			thread_lock(t);
1984 			if (t->t_state == TS_SLEEP &&
1985 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
1986 				setrun_locked(t);
1987 			thread_unlock(t);
1988 		}
1989 	}
1990 	mutex_exit(&door_knob);
1991 	if (st->d_pool)
1992 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
1993 	kmem_free(dt, sizeof (door_data_t));
1994 }
1995 
1996 /*
1997  * Set DOOR_REVOKED for all doors of the current process.  This is called
1998  * on exit, before all LWPs have been terminated, so that door calls will
1999  * return with an error.
2000  */
2001 void
2002 door_revoke_all()
2003 {
2004 	door_node_t *dp;
2005 	proc_t *p = ttoproc(curthread);
2006 
2007 	mutex_enter(&door_knob);
2008 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2009 		ASSERT(dp->door_target == p);
2010 		dp->door_flags |= DOOR_REVOKED;
2011 		if (dp->door_flags & DOOR_PRIVATE)
2012 			cv_broadcast(&dp->door_servers.dp_cv);
2013 	}
2014 	cv_broadcast(&p->p_server_threads.dp_cv);
2015 	mutex_exit(&door_knob);
2016 }
2017 
2018 /*
2019  * The process is exiting, and all doors it created need to be revoked.
2020  */
2021 void
2022 door_exit(void)
2023 {
2024 	door_node_t *dp;
2025 	proc_t *p = ttoproc(curthread);
2026 
2027 	ASSERT(p->p_lwpcnt == 1);
2028 	/*
2029 	 * Walk the list of active doors created by this process and
2030 	 * revoke them all.
2031 	 */
2032 	mutex_enter(&door_knob);
2033 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2034 		dp->door_target = NULL;
2035 		dp->door_flags |= DOOR_REVOKED;
2036 		if (dp->door_flags & DOOR_PRIVATE)
2037 			cv_broadcast(&dp->door_servers.dp_cv);
2038 	}
2039 	cv_broadcast(&p->p_server_threads.dp_cv);
2040 	/* Clear the list */
2041 	p->p_door_list = NULL;
2042 
2043 	/* Clean up the unref list */
2044 	while ((dp = p->p_unref_list) != NULL) {
2045 		p->p_unref_list = dp->door_ulist;
2046 		dp->door_ulist = NULL;
2047 		mutex_exit(&door_knob);
2048 		VN_RELE(DTOV(dp));
2049 		mutex_enter(&door_knob);
2050 	}
2051 	mutex_exit(&door_knob);
2052 }
2053 
2054 
2055 /*
2056  * The process is executing forkall(), and we need to flag threads that
2057  * are bound to a door in the child.  This will make the child threads
2058  * return an error from door_return unless they call door_unbind first.
2059  */
2060 void
2061 door_fork(kthread_t *parent, kthread_t *child)
2062 {
2063 	door_data_t *pt = parent->t_door;
2064 	door_server_t *st = DOOR_SERVER(pt);
2065 	door_data_t *dt;
2066 
2067 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2068 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2069 		/* parent thread is bound to a door */
2070 		dt = child->t_door =
2071 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2072 		DOOR_SERVER(dt)->d_invbound = 1;
2073 	}
2074 }
2075 
2076 /*
2077  * Deliver queued unrefs to the appropriate door server.
2078  */
2079 static int
2080 door_unref(void)
2081 {
2082 	door_node_t	*dp;
2083 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2084 	proc_t *p = ttoproc(curthread);
2085 
2086 	/* make sure there's only one unref thread per process */
2087 	mutex_enter(&door_knob);
2088 	if (p->p_unref_thread) {
2089 		mutex_exit(&door_knob);
2090 		return (set_errno(EALREADY));
2091 	}
2092 	p->p_unref_thread = 1;
2093 	mutex_exit(&door_knob);
2094 
2095 	(void) door_my_data(1);			/* create info, if necessary */
2096 
2097 	for (;;) {
2098 		mutex_enter(&door_knob);
2099 
2100 		/* Grab a queued request */
2101 		while ((dp = p->p_unref_list) == NULL) {
2102 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2103 				/*
2104 				 * Interrupted.
2105 				 * Return so we can finish forkall() or exit().
2106 				 */
2107 				p->p_unref_thread = 0;
2108 				mutex_exit(&door_knob);
2109 				return (set_errno(EINTR));
2110 			}
2111 		}
2112 		p->p_unref_list = dp->door_ulist;
2113 		dp->door_ulist = NULL;
2114 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2115 		mutex_exit(&door_knob);
2116 
2117 		(void) door_upcall(DTOV(dp), &unref_args);
2118 
2119 		mutex_enter(&door_knob);
2120 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2121 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2122 		mutex_exit(&door_knob);
2123 		VN_RELE(DTOV(dp));
2124 	}
2125 }
2126 
2127 
2128 /*
2129  * Deliver queued unrefs to the kernel door server.
2130  */
2131 /* ARGSUSED */
2132 static void
2133 door_unref_kernel(caddr_t arg)
2134 {
2135 	door_node_t	*dp;
2136 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2137 	proc_t *p = ttoproc(curthread);
2138 	callb_cpr_t cprinfo;
2139 
2140 	/* should only be one of these */
2141 	mutex_enter(&door_knob);
2142 	if (p->p_unref_thread) {
2143 		mutex_exit(&door_knob);
2144 		return;
2145 	}
2146 	p->p_unref_thread = 1;
2147 	mutex_exit(&door_knob);
2148 
2149 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2150 
2151 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2152 	for (;;) {
2153 		mutex_enter(&door_knob);
2154 		/* Grab a queued request */
2155 		while ((dp = p->p_unref_list) == NULL) {
2156 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2157 			cv_wait(&p->p_unref_cv, &door_knob);
2158 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2159 		}
2160 		p->p_unref_list = dp->door_ulist;
2161 		dp->door_ulist = NULL;
2162 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2163 		mutex_exit(&door_knob);
2164 
2165 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2166 
2167 		mutex_enter(&door_knob);
2168 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2169 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2170 		mutex_exit(&door_knob);
2171 		VN_RELE(DTOV(dp));
2172 	}
2173 }
2174 
2175 
2176 /*
2177  * Queue an unref invocation for delivery to the door's server process.
2178  * The door may or may not be revoked at this point.
2179  */
2180 void
2181 door_deliver_unref(door_node_t *d)
2182 {
2183 	struct proc *server = d->door_target;
2184 
2185 	ASSERT(MUTEX_HELD(&door_knob));
2186 	ASSERT(d->door_active == 0);
2187 
2188 	if (server == NULL)
2189 		return;
2190 	/*
2191 	 * Create an lwp to deliver unref calls if one isn't already running.
2192 	 *
2193 	 * A separate thread is used to deliver unrefs since the current
2194 	 * thread may be holding resources (e.g. locks) in user land that
2195 	 * may be needed by the unref processing. This would cause a
2196 	 * deadlock.
2197 	 */
2198 	if (d->door_flags & DOOR_UNREF_MULTI) {
2199 		/* multiple unrefs */
2200 		d->door_flags &= ~DOOR_DELAY;
2201 	} else {
2202 		/* Only 1 unref per door */
2203 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2204 	}
2205 	mutex_exit(&door_knob);
2206 
2207 	/*
2208 	 * Need to bump the vnode count before putting the door on the
2209 	 * list so it doesn't get prematurely released by door_unref.
2210 	 */
2211 	VN_HOLD(DTOV(d));
2212 
2213 	mutex_enter(&door_knob);
2214 	/* is this door already on the unref list? */
2215 	if (d->door_flags & DOOR_UNREF_MULTI) {
2216 		door_node_t *dp;
2217 		for (dp = server->p_unref_list; dp != NULL;
2218 		    dp = dp->door_ulist) {
2219 			if (d == dp) {
2220 				/* already there, don't need to add another */
2221 				mutex_exit(&door_knob);
2222 				VN_RELE(DTOV(d));
2223 				mutex_enter(&door_knob);
2224 				return;
2225 			}
2226 		}
2227 	}
2228 	ASSERT(d->door_ulist == NULL);
2229 	d->door_ulist = server->p_unref_list;
2230 	server->p_unref_list = d;
2231 	cv_broadcast(&server->p_unref_cv);
2232 }
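
/*
 * Illustrative sketch (not part of this module): what a queued unref
 * delivery looks like from a user-level server created with DOOR_UNREF.
 * The server procedure and cookie names are hypothetical; the invocation
 * carries DOOR_UNREF_DATA as its argument, matching unref_args above.
 *
 *	void
 *	my_server(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		if (argp == DOOR_UNREF_DATA) {
 *			(... last reference to the door has gone away ...)
 *			(void) door_return(NULL, 0, NULL, 0);
 *		}
 *		(... normal request processing ...)
 *	}
 */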
2233 
2234 /*
2235  * The caller's buffer isn't big enough for all of the data/fds.  Allocate
2236  * space in the caller's address space for the results and copy the data
2237  * there.
2238  *
2239  * For EOVERFLOW, we must clean up the server's door descriptors.
2240  */
2241 static int
2242 door_overflow(
2243 	kthread_t	*caller,
2244 	caddr_t		data_ptr,	/* data location */
2245 	size_t		data_size,	/* data size */
2246 	door_desc_t	*desc_ptr,	/* descriptor location */
2247 	uint_t		desc_num)	/* number of descriptors */
2248 {
2249 	proc_t *callerp = ttoproc(caller);
2250 	struct as *as = callerp->p_as;
2251 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2252 	caddr_t	addr;			/* Resulting address in target */
2253 	size_t	rlen;			/* Rounded len */
2254 	size_t	len;
2255 	uint_t	i;
2256 	size_t	ds = desc_num * sizeof (door_desc_t);
2257 
2258 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2259 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2260 
2261 	/* Do initial overflow check */
2262 	if (!ufcanalloc(callerp, desc_num))
2263 		return (EMFILE);
2264 
2265 	/*
2266 	 * Allocate space for the results in the caller's address space
2267 	 */
2268 	rlen = roundup(data_size + ds, PAGESIZE);
2269 	as_rangelock(as);
2270 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2271 	if (addr == NULL ||
2272 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2273 		/* No virtual memory available, or anon mapping failed */
2274 		as_rangeunlock(as);
2275 		if (!ct->d_kernel && desc_num > 0) {
2276 			int error = door_release_fds(desc_ptr, desc_num);
2277 			if (error)
2278 				return (error);
2279 		}
2280 		return (EOVERFLOW);
2281 	}
2282 	as_rangeunlock(as);
2283 
2284 	if (ct->d_kernel)
2285 		goto out;
2286 
2287 	if (data_size != 0) {
2288 		caddr_t	src = data_ptr;
2289 		caddr_t saddr = addr;
2290 
2291 		/* Copy any data */
2292 		len = data_size;
2293 		while (len != 0) {
2294 			int	amount;
2295 			int	error;
2296 
2297 			amount = len > PAGESIZE ? PAGESIZE : len;
2298 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2299 				(void) as_unmap(as, addr, rlen);
2300 				return (error);
2301 			}
2302 			saddr += amount;
2303 			src += amount;
2304 			len -= amount;
2305 		}
2306 	}
2307 	/* Copy any fd's */
2308 	if (desc_num != 0) {
2309 		door_desc_t	*didpp, *start;
2310 		struct file	**fpp;
2311 		int		fpp_size;
2312 
2313 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2314 		if (copyin(desc_ptr, didpp, ds)) {
2315 			kmem_free(start, ds);
2316 			(void) as_unmap(as, addr, rlen);
2317 			return (EFAULT);
2318 		}
2319 
2320 		fpp_size = desc_num * sizeof (struct file *);
2321 		if (fpp_size > ct->d_fpp_size) {
2322 			/* make more space */
2323 			if (ct->d_fpp_size)
2324 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2325 			ct->d_fpp_size = fpp_size;
2326 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2327 		}
2328 		fpp = ct->d_fpp;
2329 
2330 		for (i = 0; i < desc_num; i++) {
2331 			struct file *fp;
2332 			int fd = didpp->d_data.d_desc.d_descriptor;
2333 
2334 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2335 			    (fp = getf(fd)) == NULL) {
2336 				/* close translated references */
2337 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2338 				/* close untranslated references */
2339 				door_fd_rele(didpp, desc_num - i, 0);
2340 				kmem_free(start, ds);
2341 				(void) as_unmap(as, addr, rlen);
2342 				return (EINVAL);
2343 			}
2344 			mutex_enter(&fp->f_tlock);
2345 			fp->f_count++;
2346 			mutex_exit(&fp->f_tlock);
2347 
2348 			*fpp = fp;
2349 			releasef(fd);
2350 
2351 			if (didpp->d_attributes & DOOR_RELEASE) {
2352 				/* release passed reference */
2353 				(void) closeandsetf(fd, NULL);
2354 			}
2355 
2356 			fpp++; didpp++;
2357 		}
2358 		kmem_free(start, ds);
2359 	}
2360 
2361 out:
2362 	ct->d_overflow = 1;
2363 	ct->d_args.rbuf = addr;
2364 	ct->d_args.rsize = rlen;
2365 	return (0);
2366 }
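
/*
 * Illustrative sketch (not part of this module): how the overflow case
 * appears to a user-level door_call(3C) client.  If the results did not
 * fit in the supplied buffer, rbuf/rsize are redirected to the pages
 * mapped above and the client is expected to unmap them.  Everything
 * other than the door_arg_t fields is hypothetical.
 *
 *	door_arg_t da;
 *	char buf[64];
 *
 *	da.rbuf = buf;
 *	da.rsize = sizeof (buf);
 *	(... fill in data_ptr/data_size/desc_ptr/desc_num ...)
 *	if (door_call(fd, &da) == 0 && da.rbuf != buf) {
 *		(... results were larger than buf; consume them ...)
 *		(void) munmap(da.rbuf, da.rsize);
 *	}
 */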
2367 
2368 /*
2369  * Transfer arguments from the client to the server.
2370  */
2371 static int
2372 door_args(kthread_t *server, int is_private)
2373 {
2374 	door_server_t *st = DOOR_SERVER(server->t_door);
2375 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2376 	uint_t	ndid;
2377 	size_t	dsize;
2378 	int	error;
2379 
2380 	ASSERT(DOOR_T_HELD(st));
2381 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2382 
2383 	ndid = ct->d_args.desc_num;
2384 	if (ndid > door_max_desc)
2385 		return (E2BIG);
2386 
2387 	/*
2388 	 * Get the stack layout, and fail now if it won't fit.
2389 	 */
2390 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2391 	if (error != 0)
2392 		return (error);
2393 
2394 	dsize = ndid * sizeof (door_desc_t);
2395 	if (ct->d_args.data_size != 0) {
2396 		if (ct->d_args.data_size <= door_max_arg) {
2397 			/*
2398 			 * Use a 2-copy method for small amounts of data
2399 			 *
2400 			 * Allocate a little more than we need for the
2401 			 * args, in the hope that the results will fit
2402 			 * without having to reallocate a buffer
2403 			 */
2404 			ASSERT(ct->d_buf == NULL);
2405 			ct->d_bufsize = roundup(ct->d_args.data_size,
2406 			    DOOR_ROUND);
2407 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2408 			if (copyin(ct->d_args.data_ptr,
2409 			    ct->d_buf, ct->d_args.data_size) != 0) {
2410 				kmem_free(ct->d_buf, ct->d_bufsize);
2411 				ct->d_buf = NULL;
2412 				ct->d_bufsize = 0;
2413 				return (EFAULT);
2414 			}
2415 		} else {
2416 			struct as	*as;
2417 			caddr_t		src;
2418 			caddr_t		dest;
2419 			size_t		len = ct->d_args.data_size;
2420 			uintptr_t	base;
2421 
2422 			/*
2423 			 * Use a 1-copy method
2424 			 */
2425 			as = ttoproc(server)->p_as;
2426 			src = ct->d_args.data_ptr;
2427 
2428 			dest = st->d_layout.dl_datap;
2429 			base = (uintptr_t)dest;
2430 
2431 			/*
2432 			 * Copy data directly into server.  We proceed
2433 			 * downward from the top of the stack, to mimic
2434 			 * normal stack usage. This allows the guard page
2435 			 * to stop us before we corrupt anything.
2436 			 */
2437 			while (len != 0) {
2438 				uintptr_t start;
2439 				uintptr_t end;
2440 				uintptr_t offset;
2441 				size_t	amount;
2442 
2443 				/*
2444 				 * Locate the next part to copy.
2445 				 */
2446 				end = base + len;
2447 				start = P2ALIGN(end - 1, PAGESIZE);
2448 
2449 				/*
2450 				 * If we are on the final (first) page, fix
2451 				 * up the start position.
2452 				 */
2453 				if (P2ALIGN(base, PAGESIZE) == start)
2454 					start = base;
2455 
2456 				offset = start - base;	/* the copy offset */
2457 				amount = end - start;	/* # bytes to copy */
2458 
2459 				ASSERT(amount > 0 && amount <= len &&
2460 				    amount <= PAGESIZE);
2461 
2462 				error = door_copy(as, src + offset,
2463 				    dest + offset, amount);
2464 				if (error != 0)
2465 					return (error);
2466 				len -= amount;
2467 			}
2468 		}
2469 	}
2470 	/*
2471 	 * Copyin the door args and translate them into files
2472 	 */
2473 	if (ndid != 0) {
2474 		door_desc_t	*didpp;
2475 		door_desc_t	*start;
2476 		struct file	**fpp;
2477 
2478 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2479 
2480 		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
2481 			kmem_free(start, dsize);
2482 			return (EFAULT);
2483 		}
2484 		ct->d_fpp_size = ndid * sizeof (struct file *);
2485 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2486 		fpp = ct->d_fpp;
2487 		while (ndid--) {
2488 			struct file *fp;
2489 			int fd = didpp->d_data.d_desc.d_descriptor;
2490 
2491 			/* We only understand file descriptors as passed objs */
2492 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2493 			    (fp = getf(fd)) == NULL) {
2494 				/* close translated references */
2495 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2496 				/* close untranslated references */
2497 				door_fd_rele(didpp, ndid + 1, 0);
2498 				kmem_free(start, dsize);
2499 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2500 				ct->d_fpp = NULL;
2501 				ct->d_fpp_size = 0;
2502 				return (EINVAL);
2503 			}
2504 			/* Hold the fp */
2505 			mutex_enter(&fp->f_tlock);
2506 			fp->f_count++;
2507 			mutex_exit(&fp->f_tlock);
2508 
2509 			*fpp = fp;
2510 			releasef(fd);
2511 
2512 			if (didpp->d_attributes & DOOR_RELEASE) {
2513 				/* release passed reference */
2514 				(void) closeandsetf(fd, NULL);
2515 			}
2516 
2517 			fpp++; didpp++;
2518 		}
2519 		kmem_free(start, dsize);
2520 	}
2521 	return (0);
2522 }
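
/*
 * Worked example (illustrative only) of the 1-copy loop above, assuming
 * PAGESIZE is 0x1000.  With the destination base at 0x2f40 and a data
 * size of 0x2000, the copy proceeds from the top of the destination
 * downward, one page (or partial page) per pass:
 *
 *	pass 1:	start 0x4000, end 0x4f40	copies 0x0f40 bytes
 *	pass 2:	start 0x3000, end 0x4000	copies 0x1000 bytes
 *	pass 3:	start 0x2f40, end 0x3000	copies 0x00c0 bytes
 *
 * mimicking normal stack usage so that the guard page can stop an
 * errant transfer, as described in the comment above.
 */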
2523 
2524 /*
2525  * Transfer arguments from a user client to a kernel server.  This copies in
2526  * descriptors and translates them into door handles.  It doesn't touch the
2527  * other data, letting the kernel server deal with that (to avoid needing
2528  * to copy the data twice).
2529  */
2530 static int
2531 door_translate_in(void)
2532 {
2533 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2534 	uint_t	ndid;
2535 
2536 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2537 	ndid = ct->d_args.desc_num;
2538 	if (ndid > door_max_desc)
2539 		return (E2BIG);
2540 	/*
2541 	 * Copyin the door args and translate them into door handles.
2542 	 */
2543 	if (ndid != 0) {
2544 		door_desc_t	*didpp;
2545 		door_desc_t	*start;
2546 		size_t		dsize = ndid * sizeof (door_desc_t);
2547 		struct file	*fp;
2548 
2549 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2550 
2551 		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
2552 			kmem_free(start, dsize);
2553 			return (EFAULT);
2554 		}
2555 		while (ndid--) {
2556 			vnode_t	*vp;
2557 			int fd = didpp->d_data.d_desc.d_descriptor;
2558 
2559 			/*
2560 			 * We only understand file descriptors as passed objs
2561 			 */
2562 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2563 			    (fp = getf(fd)) != NULL) {
2564 				didpp->d_data.d_handle = FTODH(fp);
2565 				/* Hold the door */
2566 				door_ki_hold(didpp->d_data.d_handle);
2567 
2568 				releasef(fd);
2569 
2570 				if (didpp->d_attributes & DOOR_RELEASE) {
2571 					/* release passed reference */
2572 					(void) closeandsetf(fd, NULL);
2573 				}
2574 
2575 				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
2576 					vp = fp->f_vnode;
2577 
2578 				/* Set attributes */
2579 				didpp->d_attributes = DOOR_HANDLE |
2580 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2581 			} else {
2582 				/* close translated references */
2583 				door_fd_close(start, didpp - start);
2584 				/* close untranslated references */
2585 				door_fd_rele(didpp, ndid + 1, 0);
2586 				kmem_free(start, dsize);
2587 				return (EINVAL);
2588 			}
2589 			didpp++;
2590 		}
2591 		ct->d_args.desc_ptr = start;
2592 	}
2593 	return (0);
2594 }
2595 
2596 /*
2597  * Translate door arguments from kernel to user.  This copies the passed
2598  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2599  * and for data returned by a door_call to a kernel server.
2600  */
2601 static int
2602 door_translate_out(void)
2603 {
2604 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2605 	uint_t	ndid;
2606 
2607 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2608 	ndid = ct->d_args.desc_num;
2609 	if (ndid > door_max_desc) {
2610 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2611 		return (E2BIG);
2612 	}
2613 	/*
2614 	 * Translate the door args into files
2615 	 */
2616 	if (ndid != 0) {
2617 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2618 		struct file	**fpp;
2619 
2620 		ct->d_fpp_size = ndid * sizeof (struct file *);
2621 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2622 		while (ndid--) {
2623 			struct file *fp = NULL;
2624 			int fd = -1;
2625 
2626 			/*
2627 			 * We understand file descriptors and door
2628 			 * handles as passed objs.
2629 			 */
2630 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2631 				fd = didpp->d_data.d_desc.d_descriptor;
2632 				fp = getf(fd);
2633 			} else if (didpp->d_attributes & DOOR_HANDLE)
2634 				fp = DHTOF(didpp->d_data.d_handle);
2635 			if (fp != NULL) {
2636 				/* Hold the fp */
2637 				mutex_enter(&fp->f_tlock);
2638 				fp->f_count++;
2639 				mutex_exit(&fp->f_tlock);
2640 
2641 				*fpp = fp;
2642 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2643 					releasef(fd);
2644 				if (didpp->d_attributes & DOOR_RELEASE) {
2645 					/* release passed reference */
2646 					if (fd >= 0)
2647 						(void) closeandsetf(fd, NULL);
2648 					else
2649 						(void) closef(fp);
2650 				}
2651 			} else {
2652 				/* close translated references */
2653 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2654 				/* close untranslated references */
2655 				door_fd_rele(didpp, ndid + 1, 1);
2656 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2657 				ct->d_fpp = NULL;
2658 				ct->d_fpp_size = 0;
2659 				return (EINVAL);
2660 			}
2661 			fpp++; didpp++;
2662 		}
2663 	}
2664 	return (0);
2665 }
2666 
2667 /*
2668  * Move the results from the server to the client
2669  */
2670 static int
2671 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2672 		door_desc_t *desc_ptr, uint_t desc_num)
2673 {
2674 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2675 	size_t		dsize;
2676 	size_t		rlen;
2677 	size_t		result_size;
2678 
2679 	ASSERT(DOOR_T_HELD(ct));
2680 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2681 
2682 	if (ct->d_noresults)
2683 		return (E2BIG);		/* No results expected */
2684 
2685 	if (desc_num > door_max_desc)
2686 		return (E2BIG);		/* Too many descriptors */
2687 
2688 	dsize = desc_num * sizeof (door_desc_t);
2689 	/*
2690 	 * Check if the results are bigger than the client's buffer
2691 	 */
2692 	if (dsize)
2693 		rlen = roundup(data_size, sizeof (door_desc_t));
2694 	else
2695 		rlen = data_size;
2696 	if ((result_size = rlen + dsize) == 0)
2697 		return (0);
2698 
2699 	if (ct->d_upcall) {
2700 		/*
2701 		 * Handle upcalls
2702 		 */
2703 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2704 			/*
2705 			 * If there's no return buffer or the buffer is too
2706 			 * small, allocate a new one.  The old buffer (if it
2707 			 * exists) will be freed by the upcall client.
2708 			 */
2709 			if (result_size > door_max_upcall_reply)
2710 				return (E2BIG);
2711 			ct->d_args.rsize = result_size;
2712 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2713 		}
2714 		ct->d_args.data_ptr = ct->d_args.rbuf;
2715 		if (data_size != 0 &&
2716 		    copyin(data_ptr, ct->d_args.data_ptr, data_size) != 0)
2717 			return (EFAULT);
2718 	} else if (result_size > ct->d_args.rsize) {
2719 		return (door_overflow(caller, data_ptr, data_size,
2720 		    desc_ptr, desc_num));
2721 	} else if (data_size != 0) {
2722 		if (data_size <= door_max_arg) {
2723 			/*
2724 			 * Use a 2 copy method for small amounts of data
2725 			 * Use a 2-copy method for small amounts of data
2726 			if (ct->d_buf == NULL) {
2727 				ct->d_bufsize = data_size;
2728 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2729 			} else if (ct->d_bufsize < data_size) {
2730 				kmem_free(ct->d_buf, ct->d_bufsize);
2731 				ct->d_bufsize = data_size;
2732 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2733 			}
2734 			if (copyin(data_ptr, ct->d_buf, data_size) != 0)
2735 				return (EFAULT);
2736 		} else {
2737 			struct as *as = ttoproc(caller)->p_as;
2738 			caddr_t	dest = ct->d_args.rbuf;
2739 			caddr_t	src = data_ptr;
2740 			size_t	len = data_size;
2741 
2742 			/* Copy data directly into client */
2743 			while (len != 0) {
2744 				uint_t	amount;
2745 				uint_t	max;
2746 				uint_t	off;
2747 				int	error;
2748 
2749 				off = (uintptr_t)dest & PAGEOFFSET;
2750 				if (off)
2751 					max = PAGESIZE - off;
2752 				else
2753 					max = PAGESIZE;
2754 				amount = len > max ? max : len;
2755 				error = door_copy(as, src, dest, amount);
2756 				if (error != 0)
2757 					return (error);
2758 				dest += amount;
2759 				src += amount;
2760 				len -= amount;
2761 			}
2762 		}
2763 	}
2764 
2765 	/*
2766 	 * Copyin the returned descriptors and translate them into files
2767 	 */
2768 	if (desc_num != 0) {
2769 		door_desc_t *start;
2770 		door_desc_t *didpp;
2771 		struct file **fpp;
2772 		size_t	fpp_size;
2773 		uint_t	i;
2774 
2775 		/* First, check if we would overflow client */
2776 		if (!ufcanalloc(ttoproc(caller), desc_num))
2777 			return (EMFILE);
2778 
2779 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2780 		if (copyin(desc_ptr, didpp, dsize)) {
2781 			kmem_free(start, dsize);
2782 			return (EFAULT);
2783 		}
2784 		fpp_size = desc_num * sizeof (struct file *);
2785 		if (fpp_size > ct->d_fpp_size) {
2786 			/* make more space */
2787 			if (ct->d_fpp_size)
2788 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2789 			ct->d_fpp_size = fpp_size;
2790 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2791 		}
2792 		fpp = ct->d_fpp;
2793 
2794 		for (i = 0; i < desc_num; i++) {
2795 			struct file *fp;
2796 			int fd = didpp->d_data.d_desc.d_descriptor;
2797 
2798 			/* Only understand file descriptor results */
2799 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2800 			    (fp = getf(fd)) == NULL) {
2801 				/* close translated references */
2802 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2803 				/* close untranslated references */
2804 				door_fd_rele(didpp, desc_num - i, 0);
2805 				kmem_free(start, dsize);
2806 				return (EINVAL);
2807 			}
2808 
2809 			mutex_enter(&fp->f_tlock);
2810 			fp->f_count++;
2811 			mutex_exit(&fp->f_tlock);
2812 
2813 			*fpp = fp;
2814 			releasef(fd);
2815 
2816 			if (didpp->d_attributes & DOOR_RELEASE) {
2817 				/* release passed reference */
2818 				(void) closeandsetf(fd, NULL);
2819 			}
2820 
2821 			fpp++; didpp++;
2822 		}
2823 		kmem_free(start, dsize);
2824 	}
2825 	return (0);
2826 }
2827 
2828 /*
2829  * Close all the descriptors.
2830  */
2831 static void
2832 door_fd_close(door_desc_t *d, uint_t n)
2833 {
2834 	uint_t	i;
2835 
2836 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2837 	for (i = 0; i < n; i++) {
2838 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2839 			(void) closeandsetf(
2840 			    d->d_data.d_desc.d_descriptor, NULL);
2841 		} else if (d->d_attributes & DOOR_HANDLE) {
2842 			door_ki_rele(d->d_data.d_handle);
2843 		}
2844 		d++;
2845 	}
2846 }
2847 
2848 /*
2849  * Close descriptors that have the DOOR_RELEASE attribute set.
2850  */
2851 void
2852 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2853 {
2854 	uint_t	i;
2855 
2856 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2857 	for (i = 0; i < n; i++) {
2858 		if (d->d_attributes & DOOR_RELEASE) {
2859 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2860 				(void) closeandsetf(
2861 				    d->d_data.d_desc.d_descriptor, NULL);
2862 			} else if (from_kernel &&
2863 			    (d->d_attributes & DOOR_HANDLE)) {
2864 				door_ki_rele(d->d_data.d_handle);
2865 			}
2866 		}
2867 		d++;
2868 	}
2869 }
2870 
2871 /*
2872  * Copy descriptors into the kernel so we can release any marked
2873  * DOOR_RELEASE.
2874  */
2875 int
2876 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2877 {
2878 	size_t dsize;
2879 	door_desc_t *didpp;
2880 	uint_t desc_num;
2881 
2882 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2883 	ASSERT(ndesc != 0);
2884 
2885 	desc_num = MIN(ndesc, door_max_desc);
2886 
2887 	dsize = desc_num * sizeof (door_desc_t);
2888 	didpp = kmem_alloc(dsize, KM_SLEEP);
2889 
2890 	while (ndesc > 0) {
2891 		uint_t count = MIN(ndesc, desc_num);
2892 
2893 		if (copyin(desc_ptr, didpp, count * sizeof (door_desc_t))) {
2894 			kmem_free(didpp, dsize);
2895 			return (EFAULT);
2896 		}
2897 		door_fd_rele(didpp, count, 0);
2898 
2899 		ndesc -= count;
2900 		desc_ptr += count;
2901 	}
2902 	kmem_free(didpp, dsize);
2903 	return (0);
2904 }
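
/*
 * Illustrative arithmetic for the chunking above: with the default
 * door_max_desc of 1024, a request to release 2500 descriptors reuses a
 * single 1024-entry kernel buffer across three passes of 1024, 1024 and
 * 452 descriptors.
 */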
2905 
2906 /*
2907  * Decrement ref count on all the files passed
2908  */
2909 static void
2910 door_fp_close(struct file **fp, uint_t n)
2911 {
2912 	uint_t	i;
2913 
2914 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2915 
2916 	for (i = 0; i < n; i++)
2917 		(void) closef(fp[i]);
2918 }
2919 
2920 /*
2921  * Copy data from 'src' in the current address space to 'dest' in 'as'
2922  * for 'len' bytes.
2923  *
2924  * Performs this using 1 mapin and 1 copy operation.
2925  *
2926  * We really should do more than 1 page at a time to improve
2927  * performance, but for now this is treated as an anomalous condition.
2928  */
2929 static int
2930 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2931 {
2932 	caddr_t	kaddr;
2933 	caddr_t	rdest;
2934 	uint_t	off;
2935 	page_t	**pplist;
2936 	page_t	*pp = NULL;
2937 	int	error = 0;
2938 
2939 	ASSERT(len <= PAGESIZE);
2940 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
2941 	rdest = (caddr_t)((uintptr_t)dest &
2942 	    (uintptr_t)PAGEMASK);	/* Page boundary */
2943 	ASSERT(off + len <= PAGESIZE);
2944 
2945 	/*
2946 	 * Lock down destination page.
2947 	 */
2948 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
2949 		return (E2BIG);
2950 	/*
2951 	 * Check if we have a shadow page list from as_pagelock. If not,
2952 	 * we took the slow path and have to find our page struct the hard
2953 	 * way.
2954 	 */
2955 	if (pplist == NULL) {
2956 		pfn_t	pfnum;
2957 
2958 		/* MMU mapping is already locked down */
2959 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2960 		pfnum = hat_getpfnum(as->a_hat, rdest);
2961 		AS_LOCK_EXIT(as, &as->a_lock);
2962 
2963 		/*
2964 		 * TODO: The pfn step should not be necessary - need
2965 		 * a hat_getpp() function.
2966 		 */
2967 		if (pf_is_memory(pfnum)) {
2968 			pp = page_numtopp_nolock(pfnum);
2969 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
2970 		} else
2971 			pp = NULL;
2972 		if (pp == NULL) {
2973 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
2974 			return (E2BIG);
2975 		}
2976 	} else {
2977 		pp = *pplist;
2978 	}
2979 	/*
2980 	 * Map destination page into kernel address
2981 	 */
2982 	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
2983 
2984 	/*
2985 	 * Copy from src to dest
2986 	 */
2987 	if (copyin(src, kaddr + off, len) != 0)
2988 		error = EFAULT;
2989 	/*
2990 	 * Unmap destination page from kernel
2991 	 */
2992 	ppmapout(kaddr);
2993 	/*
2994 	 * Unlock destination page
2995 	 */
2996 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
2997 	return (error);
2998 }
2999 
3000 /*
3001  * General kernel upcall using doors
3002  *	Returns 0 on success, errno for failures.
3003  *	Caller must have a hold on the door based vnode, and on any
3004  *	references passed in desc_ptr.  The references are released
3005  *	in the event of an error, and passed without duplication
3006  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
3007  *	a 64-bit kernel, since it may be used to store door descriptors
3008  *	if they are returned by the server.
3009  */
3010 int
3011 door_upcall(vnode_t *vp, door_arg_t *param)
3012 {
3013 	/* Locals */
3014 	door_node_t	*dp;
3015 	kthread_t	*server_thread;
3016 	int		error = 0;
3017 	klwp_t		*lwp;
3018 	door_client_t	*ct;		/* curthread door_data */
3019 	door_server_t	*st;		/* server thread door_data */
3020 	int		gotresults = 0;
3021 	int		cancel_pending;
3022 
3023 	if (vp->v_type != VDOOR) {
3024 		if (param->desc_num)
3025 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3026 		return (EINVAL);
3027 	}
3028 
3029 	lwp = ttolwp(curthread);
3030 	ct = door_my_client(1);
3031 	dp = VTOD(vp);	/* Convert to a door_node_t */
3032 
3033 	mutex_enter(&door_knob);
3034 	if (DOOR_INVALID(dp)) {
3035 		mutex_exit(&door_knob);
3036 		if (param->desc_num)
3037 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3038 		error = EBADF;
3039 		goto out;
3040 	}
3041 
3042 	if (dp->door_target == &p0) {
3043 		/* Can't do an upcall to a kernel server */
3044 		mutex_exit(&door_knob);
3045 		if (param->desc_num)
3046 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3047 		error = EINVAL;
3048 		goto out;
3049 	}
3050 
3051 	error = door_check_limits(dp, param, 1);
3052 	if (error != 0) {
3053 		mutex_exit(&door_knob);
3054 		if (param->desc_num)
3055 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3056 		goto out;
3057 	}
3058 
3059 	/*
3060 	 * Get a server thread from the target domain
3061 	 */
3062 	if ((server_thread = door_get_server(dp)) == NULL) {
3063 		if (DOOR_INVALID(dp))
3064 			error = EBADF;
3065 		else
3066 			error = EAGAIN;
3067 		mutex_exit(&door_knob);
3068 		if (param->desc_num)
3069 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3070 		goto out;
3071 	}
3072 
3073 	st = DOOR_SERVER(server_thread->t_door);
3074 	ct->d_buf = param->data_ptr;
3075 	ct->d_bufsize = param->data_size;
3076 	ct->d_args = *param;	/* structure assignment */
3077 
3078 	if (ct->d_args.desc_num) {
3079 		/*
3080 		 * Move data from client to server
3081 		 */
3082 		DOOR_T_HOLD(st);
3083 		mutex_exit(&door_knob);
3084 		error = door_translate_out();
3085 		mutex_enter(&door_knob);
3086 		DOOR_T_RELEASE(st);
3087 		if (error) {
3088 			/*
3089 			 * We're not going to resume this thread after all
3090 			 */
3091 			door_release_server(dp, server_thread);
3092 			shuttle_sleep(server_thread);
3093 			mutex_exit(&door_knob);
3094 			goto out;
3095 		}
3096 	}
3097 
3098 	ct->d_upcall = 1;
3099 	if (param->rsize == 0)
3100 		ct->d_noresults = 1;
3101 	else
3102 		ct->d_noresults = 0;
3103 
3104 	dp->door_active++;
3105 
3106 	ct->d_error = DOOR_WAIT;
3107 	st->d_caller = curthread;
3108 	st->d_active = dp;
3109 
3110 	shuttle_resume(server_thread, &door_knob);
3111 
3112 	mutex_enter(&door_knob);
3113 shuttle_return:
3114 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3115 		/*
3116 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3117 		 */
3118 		mutex_exit(&door_knob);		/* May block in ISSIG */
3119 		cancel_pending = 0;
3120 		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3121 		    MUSTRETURN(curproc, curthread) ||
3122 		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
3123 			/* Signal, forkall, ... */
3124 			if (cancel_pending)
3125 				schedctl_cancel_eintr();
3126 			lwp->lwp_sysabort = 0;
3127 			mutex_enter(&door_knob);
3128 			error = EINTR;
3129 			/*
3130 			 * If the server has finished processing our call,
3131 			 * or exited (calling door_slam()), then d_error
3132 			 * will have changed.  If the server hasn't finished
3133 			 * yet, d_error will still be DOOR_WAIT, and we
3134 			 * let it know we are not interested in any
3135 			 * results by sending a SIGCANCEL, unless the door
3136 			 * is marked with DOOR_NO_CANCEL.
3137 			 */
3138 			if (ct->d_error == DOOR_WAIT &&
3139 			    st->d_caller == curthread) {
3140 				proc_t	*p = ttoproc(server_thread);
3141 
3142 				st->d_active = NULL;
3143 				st->d_caller = NULL;
3144 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3145 					DOOR_T_HOLD(st);
3146 					mutex_exit(&door_knob);
3147 
3148 					mutex_enter(&p->p_lock);
3149 					sigtoproc(p, server_thread, SIGCANCEL);
3150 					mutex_exit(&p->p_lock);
3151 
3152 					mutex_enter(&door_knob);
3153 					DOOR_T_RELEASE(st);
3154 				}
3155 			}
3156 		} else {
3157 			/*
3158 			 * Return from stop(), server exit...
3159 			 *
3160 			 * Note that the server could have done a
3161 			 * door_return while the client was in stop state
3162 			 * (ISSIG), in which case the error condition
3163 			 * is updated by the server.
3164 			 */
3165 			mutex_enter(&door_knob);
3166 			if (ct->d_error == DOOR_WAIT) {
3167 				/* Still waiting for a reply */
3168 				shuttle_swtch(&door_knob);
3169 				mutex_enter(&door_knob);
3170 				if (lwp)
3171 					lwp->lwp_asleep = 0;
3172 				goto	shuttle_return;
3173 			} else if (ct->d_error == DOOR_EXIT) {
3174 				/* Server exit */
3175 				error = EINTR;
3176 			} else {
3177 				/* Server did a door_return during ISSIG */
3178 				error = ct->d_error;
3179 			}
3180 		}
3181 		/*
3182 		 * Can't exit if the server is currently copying
3183 		 * results for me
3184 		 */
3185 		while (DOOR_T_HELD(ct))
3186 			cv_wait(&ct->d_cv, &door_knob);
3187 
3188 		/*
3189 		 * Find out if results were successfully copied.
3190 		 */
3191 		if (ct->d_error == 0)
3192 			gotresults = 1;
3193 	}
3194 	if (lwp) {
3195 		lwp->lwp_asleep = 0;		/* /proc */
3196 		lwp->lwp_sysabort = 0;		/* /proc */
3197 	}
3198 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3199 		door_deliver_unref(dp);
3200 	mutex_exit(&door_knob);
3201 
3202 	/*
3203 	 * Translate returned doors (if any)
3204 	 */
3205 
3206 	if (ct->d_noresults)
3207 		goto out;
3208 
3209 	if (error) {
3210 		/*
3211 		 * If the server returned results successfully, then we've
3212 		 * been interrupted and may need to clean up.
3213 		 */
3214 		if (gotresults) {
3215 			ASSERT(error == EINTR);
3216 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3217 		}
3218 		goto out;
3219 	}
3220 
3221 	if (ct->d_args.desc_num) {
3222 		struct file	**fpp;
3223 		door_desc_t	*didpp;
3224 		vnode_t		*vp;
3225 		uint_t		n = ct->d_args.desc_num;
3226 
3227 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3228 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3229 		fpp = ct->d_fpp;
3230 
3231 		while (n--) {
3232 			struct file *fp;
3233 
3234 			fp = *fpp;
3235 			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3236 				vp = fp->f_vnode;
3237 
3238 			didpp->d_attributes = DOOR_HANDLE |
3239 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3240 			didpp->d_data.d_handle = FTODH(fp);
3241 
3242 			fpp++; didpp++;
3243 		}
3244 	}
3245 
3246 	/* on return data is in rbuf */
3247 	*param = ct->d_args;		/* structure assignment */
3248 
3249 out:
3250 	if (ct->d_fpp) {
3251 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3252 		ct->d_fpp = NULL;
3253 		ct->d_fpp_size = 0;
3254 	}
3255 
3256 	ct->d_upcall = 0;
3257 	ct->d_noresults = 0;
3258 	ct->d_buf = NULL;
3259 	ct->d_bufsize = 0;
3260 	return (error);
3261 }
3262 
3263 /*
3264  * Add a door to the per-process list of active doors for which the
3265  * process is a server.
3266  */
3267 static void
3268 door_list_insert(door_node_t *dp)
3269 {
3270 	proc_t *p = dp->door_target;
3271 
3272 	ASSERT(MUTEX_HELD(&door_knob));
3273 	dp->door_list = p->p_door_list;
3274 	p->p_door_list = dp;
3275 }
3276 
3277 /*
3278  * Remove a door from the per-process list of active doors.
3279  */
3280 void
3281 door_list_delete(door_node_t *dp)
3282 {
3283 	door_node_t **pp;
3284 
3285 	ASSERT(MUTEX_HELD(&door_knob));
3286 	/*
3287 	 * Find the door in the list.  If the door belongs to another process,
3288 	 * it's OK to use p_door_list since that process can't exit until all
3289 	 * doors have been taken off the list (see door_exit).
3290 	 */
3291 	pp = &(dp->door_target->p_door_list);
3292 	while (*pp != dp)
3293 		pp = &((*pp)->door_list);
3294 
3295 	/* found it, take it off the list */
3296 	*pp = dp->door_list;
3297 }
3298 
3299 
3300 /*
3301  * External kernel interfaces for doors.  These functions are available
3302  * outside the doorfs module for use in creating and using doors from
3303  * within the kernel.
3304  */
3305 
3306 /*
3307  * door_ki_upcall invokes a user-level door server from the kernel.
3308  */
3309 int
3310 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3311 {
3312 	file_t *fp = DHTOF(dh);
3313 	vnode_t *realvp;
3314 
3315 	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
3316 		realvp = fp->f_vnode;
3317 	return (door_upcall(realvp, param));
3318 }
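
/*
 * Illustrative sketch (not part of this module): a kernel client invoking
 * a user-level door server through these interfaces.  The path name and
 * the request structure are hypothetical; the cleanup of a replacement
 * reply buffer follows the door_upcall()/door_results() convention above
 * (an undersized rbuf is replaced with kmem_alloc'ed memory that the
 * caller must free), and any returned descriptors arrive as DOOR_HANDLEs
 * that should be released with door_ki_rele() when no longer needed.
 *
 *	struct my_req request;
 *	uint64_t replybuf[16];
 *	door_handle_t dh;
 *	door_arg_t da;
 *	int error;
 *
 *	if ((error = door_ki_open("/var/run/my_door", &dh)) != 0)
 *		return (error);
 *
 *	da.data_ptr = (char *)&request;
 *	da.data_size = sizeof (request);
 *	da.desc_ptr = NULL;
 *	da.desc_num = 0;
 *	da.rbuf = (char *)replybuf;	(64-bit aligned, see door_upcall())
 *	da.rsize = sizeof (replybuf);
 *
 *	error = door_ki_upcall(dh, &da);
 *	if (error == 0 && da.rbuf != (char *)replybuf)
 *		kmem_free(da.rbuf, da.rsize);
 *	door_ki_rele(dh);
 */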
3319 
3320 /*
3321  * Function call to create a "kernel" door server.  A kernel door
3322  * server provides a way for a user-level process to invoke a function
3323  * in the kernel through a door_call.  From the caller's point of
3324  * view, a kernel door server looks the same as a user-level one
3325  * (except the server pid is 0).  Unlike normal door calls, the
3326  * kernel door function is invoked via a normal function call in the
3327  * same thread and context as the caller.
3328  */
3329 int
3330 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3331     door_handle_t *dhp)
3332 {
3333 	int err;
3334 	file_t *fp;
3335 
3336 	/* no DOOR_PRIVATE */
3337 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3338 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3339 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3340 		return (EINVAL);
3341 
3342 	err = door_create_common(pc_cookie, data_cookie, attributes,
3343 	    1, NULL, &fp);
3344 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3345 	    p0.p_unref_thread == 0) {
3346 		/* need to create unref thread for process 0 */
3347 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3348 		    TS_RUN, minclsyspri);
3349 	}
3350 	if (err == 0) {
3351 		*dhp = FTODH(fp);
3352 	}
3353 	return (err);
3354 }
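
/*
 * Illustrative sketch (not part of this module): creating a kernel door.
 * The service routine and cookie are hypothetical; the routine is invoked
 * with the calling convention used for door_pc in this file (see the call
 * in door_unref_kernel() above), and the handle is released with
 * door_ki_rele() when the service is torn down.
 *
 *	static void my_service();	(kernel door service routine)
 *	door_handle_t dh;
 *
 *	if (door_ki_create(my_service, my_cookie, DOOR_UNREF, &dh) == 0) {
 *		(... publish dh to the consumers of this service ...)
 *	}
 */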
3355 
3356 void
3357 door_ki_hold(door_handle_t dh)
3358 {
3359 	file_t *fp = DHTOF(dh);
3360 
3361 	mutex_enter(&fp->f_tlock);
3362 	fp->f_count++;
3363 	mutex_exit(&fp->f_tlock);
3364 }
3365 
3366 void
3367 door_ki_rele(door_handle_t dh)
3368 {
3369 	file_t *fp = DHTOF(dh);
3370 
3371 	(void) closef(fp);
3372 }
3373 
3374 int
3375 door_ki_open(char *pathname, door_handle_t *dhp)
3376 {
3377 	file_t *fp;
3378 	vnode_t *vp;
3379 	int err;
3380 
3381 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3382 		return (err);
3383 	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
3384 		VN_RELE(vp);
3385 		return (err);
3386 	}
3387 	if (vp->v_type != VDOOR) {
3388 		VN_RELE(vp);
3389 		return (EINVAL);
3390 	}
3391 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3392 		VN_RELE(vp);
3393 		return (err);
3394 	}
3395 	/* falloc returns with f_tlock held on success */
3396 	mutex_exit(&fp->f_tlock);
3397 	*dhp = FTODH(fp);
3398 	return (0);
3399 }
3400 
3401 int
3402 door_ki_info(door_handle_t dh, struct door_info *dip)
3403 {
3404 	file_t *fp = DHTOF(dh);
3405 	vnode_t *vp;
3406 
3407 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3408 		vp = fp->f_vnode;
3409 	if (vp->v_type != VDOOR)
3410 		return (EINVAL);
3411 	door_info_common(VTOD(vp), dip, fp);
3412 	return (0);
3413 }
3414 
3415 door_handle_t
3416 door_ki_lookup(int did)
3417 {
3418 	file_t *fp;
3419 	door_handle_t dh;
3420 
3421 	/* is the descriptor really a door? */
3422 	if (door_lookup(did, &fp) == NULL)
3423 		return (NULL);
3424 	/* got the door, put a hold on it and release the fd */
3425 	dh = FTODH(fp);
3426 	door_ki_hold(dh);
3427 	releasef(did);
3428 	return (dh);
3429 }
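
/*
 * Illustrative sketch (not part of this module): converting a door file
 * descriptor handed in from user level (the 'fd' here is hypothetical)
 * into a handle that outlives the descriptor.
 *
 *	door_handle_t dh;
 *
 *	if ((dh = door_ki_lookup(fd)) == NULL)
 *		return (EBADF);
 *	(... the handle holds its own reference; fd may now be closed ...)
 *	door_ki_rele(dh);
 */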
3430 
3431 int
3432 door_ki_setparam(door_handle_t dh, int type, size_t val)
3433 {
3434 	file_t *fp = DHTOF(dh);
3435 	vnode_t *vp;
3436 
3437 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3438 		vp = fp->f_vnode;
3439 	if (vp->v_type != VDOOR)
3440 		return (EINVAL);
3441 	return (door_setparam_common(VTOD(vp), 1, type, val));
3442 }
3443 
3444 int
3445 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3446 {
3447 	file_t *fp = DHTOF(dh);
3448 	vnode_t *vp;
3449 
3450 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3451 		vp = fp->f_vnode;
3452 	if (vp->v_type != VDOOR)
3453 		return (EINVAL);
3454 	return (door_getparam_common(VTOD(vp), type, out));
3455 }
3456