xref: /titanic_41/usr/src/uts/common/fs/doorfs/door_sys.c (revision 93fb2a5ff9019dc98ff5e9836d0c2c7b5c5ecd7f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * System call I/F to doors (outside of vnodes I/F) and misc support
28  * routines
29  */
30 #include <sys/types.h>
31 #include <sys/systm.h>
32 #include <sys/door.h>
33 #include <sys/door_data.h>
34 #include <sys/proc.h>
35 #include <sys/thread.h>
36 #include <sys/prsystm.h>
37 #include <sys/procfs.h>
38 #include <sys/class.h>
39 #include <sys/cred.h>
40 #include <sys/kmem.h>
41 #include <sys/cmn_err.h>
42 #include <sys/stack.h>
43 #include <sys/debug.h>
44 #include <sys/cpuvar.h>
45 #include <sys/file.h>
46 #include <sys/fcntl.h>
47 #include <sys/vnode.h>
48 #include <sys/vfs.h>
49 #include <sys/vfs_opreg.h>
50 #include <sys/sobject.h>
51 #include <sys/schedctl.h>
52 #include <sys/callb.h>
53 #include <sys/ucred.h>
54 
55 #include <sys/mman.h>
56 #include <sys/sysmacros.h>
57 #include <sys/vmsystm.h>
58 #include <vm/as.h>
59 #include <vm/hat.h>
60 #include <vm/page.h>
61 #include <vm/seg.h>
62 #include <vm/seg_vn.h>
63 #include <vm/seg_vn.h>
64 #include <vm/seg_kpm.h>
65 
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
70 
71 /*
72  * The maximum amount of data (in bytes) that will be transferred using
73  * an intermediate kernel buffer.  For sizes greater than this we map
74  * in the destination pages and perform a 1-copy transfer.
75  */
76 size_t	door_max_arg = 16 * 1024;
77 
78 /*
79  * Maximum amount of data that will be transferred in a reply to a
80  * door_upcall.  Need to guard against a process returning huge amounts
81  * of data and getting the kernel stuck in kmem_alloc.
82  */
83 size_t	door_max_upcall_reply = 1024 * 1024;
84 
85 /*
86  * Maximum number of descriptors allowed to be passed in a single
87  * door_call or door_return.  We need to allocate kernel memory
88  * for all of them at once, so we can't let it scale without limit.
89  */
90 uint_t door_max_desc = 1024;
91 
92 /*
93  * Definition of a door handle, used by other kernel subsystems when
94  * calling door functions.  This is really a file structure but we
95  * want to hide that fact.
96  */
97 struct __door_handle {
98 	file_t dh_file;
99 };
100 
101 #define	DHTOF(dh) ((file_t *)(dh))
102 #define	FTODH(fp) ((door_handle_t)(fp))
103 
104 static int doorfs(long, long, long, long, long, long);
105 
106 static struct sysent door_sysent = {
107 	6,
108 	SE_ARGC | SE_NOUNLOAD,
109 	(int (*)())doorfs,
110 };
111 
112 static struct modlsys modlsys = {
113 	&mod_syscallops, "doors", &door_sysent
114 };
115 
116 #ifdef _SYSCALL32_IMPL
117 
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120     int32_t arg5, int32_t subcode);
121 
122 static struct sysent door_sysent32 = {
123 	6,
124 	SE_ARGC | SE_NOUNLOAD,
125 	(int (*)())doorfs32,
126 };
127 
128 static struct modlsys modlsys32 = {
129 	&mod_syscallops32,
130 	"32-bit door syscalls",
131 	&door_sysent32
132 };
133 #endif
134 
135 static struct modlinkage modlinkage = {
136 	MODREV_1,
137 	&modlsys,
138 #ifdef _SYSCALL32_IMPL
139 	&modlsys32,
140 #endif
141 	NULL
142 };
143 
144 dev_t	doordev;
145 
146 extern	struct vfs door_vfs;
147 extern	struct vnodeops *door_vnodeops;
148 
149 int
_init(void)150 _init(void)
151 {
152 	static const fs_operation_def_t door_vfsops_template[] = {
153 		NULL, NULL
154 	};
155 	extern const fs_operation_def_t door_vnodeops_template[];
156 	vfsops_t *door_vfsops;
157 	major_t major;
158 	int error;
159 
160 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
161 	if ((major = getudev()) == (major_t)-1)
162 		return (ENXIO);
163 	doordev = makedevice(major, 0);
164 
165 	/* Create a dummy vfs */
166 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
167 	if (error != 0) {
168 		cmn_err(CE_WARN, "door init: bad vfs ops");
169 		return (error);
170 	}
171 	VFS_INIT(&door_vfs, door_vfsops, NULL);
172 	door_vfs.vfs_flag = VFS_RDONLY;
173 	door_vfs.vfs_dev = doordev;
174 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
175 
176 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
177 	if (error != 0) {
178 		vfs_freevfsops(door_vfsops);
179 		cmn_err(CE_WARN, "door init: bad vnode ops");
180 		return (error);
181 	}
182 	return (mod_install(&modlinkage));
183 }
184 
185 int
_info(struct modinfo * modinfop)186 _info(struct modinfo *modinfop)
187 {
188 	return (mod_info(&modlinkage, modinfop));
189 }
190 
191 /* system call functions */
192 static int door_call(int, void *);
193 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
194 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
195     uint_t), void *data_cookie, uint_t);
196 static int door_revoke(int);
197 static int door_info(int, struct door_info *);
198 static int door_ucred(struct ucred_s *);
199 static int door_bind(int);
200 static int door_unbind(void);
201 static int door_unref(void);
202 static int door_getparam(int, int, size_t *);
203 static int door_setparam(int, int, size_t);
204 
205 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
206 
207 /*
208  * System call wrapper for all door related system calls
209  */
210 static int
doorfs(long arg1,long arg2,long arg3,long arg4,long arg5,long subcode)211 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
212 {
213 	switch (subcode) {
214 	case DOOR_CALL:
215 		return (door_call(arg1, (void *)arg2));
216 	case DOOR_RETURN: {
217 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
218 
219 		if (drdp != NULL) {
220 			door_return_desc_t drd;
221 			if (copyin(drdp, &drd, sizeof (drd)))
222 				return (EFAULT);
223 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
224 			    drd.desc_num, (caddr_t)arg4, arg5));
225 		}
226 		return (door_return((caddr_t)arg1, arg2, NULL,
227 		    0, (caddr_t)arg4, arg5));
228 	}
229 	case DOOR_RETURN_OLD:
230 		/*
231 		 * In order to support the S10 runtime environment, we
232 		 * still respond to the old syscall subcode for door_return.
233 		 * We treat it as having no stack limits.  This code should
234 		 * be removed when such support is no longer needed.
235 		 */
236 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
237 		    arg4, (caddr_t)arg5, 0));
238 	case DOOR_CREATE:
239 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
240 	case DOOR_REVOKE:
241 		return (door_revoke(arg1));
242 	case DOOR_INFO:
243 		return (door_info(arg1, (struct door_info *)arg2));
244 	case DOOR_BIND:
245 		return (door_bind(arg1));
246 	case DOOR_UNBIND:
247 		return (door_unbind());
248 	case DOOR_UNREFSYS:
249 		return (door_unref());
250 	case DOOR_UCRED:
251 		return (door_ucred((struct ucred_s *)arg1));
252 	case DOOR_GETPARAM:
253 		return (door_getparam(arg1, arg2, (size_t *)arg3));
254 	case DOOR_SETPARAM:
255 		return (door_setparam(arg1, arg2, arg3));
256 	default:
257 		return (set_errno(EINVAL));
258 	}
259 }
260 
261 #ifdef _SYSCALL32_IMPL
262 /*
263  * System call wrapper for all door related system calls from 32-bit programs.
264  * Needed at the moment because of the casts - they undo some damage
265  * that truss causes (sign-extending the stack pointer) when truss'ing
266  * a 32-bit program using doors.
267  */
268 static int
doorfs32(int32_t arg1,int32_t arg2,int32_t arg3,int32_t arg4,int32_t arg5,int32_t subcode)269 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
270     int32_t arg4, int32_t arg5, int32_t subcode)
271 {
272 	switch (subcode) {
273 	case DOOR_CALL:
274 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
275 	case DOOR_RETURN: {
276 		door_return_desc32_t *drdp =
277 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
278 		if (drdp != NULL) {
279 			door_return_desc32_t drd;
280 			if (copyin(drdp, &drd, sizeof (drd)))
281 				return (EFAULT);
282 			return (door_return(
283 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
284 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
285 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
286 			    (size_t)(uintptr_t)(size32_t)arg5));
287 		}
288 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
289 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
290 		    (size_t)(uintptr_t)(size32_t)arg5));
291 	}
292 	case DOOR_RETURN_OLD:
293 		/*
294 		 * In order to support the S10 runtime environment, we
295 		 * still respond to the old syscall subcode for door_return.
296 		 * We treat it as having no stack limits.  This code should
297 		 * be removed when such support is no longer needed.
298 		 */
299 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
300 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
301 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
302 	case DOOR_CREATE:
303 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
304 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
305 	case DOOR_REVOKE:
306 		return (door_revoke(arg1));
307 	case DOOR_INFO:
308 		return (door_info(arg1,
309 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
310 	case DOOR_BIND:
311 		return (door_bind(arg1));
312 	case DOOR_UNBIND:
313 		return (door_unbind());
314 	case DOOR_UNREFSYS:
315 		return (door_unref());
316 	case DOOR_UCRED:
317 		return (door_ucred(
318 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
319 	case DOOR_GETPARAM:
320 		return (door_getparam(arg1, arg2,
321 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
322 	case DOOR_SETPARAM:
323 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
324 
325 	default:
326 		return (set_errno(EINVAL));
327 	}
328 }
329 #endif
330 
331 void shuttle_resume(kthread_t *, kmutex_t *);
332 void shuttle_swtch(kmutex_t *);
333 void shuttle_sleep(kthread_t *);
334 
335 /*
336  * Support routines
337  */
338 static int door_create_common(void (*)(), void *, uint_t, int, int *,
339     file_t **);
340 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
341 static int door_args(kthread_t *, int);
342 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
344 static void	door_server_exit(proc_t *, kthread_t *);
345 static void	door_release_server(door_node_t *, kthread_t *);
346 static kthread_t	*door_get_server(door_node_t *);
347 static door_node_t	*door_lookup(int, file_t **);
348 static int	door_translate_in(void);
349 static int	door_translate_out(void);
350 static void	door_fd_rele(door_desc_t *, uint_t, int);
351 static void	door_list_insert(door_node_t *);
352 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
353 static int	door_release_fds(door_desc_t *, uint_t);
354 static void	door_fd_close(door_desc_t *, uint_t);
355 static void	door_fp_close(struct file **, uint_t);
356 
357 static door_data_t *
door_my_data(int create_if_missing)358 door_my_data(int create_if_missing)
359 {
360 	door_data_t *ddp;
361 
362 	ddp = curthread->t_door;
363 	if (create_if_missing && ddp == NULL)
364 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
365 
366 	return (ddp);
367 }
368 
369 static door_server_t *
door_my_server(int create_if_missing)370 door_my_server(int create_if_missing)
371 {
372 	door_data_t *ddp = door_my_data(create_if_missing);
373 
374 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
375 }
376 
377 static door_client_t *
door_my_client(int create_if_missing)378 door_my_client(int create_if_missing)
379 {
380 	door_data_t *ddp = door_my_data(create_if_missing);
381 
382 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
383 }
384 
385 /*
386  * System call to create a door
387  */
388 int
door_create(void (* pc_cookie)(),void * data_cookie,uint_t attributes)389 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
390 {
391 	int fd;
392 	int err;
393 
394 	if ((attributes & ~DOOR_CREATE_MASK) ||
395 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
396 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
397 		return (set_errno(EINVAL));
398 
399 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
400 	    &fd, NULL)) != 0)
401 		return (set_errno(err));
402 
403 	f_setfd(fd, FD_CLOEXEC);
404 	return (fd);
405 }
406 
407 /*
408  * Common code for creating user and kernel doors.  If a door was
409  * created, stores a file structure pointer in the location pointed
410  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
411  * pointer to a file descriptor is passed in as fdp, allocates a file
412  * descriptor representing the door.  If a door could not be created,
413  * returns an error.
414  */
415 static int
door_create_common(void (* pc_cookie)(),void * data_cookie,uint_t attributes,int from_kernel,int * fdp,file_t ** fpp)416 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
417     int from_kernel, int *fdp, file_t **fpp)
418 {
419 	door_node_t	*dp;
420 	vnode_t		*vp;
421 	struct file	*fp;
422 	static door_id_t index = 0;
423 	proc_t		*p = (from_kernel)? &p0 : curproc;
424 
425 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
426 
427 	dp->door_vnode = vn_alloc(KM_SLEEP);
428 	dp->door_target = p;
429 	dp->door_data = data_cookie;
430 	dp->door_pc = pc_cookie;
431 	dp->door_flags = attributes;
432 #ifdef _SYSCALL32_IMPL
433 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
434 		dp->door_data_max = UINT32_MAX;
435 	else
436 #endif
437 		dp->door_data_max = SIZE_MAX;
438 	dp->door_data_min = 0UL;
439 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
440 
441 	vp = DTOV(dp);
442 	vn_setops(vp, door_vnodeops);
443 	vp->v_type = VDOOR;
444 	vp->v_vfsp = &door_vfs;
445 	vp->v_data = (caddr_t)dp;
446 	mutex_enter(&door_knob);
447 	dp->door_index = index++;
448 	/* add to per-process door list */
449 	door_list_insert(dp);
450 	mutex_exit(&door_knob);
451 
452 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
453 		/*
454 		 * If the file table is full, remove the door from the
455 		 * per-process list, free the door, and return NULL.
456 		 */
457 		mutex_enter(&door_knob);
458 		door_list_delete(dp);
459 		mutex_exit(&door_knob);
460 		vn_free(vp);
461 		kmem_free(dp, sizeof (door_node_t));
462 		return (EMFILE);
463 	}
464 	vn_exists(vp);
465 	if (fdp != NULL)
466 		setf(*fdp, fp);
467 	mutex_exit(&fp->f_tlock);
468 
469 	if (fpp != NULL)
470 		*fpp = fp;
471 	return (0);
472 }
473 
474 static int
door_check_limits(door_node_t * dp,door_arg_t * da,int upcall)475 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
476 {
477 	ASSERT(MUTEX_HELD(&door_knob));
478 
479 	/* we allow unref upcalls through, despite any minimum */
480 	if (da->data_size < dp->door_data_min &&
481 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
482 		return (ENOBUFS);
483 
484 	if (da->data_size > dp->door_data_max)
485 		return (ENOBUFS);
486 
487 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
488 		return (ENOTSUP);
489 
490 	if (da->desc_num > dp->door_desc_max)
491 		return (ENFILE);
492 
493 	return (0);
494 }
495 
496 /*
497  * Door invocation.
498  */
499 int
door_call(int did,void * args)500 door_call(int did, void *args)
501 {
502 	/* Locals */
503 	door_node_t	*dp;
504 	kthread_t	*server_thread;
505 	int		error = 0;
506 	klwp_t		*lwp;
507 	door_client_t	*ct;		/* curthread door_data */
508 	door_server_t	*st;		/* server thread door_data */
509 	door_desc_t	*start = NULL;
510 	uint_t		ncopied = 0;
511 	size_t		dsize;
512 	/* destructor for data returned by a kernel server */
513 	void		(*destfn)() = NULL;
514 	void		*destarg;
515 	model_t		datamodel;
516 	int		gotresults = 0;
517 	int		needcleanup = 0;
518 	int		cancel_pending;
519 
520 	lwp = ttolwp(curthread);
521 	datamodel = lwp_getdatamodel(lwp);
522 
523 	ct = door_my_client(1);
524 
525 	/*
526 	 * Get the arguments
527 	 */
528 	if (args) {
529 		if (datamodel == DATAMODEL_NATIVE) {
530 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
531 				return (set_errno(EFAULT));
532 		} else {
533 			door_arg32_t    da32;
534 
535 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
536 				return (set_errno(EFAULT));
537 			ct->d_args.data_ptr =
538 			    (char *)(uintptr_t)da32.data_ptr;
539 			ct->d_args.data_size = da32.data_size;
540 			ct->d_args.desc_ptr =
541 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
542 			ct->d_args.desc_num = da32.desc_num;
543 			ct->d_args.rbuf =
544 			    (char *)(uintptr_t)da32.rbuf;
545 			ct->d_args.rsize = da32.rsize;
546 		}
547 	} else {
548 		/* No arguments, and no results allowed */
549 		ct->d_noresults = 1;
550 		ct->d_args.data_size = 0;
551 		ct->d_args.desc_num = 0;
552 		ct->d_args.rsize = 0;
553 	}
554 
555 	if ((dp = door_lookup(did, NULL)) == NULL)
556 		return (set_errno(EBADF));
557 
558 	/*
559 	 * We don't want to hold the door FD over the entire operation;
560 	 * instead, we put a hold on the door vnode and release the FD
561 	 * immediately
562 	 */
563 	VN_HOLD(DTOV(dp));
564 	releasef(did);
565 
566 	/*
567 	 * This should be done in shuttle_resume(), just before going to
568 	 * sleep, but we want to avoid overhead while holding door_knob.
569 	 * prstop() is just a no-op if we don't really go to sleep.
570 	 * We test not-kernel-address-space for the sake of clustering code.
571 	 */
572 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
573 		prstop(PR_REQUESTED, 0);
574 
575 	mutex_enter(&door_knob);
576 	if (DOOR_INVALID(dp)) {
577 		mutex_exit(&door_knob);
578 		error = EBADF;
579 		goto out;
580 	}
581 
582 	/*
583 	 * before we do anything, check that we are not overflowing the
584 	 * required limits.
585 	 */
586 	error = door_check_limits(dp, &ct->d_args, 0);
587 	if (error != 0) {
588 		mutex_exit(&door_knob);
589 		goto out;
590 	}
591 
592 	/*
593 	 * Check for in-kernel door server.
594 	 */
595 	if (dp->door_target == &p0) {
596 		caddr_t rbuf = ct->d_args.rbuf;
597 		size_t rsize = ct->d_args.rsize;
598 
599 		dp->door_active++;
600 		ct->d_kernel = 1;
601 		ct->d_error = DOOR_WAIT;
602 		mutex_exit(&door_knob);
603 		/* translate file descriptors to vnodes */
604 		if (ct->d_args.desc_num) {
605 			error = door_translate_in();
606 			if (error)
607 				goto out;
608 		}
609 		/*
610 		 * Call kernel door server.  Arguments are passed and
611 		 * returned as a door_arg pointer.  When called, data_ptr
612 		 * points to user data and desc_ptr points to a kernel list
613 		 * of door descriptors that have been converted to file
614 		 * structure pointers.  It's the server function's
615 		 * responsibility to copyin the data pointed to by data_ptr
616 		 * (this avoids extra copying in some cases).  On return,
617 		 * data_ptr points to a user buffer of data, and desc_ptr
618 		 * points to a kernel list of door descriptors representing
619 		 * files.  When a reference is passed to a kernel server,
620 		 * it is the server's responsibility to release the reference
621 		 * (by calling closef).  When the server includes a
622 		 * reference in its reply, it is released as part of the
623 		 * the call (the server must duplicate the reference if
624 		 * it wants to retain a copy).  The destfn, if set to
625 		 * non-NULL, is a destructor to be called when the returned
626 		 * kernel data (if any) is no longer needed (has all been
627 		 * translated and copied to user level).
628 		 */
629 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
630 		    &destfn, &destarg, &error);
631 		mutex_enter(&door_knob);
632 		/* not implemented yet */
633 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
634 			door_deliver_unref(dp);
635 		mutex_exit(&door_knob);
636 		if (error)
637 			goto out;
638 
639 		/* translate vnodes to files */
640 		if (ct->d_args.desc_num) {
641 			error = door_translate_out();
642 			if (error)
643 				goto out;
644 		}
645 		ct->d_buf = ct->d_args.rbuf;
646 		ct->d_bufsize = ct->d_args.rsize;
647 		if (rsize < (ct->d_args.data_size +
648 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
649 			/* handle overflow */
650 			error = door_overflow(curthread, ct->d_args.data_ptr,
651 			    ct->d_args.data_size, ct->d_args.desc_ptr,
652 			    ct->d_args.desc_num);
653 			if (error)
654 				goto out;
655 			/* door_overflow sets d_args rbuf and rsize */
656 		} else {
657 			ct->d_args.rbuf = rbuf;
658 			ct->d_args.rsize = rsize;
659 		}
660 		goto results;
661 	}
662 
663 	/*
664 	 * Get a server thread from the target domain
665 	 */
666 	if ((server_thread = door_get_server(dp)) == NULL) {
667 		if (DOOR_INVALID(dp))
668 			error = EBADF;
669 		else
670 			error = EAGAIN;
671 		mutex_exit(&door_knob);
672 		goto out;
673 	}
674 
675 	st = DOOR_SERVER(server_thread->t_door);
676 	if (ct->d_args.desc_num || ct->d_args.data_size) {
677 		int is_private = (dp->door_flags & DOOR_PRIVATE);
678 		/*
679 		 * Move data from client to server
680 		 */
681 		DOOR_T_HOLD(st);
682 		mutex_exit(&door_knob);
683 		error = door_args(server_thread, is_private);
684 		mutex_enter(&door_knob);
685 		DOOR_T_RELEASE(st);
686 		if (error) {
687 			/*
688 			 * We're not going to resume this thread after all
689 			 */
690 			door_release_server(dp, server_thread);
691 			shuttle_sleep(server_thread);
692 			mutex_exit(&door_knob);
693 			goto out;
694 		}
695 	}
696 
697 	dp->door_active++;
698 	ct->d_error = DOOR_WAIT;
699 	ct->d_args_done = 0;
700 	st->d_caller = curthread;
701 	st->d_active = dp;
702 
703 	shuttle_resume(server_thread, &door_knob);
704 
705 	mutex_enter(&door_knob);
706 shuttle_return:
707 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
708 		/*
709 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
710 		 */
711 		mutex_exit(&door_knob);		/* May block in ISSIG */
712 		cancel_pending = 0;
713 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
714 		    MUSTRETURN(curproc, curthread) ||
715 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
716 			/* Signal, forkall, ... */
717 			lwp->lwp_sysabort = 0;
718 			if (cancel_pending)
719 				schedctl_cancel_eintr();
720 			mutex_enter(&door_knob);
721 			error = EINTR;
722 			/*
723 			 * If the server has finished processing our call,
724 			 * or exited (calling door_slam()), then d_error
725 			 * will have changed.  If the server hasn't finished
726 			 * yet, d_error will still be DOOR_WAIT, and we
727 			 * let it know we are not interested in any
728 			 * results by sending a SIGCANCEL, unless the door
729 			 * is marked with DOOR_NO_CANCEL.
730 			 */
731 			if (ct->d_error == DOOR_WAIT &&
732 			    st->d_caller == curthread) {
733 				proc_t	*p = ttoproc(server_thread);
734 
735 				st->d_active = NULL;
736 				st->d_caller = NULL;
737 
738 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
739 					DOOR_T_HOLD(st);
740 					mutex_exit(&door_knob);
741 
742 					mutex_enter(&p->p_lock);
743 					sigtoproc(p, server_thread, SIGCANCEL);
744 					mutex_exit(&p->p_lock);
745 
746 					mutex_enter(&door_knob);
747 					DOOR_T_RELEASE(st);
748 				}
749 			}
750 		} else {
751 			/*
752 			 * Return from stop(), server exit...
753 			 *
754 			 * Note that the server could have done a
755 			 * door_return while the client was in stop state
756 			 * (ISSIG), in which case the error condition
757 			 * is updated by the server.
758 			 */
759 			mutex_enter(&door_knob);
760 			if (ct->d_error == DOOR_WAIT) {
761 				/* Still waiting for a reply */
762 				shuttle_swtch(&door_knob);
763 				mutex_enter(&door_knob);
764 				lwp->lwp_asleep = 0;
765 				goto	shuttle_return;
766 			} else if (ct->d_error == DOOR_EXIT) {
767 				/* Server exit */
768 				error = EINTR;
769 			} else {
770 				/* Server did a door_return during ISSIG */
771 				error = ct->d_error;
772 			}
773 		}
774 		/*
775 		 * Can't exit if the server is currently copying
776 		 * results for me.
777 		 */
778 		while (DOOR_T_HELD(ct))
779 			cv_wait(&ct->d_cv, &door_knob);
780 
781 		/*
782 		 * If the server has not processed our message, free the
783 		 * descriptors.
784 		 */
785 		if (!ct->d_args_done) {
786 			needcleanup = 1;
787 			ct->d_args_done = 1;
788 		}
789 
790 		/*
791 		 * Find out if results were successfully copied.
792 		 */
793 		if (ct->d_error == 0)
794 			gotresults = 1;
795 	}
796 	ASSERT(ct->d_args_done);
797 	lwp->lwp_asleep = 0;		/* /proc */
798 	lwp->lwp_sysabort = 0;		/* /proc */
799 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
800 		door_deliver_unref(dp);
801 	mutex_exit(&door_knob);
802 
803 	if (needcleanup)
804 		door_fp_close(ct->d_fpp, ct->d_args.desc_num);
805 
806 results:
807 	/*
808 	 * Move the results to userland (if any)
809 	 */
810 
811 	if (ct->d_noresults)
812 		goto out;
813 
814 	if (error) {
815 		/*
816 		 * If server returned results successfully, then we've
817 		 * been interrupted and may need to clean up.
818 		 */
819 		if (gotresults) {
820 			ASSERT(error == EINTR);
821 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
822 		}
823 		goto out;
824 	}
825 
826 	/*
827 	 * Copy back data if we haven't caused an overflow (already
828 	 * handled) and we are using a 2 copy transfer, or we are
829 	 * returning data from a kernel server.
830 	 */
831 	if (ct->d_args.data_size) {
832 		ct->d_args.data_ptr = ct->d_args.rbuf;
833 		if (ct->d_kernel || (!ct->d_overflow &&
834 		    ct->d_args.data_size <= door_max_arg)) {
835 			if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
836 			    ct->d_args.data_size)) {
837 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
838 				error = EFAULT;
839 				goto out;
840 			}
841 		}
842 	}
843 
844 	/*
845 	 * stuff returned doors into our proc, copyout the descriptors
846 	 */
847 	if (ct->d_args.desc_num) {
848 		struct file	**fpp;
849 		door_desc_t	*didpp;
850 		uint_t		n = ct->d_args.desc_num;
851 
852 		dsize = n * sizeof (door_desc_t);
853 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
854 		fpp = ct->d_fpp;
855 
856 		while (n--) {
857 			if (door_insert(*fpp, didpp) == -1) {
858 				/* Close remaining files */
859 				door_fp_close(fpp, n + 1);
860 				error = EMFILE;
861 				goto out;
862 			}
863 			fpp++; didpp++; ncopied++;
864 		}
865 
866 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
867 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
868 
869 		if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {
870 			error = EFAULT;
871 			goto out;
872 		}
873 	}
874 
875 	/*
876 	 * Return the results
877 	 */
878 	if (datamodel == DATAMODEL_NATIVE) {
879 		if (copyout_nowatch(&ct->d_args, args,
880 		    sizeof (door_arg_t)) != 0)
881 			error = EFAULT;
882 	} else {
883 		door_arg32_t    da32;
884 
885 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
886 		da32.data_size = ct->d_args.data_size;
887 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
888 		da32.desc_num = ct->d_args.desc_num;
889 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
890 		da32.rsize = ct->d_args.rsize;
891 		if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {
892 			error = EFAULT;
893 		}
894 	}
895 
896 out:
897 	ct->d_noresults = 0;
898 
899 	/* clean up the overflow buffer if an error occurred */
900 	if (error != 0 && ct->d_overflow) {
901 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
902 		    ct->d_args.rsize);
903 	}
904 	ct->d_overflow = 0;
905 
906 	/* call destructor */
907 	if (destfn) {
908 		ASSERT(ct->d_kernel);
909 		(*destfn)(dp->door_data, destarg);
910 		ct->d_buf = NULL;
911 		ct->d_bufsize = 0;
912 	}
913 
914 	if (dp)
915 		VN_RELE(DTOV(dp));
916 
917 	if (ct->d_buf) {
918 		ASSERT(!ct->d_kernel);
919 		kmem_free(ct->d_buf, ct->d_bufsize);
920 		ct->d_buf = NULL;
921 		ct->d_bufsize = 0;
922 	}
923 	ct->d_kernel = 0;
924 
925 	/* clean up the descriptor copyout buffer */
926 	if (start != NULL) {
927 		if (error != 0)
928 			door_fd_close(start, ncopied);
929 		kmem_free(start, dsize);
930 	}
931 
932 	if (ct->d_fpp) {
933 		kmem_free(ct->d_fpp, ct->d_fpp_size);
934 		ct->d_fpp = NULL;
935 		ct->d_fpp_size = 0;
936 	}
937 
938 	if (error)
939 		return (set_errno(error));
940 
941 	return (0);
942 }
943 
944 static int
door_setparam_common(door_node_t * dp,int from_kernel,int type,size_t val)945 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
946 {
947 	int error = 0;
948 
949 	mutex_enter(&door_knob);
950 
951 	if (DOOR_INVALID(dp)) {
952 		mutex_exit(&door_knob);
953 		return (EBADF);
954 	}
955 
956 	/*
957 	 * door_ki_setparam() can only affect kernel doors.
958 	 * door_setparam() can only affect doors attached to the current
959 	 * process.
960 	 */
961 	if ((from_kernel && dp->door_target != &p0) ||
962 	    (!from_kernel && dp->door_target != curproc)) {
963 		mutex_exit(&door_knob);
964 		return (EPERM);
965 	}
966 
967 	switch (type) {
968 	case DOOR_PARAM_DESC_MAX:
969 		if (val > INT_MAX)
970 			error = ERANGE;
971 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
972 			error = ENOTSUP;
973 		else
974 			dp->door_desc_max = (uint_t)val;
975 		break;
976 
977 	case DOOR_PARAM_DATA_MIN:
978 		if (val > dp->door_data_max)
979 			error = EINVAL;
980 		else
981 			dp->door_data_min = val;
982 		break;
983 
984 	case DOOR_PARAM_DATA_MAX:
985 		if (val < dp->door_data_min)
986 			error = EINVAL;
987 		else
988 			dp->door_data_max = val;
989 		break;
990 
991 	default:
992 		error = EINVAL;
993 		break;
994 	}
995 
996 	mutex_exit(&door_knob);
997 	return (error);
998 }
999 
1000 static int
door_getparam_common(door_node_t * dp,int type,size_t * out)1001 door_getparam_common(door_node_t *dp, int type, size_t *out)
1002 {
1003 	int error = 0;
1004 
1005 	mutex_enter(&door_knob);
1006 	switch (type) {
1007 	case DOOR_PARAM_DESC_MAX:
1008 		*out = (size_t)dp->door_desc_max;
1009 		break;
1010 	case DOOR_PARAM_DATA_MIN:
1011 		*out = dp->door_data_min;
1012 		break;
1013 	case DOOR_PARAM_DATA_MAX:
1014 		*out = dp->door_data_max;
1015 		break;
1016 	default:
1017 		error = EINVAL;
1018 		break;
1019 	}
1020 	mutex_exit(&door_knob);
1021 	return (error);
1022 }
1023 
1024 int
door_setparam(int did,int type,size_t val)1025 door_setparam(int did, int type, size_t val)
1026 {
1027 	door_node_t *dp;
1028 	int error = 0;
1029 
1030 	if ((dp = door_lookup(did, NULL)) == NULL)
1031 		return (set_errno(EBADF));
1032 
1033 	error = door_setparam_common(dp, 0, type, val);
1034 
1035 	releasef(did);
1036 
1037 	if (error)
1038 		return (set_errno(error));
1039 
1040 	return (0);
1041 }
1042 
1043 int
door_getparam(int did,int type,size_t * out)1044 door_getparam(int did, int type, size_t *out)
1045 {
1046 	door_node_t *dp;
1047 	size_t val = 0;
1048 	int error = 0;
1049 
1050 	if ((dp = door_lookup(did, NULL)) == NULL)
1051 		return (set_errno(EBADF));
1052 
1053 	error = door_getparam_common(dp, type, &val);
1054 
1055 	releasef(did);
1056 
1057 	if (error)
1058 		return (set_errno(error));
1059 
1060 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1061 		if (copyout(&val, out, sizeof (val)))
1062 			return (set_errno(EFAULT));
1063 #ifdef _SYSCALL32_IMPL
1064 	} else {
1065 		size32_t val32 = (size32_t)val;
1066 
1067 		if (val != val32)
1068 			return (set_errno(EOVERFLOW));
1069 
1070 		if (copyout(&val32, out, sizeof (val32)))
1071 			return (set_errno(EFAULT));
1072 #endif /* _SYSCALL32_IMPL */
1073 	}
1074 
1075 	return (0);
1076 }
1077 
1078 /*
1079  * A copyout() which proceeds from high addresses to low addresses.  This way,
1080  * stack guard pages are effective.
1081  *
1082  * Note that we use copyout_nowatch();  this is called while the client is
1083  * held.
1084  */
1085 static int
door_stack_copyout(const void * kaddr,void * uaddr,size_t count)1086 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1087 {
1088 	const char *kbase = (const char *)kaddr;
1089 	uintptr_t ubase = (uintptr_t)uaddr;
1090 	size_t pgsize = PAGESIZE;
1091 
1092 	if (count <= pgsize)
1093 		return (copyout_nowatch(kaddr, uaddr, count));
1094 
1095 	while (count > 0) {
1096 		uintptr_t start, end, offset, amount;
1097 
1098 		end = ubase + count;
1099 		start = P2ALIGN(end - 1, pgsize);
1100 		if (P2ALIGN(ubase, pgsize) == start)
1101 			start = ubase;
1102 
1103 		offset = start - ubase;
1104 		amount = end - start;
1105 
1106 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1107 
1108 		if (copyout_nowatch(kbase + offset, (void *)start, amount))
1109 			return (1);
1110 		count -= amount;
1111 	}
1112 	return (0);
1113 }
1114 
1115 /*
1116  * Writes the stack layout for door_return() into the door_server_t of the
1117  * server thread.
1118  */
1119 static int
door_layout(kthread_t * tp,size_t data_size,uint_t ndesc,int info_needed)1120 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1121 {
1122 	door_server_t *st = DOOR_SERVER(tp->t_door);
1123 	door_layout_t *out = &st->d_layout;
1124 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1125 	size_t ssize = st->d_ssize;
1126 	size_t descsz;
1127 	uintptr_t descp, datap, infop, resultsp, finalsp;
1128 	size_t align = STACK_ALIGN;
1129 	size_t results_sz = sizeof (struct door_results);
1130 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1131 
1132 	ASSERT(!st->d_layout_done);
1133 
1134 #ifndef _STACK_GROWS_DOWNWARD
1135 #error stack does not grow downward, door_layout() must change
1136 #endif
1137 
1138 #ifdef _SYSCALL32_IMPL
1139 	if (datamodel != DATAMODEL_NATIVE) {
1140 		align = STACK_ALIGN32;
1141 		results_sz = sizeof (struct door_results32);
1142 	}
1143 #endif
1144 
1145 	descsz = ndesc * sizeof (door_desc_t);
1146 
1147 	/*
1148 	 * To speed up the overflow checking, we do an initial check
1149 	 * that the passed in data size won't cause us to wrap past
1150 	 * base_sp.  Since door_max_desc limits descsz, we can
1151 	 * safely use it here.  65535 is an arbitrary 'bigger than
1152 	 * we need, small enough to not cause trouble' constant;
1153 	 * the only constraint is that it must be > than:
1154 	 *
1155 	 *	5 * STACK_ALIGN +
1156 	 *	    sizeof (door_info_t) +
1157 	 *	    sizeof (door_results_t) +
1158 	 *	    (max adjustment from door_final_sp())
1159 	 *
1160 	 * After we compute the layout, we can safely do a "did we wrap
1161 	 * around" check, followed by a check against the recorded
1162 	 * stack size.
1163 	 */
1164 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1165 		return (E2BIG);		/* overflow */
1166 
1167 	descp = P2ALIGN(base_sp - descsz, align);
1168 	datap = P2ALIGN(descp - data_size, align);
1169 
1170 	if (info_needed)
1171 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1172 	else
1173 		infop = datap;
1174 
1175 	resultsp = P2ALIGN(infop - results_sz, align);
1176 	finalsp = door_final_sp(resultsp, align, datamodel);
1177 
1178 	if (finalsp > base_sp)
1179 		return (E2BIG);		/* overflow */
1180 
1181 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1182 		return (E2BIG);		/* doesn't fit in stack */
1183 
1184 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1185 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1186 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1187 	out->dl_resultsp = (caddr_t)resultsp;
1188 	out->dl_sp = (caddr_t)finalsp;
1189 
1190 	st->d_layout_done = 1;
1191 	return (0);
1192 }
1193 
1194 static int
door_server_dispatch(door_client_t * ct,door_node_t * dp)1195 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1196 {
1197 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1198 	door_layout_t *layout = &st->d_layout;
1199 	int error = 0;
1200 
1201 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1202 
1203 	door_pool_t *pool = (is_private)? &dp->door_servers :
1204 	    &curproc->p_server_threads;
1205 
1206 	int empty_pool = (pool->dp_threads == NULL);
1207 
1208 	caddr_t infop = NULL;
1209 	char *datap = NULL;
1210 	size_t datasize = 0;
1211 	size_t descsize;
1212 
1213 	file_t **fpp = ct->d_fpp;
1214 	door_desc_t *start = NULL;
1215 	uint_t ndesc = 0;
1216 	uint_t ncopied = 0;
1217 
1218 	if (ct != NULL) {
1219 		datap = ct->d_args.data_ptr;
1220 		datasize = ct->d_args.data_size;
1221 		ndesc = ct->d_args.desc_num;
1222 	}
1223 
1224 	descsize = ndesc * sizeof (door_desc_t);
1225 
1226 	/*
1227 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1228 	 * to let unref notifications through, though.
1229 	 */
1230 	if (datap == DOOR_UNREF_DATA) {
1231 		if (ct->d_upcall != NULL)
1232 			datasize = 0;
1233 		else
1234 			datap = NULL;
1235 	} else if (datasize == 0) {
1236 		datap = NULL;
1237 	}
1238 
1239 	/*
1240 	 * Get the stack layout, if it hasn't already been done.
1241 	 */
1242 	if (!st->d_layout_done) {
1243 		error = door_layout(curthread, datasize, ndesc,
1244 		    (is_private && empty_pool));
1245 		if (error != 0)
1246 			goto fail;
1247 	}
1248 
1249 	/*
1250 	 * fill out the stack, starting from the top.  Layout was already
1251 	 * filled in by door_args() or door_translate_out().
1252 	 */
1253 	if (layout->dl_descp != NULL) {
1254 		ASSERT(ndesc != 0);
1255 		start = kmem_alloc(descsize, KM_SLEEP);
1256 
1257 		while (ndesc > 0) {
1258 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1259 				error = EMFILE;
1260 				goto fail;
1261 			}
1262 			ndesc--;
1263 			ncopied++;
1264 			fpp++;
1265 		}
1266 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1267 			error = E2BIG;
1268 			goto fail;
1269 		}
1270 	}
1271 	fpp = NULL;			/* finished processing */
1272 
1273 	if (layout->dl_datap != NULL) {
1274 		ASSERT(datasize != 0);
1275 		datap = layout->dl_datap;
1276 		if (ct->d_upcall != NULL || datasize <= door_max_arg) {
1277 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1278 				error = E2BIG;
1279 				goto fail;
1280 			}
1281 		}
1282 	}
1283 
1284 	if (is_private && empty_pool) {
1285 		door_info_t di;
1286 
1287 		infop = layout->dl_infop;
1288 		ASSERT(infop != NULL);
1289 
1290 		di.di_target = curproc->p_pid;
1291 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1292 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1293 		di.di_uniquifier = dp->door_index;
1294 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1295 		    DOOR_LOCAL;
1296 
1297 		if (door_stack_copyout(&di, infop, sizeof (di))) {
1298 			error = E2BIG;
1299 			goto fail;
1300 		}
1301 	}
1302 
1303 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1304 		struct door_results dr;
1305 
1306 		dr.cookie = dp->door_data;
1307 		dr.data_ptr = datap;
1308 		dr.data_size = datasize;
1309 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1310 		dr.desc_num = ncopied;
1311 		dr.pc = dp->door_pc;
1312 		dr.nservers = !empty_pool;
1313 		dr.door_info = (door_info_t *)infop;
1314 
1315 		if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1316 			error = E2BIG;
1317 			goto fail;
1318 		}
1319 #ifdef _SYSCALL32_IMPL
1320 	} else {
1321 		struct door_results32 dr32;
1322 
1323 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1324 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1325 		dr32.data_size = (size32_t)datasize;
1326 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1327 		dr32.desc_num = ncopied;
1328 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1329 		dr32.nservers = !empty_pool;
1330 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1331 
1332 		if (door_stack_copyout(&dr32, layout->dl_resultsp,
1333 		    sizeof (dr32))) {
1334 			error = E2BIG;
1335 			goto fail;
1336 		}
1337 #endif
1338 	}
1339 
1340 	error = door_finish_dispatch(layout->dl_sp);
1341 fail:
1342 	if (start != NULL) {
1343 		if (error != 0)
1344 			door_fd_close(start, ncopied);
1345 		kmem_free(start, descsize);
1346 	}
1347 	if (fpp != NULL)
1348 		door_fp_close(fpp, ndesc);
1349 
1350 	return (error);
1351 }
1352 
1353 /*
1354  * Return the results (if any) to the caller (if any) and wait for the
1355  * next invocation on a door.
1356  */
1357 int
door_return(caddr_t data_ptr,size_t data_size,door_desc_t * desc_ptr,uint_t desc_num,caddr_t sp,size_t ssize)1358 door_return(caddr_t data_ptr, size_t data_size,
1359     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1360 {
1361 	kthread_t	*caller;
1362 	klwp_t		*lwp;
1363 	int		error = 0;
1364 	door_node_t	*dp;
1365 	door_server_t	*st;		/* curthread door_data */
1366 	door_client_t	*ct;		/* caller door_data */
1367 	int		cancel_pending;
1368 
1369 	st = door_my_server(1);
1370 
1371 	/*
1372 	 * If thread was bound to a door that no longer exists, return
1373 	 * an error.  This can happen if a thread is bound to a door
1374 	 * before the process calls forkall(); in the child, the door
1375 	 * doesn't exist and door_fork() sets the d_invbound flag.
1376 	 */
1377 	if (st->d_invbound)
1378 		return (set_errno(EINVAL));
1379 
1380 	st->d_sp = sp;			/* Save base of stack. */
1381 	st->d_ssize = ssize;		/* and its size */
1382 
1383 	/*
1384 	 * This should be done in shuttle_resume(), just before going to
1385 	 * sleep, but we want to avoid overhead while holding door_knob.
1386 	 * prstop() is just a no-op if we don't really go to sleep.
1387 	 * We test not-kernel-address-space for the sake of clustering code.
1388 	 */
1389 	lwp = ttolwp(curthread);
1390 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
1391 		prstop(PR_REQUESTED, 0);
1392 
1393 	/* Make sure the caller hasn't gone away */
1394 	mutex_enter(&door_knob);
1395 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1396 		if (desc_num != 0) {
1397 			/* close any DOOR_RELEASE descriptors */
1398 			mutex_exit(&door_knob);
1399 			error = door_release_fds(desc_ptr, desc_num);
1400 			if (error)
1401 				return (set_errno(error));
1402 			mutex_enter(&door_knob);
1403 		}
1404 		goto out;
1405 	}
1406 	ct = DOOR_CLIENT(caller->t_door);
1407 
1408 	ct->d_args.data_size = data_size;
1409 	ct->d_args.desc_num = desc_num;
1410 	/*
1411 	 * Transfer results, if any, to the client
1412 	 */
1413 	if (data_size != 0 || desc_num != 0) {
1414 		/*
1415 		 * Prevent the client from exiting until we have finished
1416 		 * moving results.
1417 		 */
1418 		DOOR_T_HOLD(ct);
1419 		mutex_exit(&door_knob);
1420 		error = door_results(caller, data_ptr, data_size,
1421 		    desc_ptr, desc_num);
1422 		mutex_enter(&door_knob);
1423 		DOOR_T_RELEASE(ct);
1424 		/*
1425 		 * Pass EOVERFLOW errors back to the client
1426 		 */
1427 		if (error && error != EOVERFLOW) {
1428 			mutex_exit(&door_knob);
1429 			return (set_errno(error));
1430 		}
1431 	}
1432 out:
1433 	/* Put ourselves on the available server thread list */
1434 	door_release_server(st->d_pool, curthread);
1435 
1436 	/*
1437 	 * Make sure the caller is still waiting to be resumed
1438 	 */
1439 	if (caller) {
1440 		disp_lock_t *tlp;
1441 
1442 		thread_lock(caller);
1443 		ct->d_error = error;		/* Return any errors */
1444 		if (caller->t_state == TS_SLEEP &&
1445 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1446 			cpu_t *cp = CPU;
1447 
1448 			tlp = caller->t_lockp;
1449 			/*
1450 			 * Setting t_disp_queue prevents erroneous preemptions
1451 			 * if this thread is still in execution on another
1452 			 * processor
1453 			 */
1454 			caller->t_disp_queue = cp->cpu_disp;
1455 			CL_ACTIVE(caller);
1456 			/*
1457 			 * We are calling thread_onproc() instead of
1458 			 * THREAD_ONPROC() because compiler can reorder
1459 			 * the two stores of t_state and t_lockp in
1460 			 * THREAD_ONPROC().
1461 			 */
1462 			thread_onproc(caller, cp);
1463 			disp_lock_exit_high(tlp);
1464 			shuttle_resume(caller, &door_knob);
1465 		} else {
1466 			/* May have been setrun or in stop state */
1467 			thread_unlock(caller);
1468 			shuttle_swtch(&door_knob);
1469 		}
1470 	} else {
1471 		shuttle_swtch(&door_knob);
1472 	}
1473 
1474 	/*
1475 	 * We've sprung to life. Determine if we are part of a door
1476 	 * invocation, or just interrupted
1477 	 */
1478 	mutex_enter(&door_knob);
1479 	if ((dp = st->d_active) != NULL) {
1480 		/*
1481 		 * Normal door invocation. Return any error condition
1482 		 * encountered while trying to pass args to the server
1483 		 * thread.
1484 		 */
1485 		lwp->lwp_asleep = 0;
1486 		/*
1487 		 * Prevent the caller from leaving us while we
1488 		 * are copying out the arguments from it's buffer.
1489 		 */
1490 		ASSERT(st->d_caller != NULL);
1491 		ct = DOOR_CLIENT(st->d_caller->t_door);
1492 
1493 		DOOR_T_HOLD(ct);
1494 		mutex_exit(&door_knob);
1495 		error = door_server_dispatch(ct, dp);
1496 		mutex_enter(&door_knob);
1497 		DOOR_T_RELEASE(ct);
1498 
1499 		/* let the client know we have processed his message */
1500 		ct->d_args_done = 1;
1501 
1502 		if (error) {
1503 			caller = st->d_caller;
1504 			if (caller)
1505 				ct = DOOR_CLIENT(caller->t_door);
1506 			else
1507 				ct = NULL;
1508 			goto out;
1509 		}
1510 		mutex_exit(&door_knob);
1511 		return (0);
1512 	} else {
1513 		/*
1514 		 * We are not involved in a door_invocation.
1515 		 * Check for /proc related activity...
1516 		 */
1517 		st->d_caller = NULL;
1518 		door_server_exit(curproc, curthread);
1519 		mutex_exit(&door_knob);
1520 		cancel_pending = 0;
1521 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1522 		    MUSTRETURN(curproc, curthread) ||
1523 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
1524 			if (cancel_pending)
1525 				schedctl_cancel_eintr();
1526 			lwp->lwp_asleep = 0;
1527 			lwp->lwp_sysabort = 0;
1528 			return (set_errno(EINTR));
1529 		}
1530 		/* Go back and wait for another request */
1531 		lwp->lwp_asleep = 0;
1532 		mutex_enter(&door_knob);
1533 		caller = NULL;
1534 		goto out;
1535 	}
1536 }
1537 
1538 /*
1539  * Revoke any future invocations on this door
1540  */
1541 int
door_revoke(int did)1542 door_revoke(int did)
1543 {
1544 	door_node_t	*d;
1545 	int		error;
1546 
1547 	if ((d = door_lookup(did, NULL)) == NULL)
1548 		return (set_errno(EBADF));
1549 
1550 	mutex_enter(&door_knob);
1551 	if (d->door_target != curproc) {
1552 		mutex_exit(&door_knob);
1553 		releasef(did);
1554 		return (set_errno(EPERM));
1555 	}
1556 	d->door_flags |= DOOR_REVOKED;
1557 	if (d->door_flags & DOOR_PRIVATE)
1558 		cv_broadcast(&d->door_servers.dp_cv);
1559 	else
1560 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1561 	mutex_exit(&door_knob);
1562 	releasef(did);
1563 	/* Invalidate the descriptor */
1564 	if ((error = closeandsetf(did, NULL)) != 0)
1565 		return (set_errno(error));
1566 	return (0);
1567 }
1568 
1569 int
door_info(int did,struct door_info * d_info)1570 door_info(int did, struct door_info *d_info)
1571 {
1572 	door_node_t	*dp;
1573 	door_info_t	di;
1574 	door_server_t	*st;
1575 	file_t		*fp = NULL;
1576 
1577 	if (did == DOOR_QUERY) {
1578 		/* Get information on door current thread is bound to */
1579 		if ((st = door_my_server(0)) == NULL ||
1580 		    (dp = st->d_pool) == NULL)
1581 			/* Thread isn't bound to a door */
1582 			return (set_errno(EBADF));
1583 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1584 		/* Not a door */
1585 		return (set_errno(EBADF));
1586 	}
1587 
1588 	door_info_common(dp, &di, fp);
1589 
1590 	if (did != DOOR_QUERY)
1591 		releasef(did);
1592 
1593 	if (copyout(&di, d_info, sizeof (struct door_info)))
1594 		return (set_errno(EFAULT));
1595 	return (0);
1596 }
1597 
1598 /*
1599  * Common code for getting information about a door either via the
1600  * door_info system call or the door_ki_info kernel call.
1601  */
1602 void
door_info_common(door_node_t * dp,struct door_info * dip,file_t * fp)1603 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1604 {
1605 	int unref_count;
1606 
1607 	bzero(dip, sizeof (door_info_t));
1608 
1609 	mutex_enter(&door_knob);
1610 	if (dp->door_target == NULL)
1611 		dip->di_target = -1;
1612 	else
1613 		dip->di_target = dp->door_target->p_pid;
1614 
1615 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1616 	if (dp->door_target == curproc)
1617 		dip->di_attributes |= DOOR_LOCAL;
1618 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1619 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1620 	dip->di_uniquifier = dp->door_index;
1621 	/*
1622 	 * If this door is in the middle of having an unreferenced
1623 	 * notification delivered, don't count the VN_HOLD by
1624 	 * door_deliver_unref in determining if it is unreferenced.
1625 	 * This handles the case where door_info is called from the
1626 	 * thread delivering the unref notification.
1627 	 */
1628 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1629 		unref_count = 2;
1630 	else
1631 		unref_count = 1;
1632 	mutex_exit(&door_knob);
1633 
1634 	if (fp == NULL) {
1635 		/*
1636 		 * If this thread is bound to the door, then we can just
1637 		 * check the vnode; a ref count of 1 (or 2 if this is
1638 		 * handling an unref notification) means that the hold
1639 		 * from the door_bind is the only reference to the door
1640 		 * (no file descriptor refers to it).
1641 		 */
1642 		if (DTOV(dp)->v_count == unref_count)
1643 			dip->di_attributes |= DOOR_IS_UNREF;
1644 	} else {
1645 		/*
1646 		 * If we're working from a file descriptor or door handle
1647 		 * we need to look at the file structure count.  We don't
1648 		 * need to hold the vnode lock since this is just a snapshot.
1649 		 */
1650 		mutex_enter(&fp->f_tlock);
1651 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1652 			dip->di_attributes |= DOOR_IS_UNREF;
1653 		mutex_exit(&fp->f_tlock);
1654 	}
1655 }
1656 
1657 /*
1658  * Return credentials of the door caller (if any) for this invocation
1659  */
1660 int
door_ucred(struct ucred_s * uch)1661 door_ucred(struct ucred_s *uch)
1662 {
1663 	kthread_t	*caller;
1664 	door_server_t	*st;
1665 	door_client_t	*ct;
1666 	door_upcall_t	*dup;
1667 	struct proc	*p;
1668 	struct ucred_s	*res;
1669 	int		err;
1670 
1671 	mutex_enter(&door_knob);
1672 	if ((st = door_my_server(0)) == NULL ||
1673 	    (caller = st->d_caller) == NULL) {
1674 		mutex_exit(&door_knob);
1675 		return (set_errno(EINVAL));
1676 	}
1677 
1678 	ASSERT(caller->t_door != NULL);
1679 	ct = DOOR_CLIENT(caller->t_door);
1680 
1681 	/* Prevent caller from exiting while we examine the cred */
1682 	DOOR_T_HOLD(ct);
1683 	mutex_exit(&door_knob);
1684 
1685 	p = ttoproc(caller);
1686 
1687 	/*
1688 	 * If the credentials are not specified by the client, get the one
1689 	 * associated with the calling process.
1690 	 */
1691 	if ((dup = ct->d_upcall) != NULL)
1692 		res = cred2ucred(dup->du_cred, p0.p_pid, NULL, CRED());
1693 	else
1694 		res = cred2ucred(caller->t_cred, p->p_pid, NULL, CRED());
1695 
1696 	mutex_enter(&door_knob);
1697 	DOOR_T_RELEASE(ct);
1698 	mutex_exit(&door_knob);
1699 
1700 	err = copyout(res, uch, res->uc_size);
1701 
1702 	kmem_free(res, res->uc_size);
1703 
1704 	if (err != 0)
1705 		return (set_errno(EFAULT));
1706 
1707 	return (0);
1708 }
1709 
1710 /*
1711  * Bind the current lwp to the server thread pool associated with 'did'
1712  */
1713 int
door_bind(int did)1714 door_bind(int did)
1715 {
1716 	door_node_t	*dp;
1717 	door_server_t	*st;
1718 
1719 	if ((dp = door_lookup(did, NULL)) == NULL) {
1720 		/* Not a door */
1721 		return (set_errno(EBADF));
1722 	}
1723 
1724 	/*
1725 	 * Can't bind to a non-private door, and can't bind to a door
1726 	 * served by another process.
1727 	 */
1728 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1729 	    dp->door_target != curproc) {
1730 		releasef(did);
1731 		return (set_errno(EINVAL));
1732 	}
1733 
1734 	st = door_my_server(1);
1735 	if (st->d_pool)
1736 		door_unbind_thread(st->d_pool);
1737 	st->d_pool = dp;
1738 	st->d_invbound = 0;
1739 	door_bind_thread(dp);
1740 	releasef(did);
1741 
1742 	return (0);
1743 }
1744 
1745 /*
1746  * Unbind the current lwp from it's server thread pool
1747  */
1748 int
door_unbind(void)1749 door_unbind(void)
1750 {
1751 	door_server_t *st;
1752 
1753 	if ((st = door_my_server(0)) == NULL)
1754 		return (set_errno(EBADF));
1755 
1756 	if (st->d_invbound) {
1757 		ASSERT(st->d_pool == NULL);
1758 		st->d_invbound = 0;
1759 		return (0);
1760 	}
1761 	if (st->d_pool == NULL)
1762 		return (set_errno(EBADF));
1763 	door_unbind_thread(st->d_pool);
1764 	st->d_pool = NULL;
1765 	return (0);
1766 }
1767 
1768 /*
1769  * Create a descriptor for the associated file and fill in the
1770  * attributes associated with it.
1771  *
1772  * Return 0 for success, -1 otherwise;
1773  */
1774 int
door_insert(struct file * fp,door_desc_t * dp)1775 door_insert(struct file *fp, door_desc_t *dp)
1776 {
1777 	struct vnode *vp;
1778 	int	fd;
1779 	door_attr_t attributes = DOOR_DESCRIPTOR;
1780 
1781 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1782 	if ((fd = ufalloc(0)) == -1)
1783 		return (-1);
1784 	setf(fd, fp);
1785 	dp->d_data.d_desc.d_descriptor = fd;
1786 
1787 	/* Fill in the attributes */
1788 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1789 		vp = fp->f_vnode;
1790 	if (vp && vp->v_type == VDOOR) {
1791 		if (VTOD(vp)->door_target == curproc)
1792 			attributes |= DOOR_LOCAL;
1793 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1794 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1795 	}
1796 	dp->d_attributes = attributes;
1797 	return (0);
1798 }
1799 
1800 /*
1801  * Return an available thread for this server.  A NULL return value indicates
1802  * that either:
1803  *	The door has been revoked, or
1804  *	a signal was received.
1805  * The two conditions can be differentiated using DOOR_INVALID(dp).
1806  */
1807 static kthread_t *
door_get_server(door_node_t * dp)1808 door_get_server(door_node_t *dp)
1809 {
1810 	kthread_t **ktp;
1811 	kthread_t *server_t;
1812 	door_pool_t *pool;
1813 	door_server_t *st;
1814 	int signalled;
1815 
1816 	disp_lock_t *tlp;
1817 	cpu_t *cp;
1818 
1819 	ASSERT(MUTEX_HELD(&door_knob));
1820 
1821 	if (dp->door_flags & DOOR_PRIVATE)
1822 		pool = &dp->door_servers;
1823 	else
1824 		pool = &dp->door_target->p_server_threads;
1825 
1826 	for (;;) {
1827 		/*
1828 		 * We search the thread pool, looking for a server thread
1829 		 * ready to take an invocation (i.e. one which is still
1830 		 * sleeping on a shuttle object).  If none are available,
1831 		 * we sleep on the pool's CV, and will be signaled when a
1832 		 * thread is added to the pool.
1833 		 *
1834 		 * This relies on the fact that once a thread in the thread
1835 		 * pool wakes up, it *must* remove and add itself to the pool
1836 		 * before it can receive door calls.
1837 		 */
1838 		if (DOOR_INVALID(dp))
1839 			return (NULL);	/* Target has become invalid */
1840 
1841 		for (ktp = &pool->dp_threads;
1842 		    (server_t = *ktp) != NULL;
1843 		    ktp = &st->d_servers) {
1844 			st = DOOR_SERVER(server_t->t_door);
1845 
1846 			thread_lock(server_t);
1847 			if (server_t->t_state == TS_SLEEP &&
1848 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1849 				break;
1850 			thread_unlock(server_t);
1851 		}
1852 		if (server_t != NULL)
1853 			break;		/* we've got a live one! */
1854 
1855 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1856 		    &signalled)) {
1857 			/*
1858 			 * If we were signaled and the door is still
1859 			 * valid, pass the signal on to another waiter.
1860 			 */
1861 			if (signalled && !DOOR_INVALID(dp))
1862 				cv_signal(&pool->dp_cv);
1863 			return (NULL);	/* Got a signal */
1864 		}
1865 	}
1866 
1867 	/*
1868 	 * We've got a thread_lock()ed thread which is still on the
1869 	 * shuttle.  Take it off the list of available server threads
1870 	 * and mark it as ONPROC.  We are committed to resuming this
1871 	 * thread now.
1872 	 */
1873 	tlp = server_t->t_lockp;
1874 	cp = CPU;
1875 
1876 	*ktp = st->d_servers;
1877 	st->d_servers = NULL;
1878 	/*
1879 	 * Setting t_disp_queue prevents erroneous preemptions
1880 	 * if this thread is still in execution on another processor
1881 	 */
1882 	server_t->t_disp_queue = cp->cpu_disp;
1883 	CL_ACTIVE(server_t);
1884 	/*
1885 	 * We are calling thread_onproc() instead of
1886 	 * THREAD_ONPROC() because compiler can reorder
1887 	 * the two stores of t_state and t_lockp in
1888 	 * THREAD_ONPROC().
1889 	 */
1890 	thread_onproc(server_t, cp);
1891 	disp_lock_exit(tlp);
1892 	return (server_t);
1893 }
1894 
1895 /*
1896  * Put a server thread back in the pool.
1897  */
1898 static void
door_release_server(door_node_t * dp,kthread_t * t)1899 door_release_server(door_node_t *dp, kthread_t *t)
1900 {
1901 	door_server_t *st = DOOR_SERVER(t->t_door);
1902 	door_pool_t *pool;
1903 
1904 	ASSERT(MUTEX_HELD(&door_knob));
1905 	st->d_active = NULL;
1906 	st->d_caller = NULL;
1907 	st->d_layout_done = 0;
1908 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1909 		ASSERT(dp->door_target == NULL ||
1910 		    dp->door_target == ttoproc(t));
1911 		pool = &dp->door_servers;
1912 	} else {
1913 		pool = &ttoproc(t)->p_server_threads;
1914 	}
1915 
1916 	st->d_servers = pool->dp_threads;
1917 	pool->dp_threads = t;
1918 
1919 	/* If someone is waiting for a server thread, wake him up */
1920 	cv_signal(&pool->dp_cv);
1921 }
1922 
1923 /*
1924  * Remove a server thread from the pool if present.
1925  */
1926 static void
door_server_exit(proc_t * p,kthread_t * t)1927 door_server_exit(proc_t *p, kthread_t *t)
1928 {
1929 	door_pool_t *pool;
1930 	kthread_t **next;
1931 	door_server_t *st = DOOR_SERVER(t->t_door);
1932 
1933 	ASSERT(MUTEX_HELD(&door_knob));
1934 	if (st->d_pool != NULL) {
1935 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1936 		pool = &st->d_pool->door_servers;
1937 	} else {
1938 		pool = &p->p_server_threads;
1939 	}
1940 
1941 	next = &pool->dp_threads;
1942 	while (*next != NULL) {
1943 		if (*next == t) {
1944 			*next = DOOR_SERVER(t->t_door)->d_servers;
1945 			return;
1946 		}
1947 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1948 	}
1949 }
1950 
1951 /*
1952  * Lookup the door descriptor. Caller must call releasef when finished
1953  * with associated door.
1954  */
1955 static door_node_t *
door_lookup(int did,file_t ** fpp)1956 door_lookup(int did, file_t **fpp)
1957 {
1958 	vnode_t	*vp;
1959 	file_t *fp;
1960 
1961 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1962 	if ((fp = getf(did)) == NULL)
1963 		return (NULL);
1964 	/*
1965 	 * Use the underlying vnode (we may be namefs mounted)
1966 	 */
1967 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1968 		vp = fp->f_vnode;
1969 
1970 	if (vp == NULL || vp->v_type != VDOOR) {
1971 		releasef(did);
1972 		return (NULL);
1973 	}
1974 
1975 	if (fpp)
1976 		*fpp = fp;
1977 
1978 	return (VTOD(vp));
1979 }
1980 
1981 /*
1982  * The current thread is exiting, so clean up any pending
1983  * invocation details
1984  */
1985 void
door_slam(void)1986 door_slam(void)
1987 {
1988 	door_node_t *dp;
1989 	door_data_t *dt;
1990 	door_client_t *ct;
1991 	door_server_t *st;
1992 
1993 	/*
1994 	 * If we are an active door server, notify our
1995 	 * client that we are exiting and revoke our door.
1996 	 */
1997 	if ((dt = door_my_data(0)) == NULL)
1998 		return;
1999 	ct = DOOR_CLIENT(dt);
2000 	st = DOOR_SERVER(dt);
2001 
2002 	mutex_enter(&door_knob);
2003 	for (;;) {
2004 		if (DOOR_T_HELD(ct))
2005 			cv_wait(&ct->d_cv, &door_knob);
2006 		else if (DOOR_T_HELD(st))
2007 			cv_wait(&st->d_cv, &door_knob);
2008 		else
2009 			break;			/* neither flag is set */
2010 	}
2011 	curthread->t_door = NULL;
2012 	if ((dp = st->d_active) != NULL) {
2013 		kthread_t *t = st->d_caller;
2014 		proc_t *p = curproc;
2015 
2016 		/* Revoke our door if the process is exiting */
2017 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
2018 			door_list_delete(dp);
2019 			dp->door_target = NULL;
2020 			dp->door_flags |= DOOR_REVOKED;
2021 			if (dp->door_flags & DOOR_PRIVATE)
2022 				cv_broadcast(&dp->door_servers.dp_cv);
2023 			else
2024 				cv_broadcast(&p->p_server_threads.dp_cv);
2025 		}
2026 
2027 		if (t != NULL) {
2028 			/*
2029 			 * Let the caller know we are gone
2030 			 */
2031 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
2032 			thread_lock(t);
2033 			if (t->t_state == TS_SLEEP &&
2034 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
2035 				setrun_locked(t);
2036 			thread_unlock(t);
2037 		}
2038 	}
2039 	mutex_exit(&door_knob);
2040 	if (st->d_pool)
2041 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
2042 	kmem_free(dt, sizeof (door_data_t));
2043 }
2044 
2045 /*
2046  * Set DOOR_REVOKED for all doors of the current process. This is called
2047  * on exit before all lwp's are being terminated so that door calls will
2048  * return with an error.
2049  */
2050 void
door_revoke_all()2051 door_revoke_all()
2052 {
2053 	door_node_t *dp;
2054 	proc_t *p = ttoproc(curthread);
2055 
2056 	mutex_enter(&door_knob);
2057 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2058 		ASSERT(dp->door_target == p);
2059 		dp->door_flags |= DOOR_REVOKED;
2060 		if (dp->door_flags & DOOR_PRIVATE)
2061 			cv_broadcast(&dp->door_servers.dp_cv);
2062 	}
2063 	cv_broadcast(&p->p_server_threads.dp_cv);
2064 	mutex_exit(&door_knob);
2065 }
2066 
2067 /*
2068  * The process is exiting, and all doors it created need to be revoked.
2069  */
2070 void
door_exit(void)2071 door_exit(void)
2072 {
2073 	door_node_t *dp;
2074 	proc_t *p = ttoproc(curthread);
2075 
2076 	ASSERT(p->p_lwpcnt == 1);
2077 	/*
2078 	 * Walk the list of active doors created by this process and
2079 	 * revoke them all.
2080 	 */
2081 	mutex_enter(&door_knob);
2082 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2083 		dp->door_target = NULL;
2084 		dp->door_flags |= DOOR_REVOKED;
2085 		if (dp->door_flags & DOOR_PRIVATE)
2086 			cv_broadcast(&dp->door_servers.dp_cv);
2087 	}
2088 	cv_broadcast(&p->p_server_threads.dp_cv);
2089 	/* Clear the list */
2090 	p->p_door_list = NULL;
2091 
2092 	/* Clean up the unref list */
2093 	while ((dp = p->p_unref_list) != NULL) {
2094 		p->p_unref_list = dp->door_ulist;
2095 		dp->door_ulist = NULL;
2096 		mutex_exit(&door_knob);
2097 		VN_RELE(DTOV(dp));
2098 		mutex_enter(&door_knob);
2099 	}
2100 	mutex_exit(&door_knob);
2101 }
2102 
2103 
2104 /*
2105  * The process is executing forkall(), and we need to flag threads that
2106  * are bound to a door in the child.  This will make the child threads
2107  * return an error to door_return unless they call door_unbind first.
2108  */
2109 void
door_fork(kthread_t * parent,kthread_t * child)2110 door_fork(kthread_t *parent, kthread_t *child)
2111 {
2112 	door_data_t *pt = parent->t_door;
2113 	door_server_t *st = DOOR_SERVER(pt);
2114 	door_data_t *dt;
2115 
2116 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2117 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2118 		/* parent thread is bound to a door */
2119 		dt = child->t_door =
2120 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2121 		DOOR_SERVER(dt)->d_invbound = 1;
2122 	}
2123 }
2124 
2125 /*
2126  * Deliver queued unrefs to appropriate door server.
2127  */
2128 static int
door_unref(void)2129 door_unref(void)
2130 {
2131 	door_node_t	*dp;
2132 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2133 	proc_t *p = ttoproc(curthread);
2134 
2135 	/* make sure there's only one unref thread per process */
2136 	mutex_enter(&door_knob);
2137 	if (p->p_unref_thread) {
2138 		mutex_exit(&door_knob);
2139 		return (set_errno(EALREADY));
2140 	}
2141 	p->p_unref_thread = 1;
2142 	mutex_exit(&door_knob);
2143 
2144 	(void) door_my_data(1);			/* create info, if necessary */
2145 
2146 	for (;;) {
2147 		mutex_enter(&door_knob);
2148 
2149 		/* Grab a queued request */
2150 		while ((dp = p->p_unref_list) == NULL) {
2151 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2152 				/*
2153 				 * Interrupted.
2154 				 * Return so we can finish forkall() or exit().
2155 				 */
2156 				p->p_unref_thread = 0;
2157 				mutex_exit(&door_knob);
2158 				return (set_errno(EINTR));
2159 			}
2160 		}
2161 		p->p_unref_list = dp->door_ulist;
2162 		dp->door_ulist = NULL;
2163 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2164 		mutex_exit(&door_knob);
2165 
2166 		(void) door_upcall(DTOV(dp), &unref_args, NULL, SIZE_MAX, 0);
2167 
2168 		if (unref_args.rbuf != 0) {
2169 			kmem_free(unref_args.rbuf, unref_args.rsize);
2170 			unref_args.rbuf = NULL;
2171 			unref_args.rsize = 0;
2172 		}
2173 
2174 		mutex_enter(&door_knob);
2175 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2176 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2177 		mutex_exit(&door_knob);
2178 		VN_RELE(DTOV(dp));
2179 	}
2180 }
2181 
2182 
2183 /*
2184  * Deliver queued unrefs to kernel door server.
2185  */
2186 /* ARGSUSED */
2187 static void
door_unref_kernel(caddr_t arg)2188 door_unref_kernel(caddr_t arg)
2189 {
2190 	door_node_t	*dp;
2191 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2192 	proc_t *p = ttoproc(curthread);
2193 	callb_cpr_t cprinfo;
2194 
2195 	/* should only be one of these */
2196 	mutex_enter(&door_knob);
2197 	if (p->p_unref_thread) {
2198 		mutex_exit(&door_knob);
2199 		return;
2200 	}
2201 	p->p_unref_thread = 1;
2202 	mutex_exit(&door_knob);
2203 
2204 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2205 
2206 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2207 	for (;;) {
2208 		mutex_enter(&door_knob);
2209 		/* Grab a queued request */
2210 		while ((dp = p->p_unref_list) == NULL) {
2211 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2212 			cv_wait(&p->p_unref_cv, &door_knob);
2213 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2214 		}
2215 		p->p_unref_list = dp->door_ulist;
2216 		dp->door_ulist = NULL;
2217 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2218 		mutex_exit(&door_knob);
2219 
2220 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2221 
2222 		mutex_enter(&door_knob);
2223 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2224 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2225 		mutex_exit(&door_knob);
2226 		VN_RELE(DTOV(dp));
2227 	}
2228 }
2229 
2230 
2231 /*
2232  * Queue an unref invocation for processing for the current process
2233  * The door may or may not be revoked at this point.
2234  */
2235 void
door_deliver_unref(door_node_t * d)2236 door_deliver_unref(door_node_t *d)
2237 {
2238 	struct proc *server = d->door_target;
2239 
2240 	ASSERT(MUTEX_HELD(&door_knob));
2241 	ASSERT(d->door_active == 0);
2242 
2243 	if (server == NULL)
2244 		return;
2245 	/*
2246 	 * Create a lwp to deliver unref calls if one isn't already running.
2247 	 *
2248 	 * A separate thread is used to deliver unrefs since the current
2249 	 * thread may be holding resources (e.g. locks) in user land that
2250 	 * may be needed by the unref processing. This would cause a
2251 	 * deadlock.
2252 	 */
2253 	if (d->door_flags & DOOR_UNREF_MULTI) {
2254 		/* multiple unrefs */
2255 		d->door_flags &= ~DOOR_DELAY;
2256 	} else {
2257 		/* Only 1 unref per door */
2258 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2259 	}
2260 	mutex_exit(&door_knob);
2261 
2262 	/*
2263 	 * Need to bump the vnode count before putting the door on the
2264 	 * list so it doesn't get prematurely released by door_unref.
2265 	 */
2266 	VN_HOLD(DTOV(d));
2267 
2268 	mutex_enter(&door_knob);
2269 	/* is this door already on the unref list? */
2270 	if (d->door_flags & DOOR_UNREF_MULTI) {
2271 		door_node_t *dp;
2272 		for (dp = server->p_unref_list; dp != NULL;
2273 		    dp = dp->door_ulist) {
2274 			if (d == dp) {
2275 				/* already there, don't need to add another */
2276 				mutex_exit(&door_knob);
2277 				VN_RELE(DTOV(d));
2278 				mutex_enter(&door_knob);
2279 				return;
2280 			}
2281 		}
2282 	}
2283 	ASSERT(d->door_ulist == NULL);
2284 	d->door_ulist = server->p_unref_list;
2285 	server->p_unref_list = d;
2286 	cv_broadcast(&server->p_unref_cv);
2287 }
2288 
2289 /*
2290  * The callers buffer isn't big enough for all of the data/fd's. Allocate
2291  * space in the callers address space for the results and copy the data
2292  * there.
2293  *
2294  * For EOVERFLOW, we must clean up the server's door descriptors.
2295  */
2296 static int
door_overflow(kthread_t * caller,caddr_t data_ptr,size_t data_size,door_desc_t * desc_ptr,uint_t desc_num)2297 door_overflow(
2298 	kthread_t	*caller,
2299 	caddr_t		data_ptr,	/* data location */
2300 	size_t		data_size,	/* data size */
2301 	door_desc_t	*desc_ptr,	/* descriptor location */
2302 	uint_t		desc_num)	/* descriptor size */
2303 {
2304 	proc_t *callerp = ttoproc(caller);
2305 	struct as *as = callerp->p_as;
2306 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2307 	caddr_t	addr;			/* Resulting address in target */
2308 	size_t	rlen;			/* Rounded len */
2309 	size_t	len;
2310 	uint_t	i;
2311 	size_t	ds = desc_num * sizeof (door_desc_t);
2312 
2313 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2314 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2315 
2316 	/* Do initial overflow check */
2317 	if (!ufcanalloc(callerp, desc_num))
2318 		return (EMFILE);
2319 
2320 	/*
2321 	 * Allocate space for this stuff in the callers address space
2322 	 */
2323 	rlen = roundup(data_size + ds, PAGESIZE);
2324 	as_rangelock(as);
2325 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2326 	if (addr == NULL ||
2327 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2328 		/* No virtual memory available, or anon mapping failed */
2329 		as_rangeunlock(as);
2330 		if (!ct->d_kernel && desc_num > 0) {
2331 			int error = door_release_fds(desc_ptr, desc_num);
2332 			if (error)
2333 				return (error);
2334 		}
2335 		return (EOVERFLOW);
2336 	}
2337 	as_rangeunlock(as);
2338 
2339 	if (ct->d_kernel)
2340 		goto out;
2341 
2342 	if (data_size != 0) {
2343 		caddr_t	src = data_ptr;
2344 		caddr_t saddr = addr;
2345 
2346 		/* Copy any data */
2347 		len = data_size;
2348 		while (len != 0) {
2349 			int	amount;
2350 			int	error;
2351 
2352 			amount = len > PAGESIZE ? PAGESIZE : len;
2353 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2354 				(void) as_unmap(as, addr, rlen);
2355 				return (error);
2356 			}
2357 			saddr += amount;
2358 			src += amount;
2359 			len -= amount;
2360 		}
2361 	}
2362 	/* Copy any fd's */
2363 	if (desc_num != 0) {
2364 		door_desc_t	*didpp, *start;
2365 		struct file	**fpp;
2366 		int		fpp_size;
2367 
2368 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2369 		if (copyin_nowatch(desc_ptr, didpp, ds)) {
2370 			kmem_free(start, ds);
2371 			(void) as_unmap(as, addr, rlen);
2372 			return (EFAULT);
2373 		}
2374 
2375 		fpp_size = desc_num * sizeof (struct file *);
2376 		if (fpp_size > ct->d_fpp_size) {
2377 			/* make more space */
2378 			if (ct->d_fpp_size)
2379 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2380 			ct->d_fpp_size = fpp_size;
2381 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2382 		}
2383 		fpp = ct->d_fpp;
2384 
2385 		for (i = 0; i < desc_num; i++) {
2386 			struct file *fp;
2387 			int fd = didpp->d_data.d_desc.d_descriptor;
2388 
2389 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2390 			    (fp = getf(fd)) == NULL) {
2391 				/* close translated references */
2392 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2393 				/* close untranslated references */
2394 				door_fd_rele(didpp, desc_num - i, 0);
2395 				kmem_free(start, ds);
2396 				(void) as_unmap(as, addr, rlen);
2397 				return (EINVAL);
2398 			}
2399 			mutex_enter(&fp->f_tlock);
2400 			fp->f_count++;
2401 			mutex_exit(&fp->f_tlock);
2402 
2403 			*fpp = fp;
2404 			releasef(fd);
2405 
2406 			if (didpp->d_attributes & DOOR_RELEASE) {
2407 				/* release passed reference */
2408 				(void) closeandsetf(fd, NULL);
2409 			}
2410 
2411 			fpp++; didpp++;
2412 		}
2413 		kmem_free(start, ds);
2414 	}
2415 
2416 out:
2417 	ct->d_overflow = 1;
2418 	ct->d_args.rbuf = addr;
2419 	ct->d_args.rsize = rlen;
2420 	return (0);
2421 }
2422 
2423 /*
2424  * Transfer arguments from the client to the server.
2425  */
2426 static int
door_args(kthread_t * server,int is_private)2427 door_args(kthread_t *server, int is_private)
2428 {
2429 	door_server_t *st = DOOR_SERVER(server->t_door);
2430 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2431 	uint_t	ndid;
2432 	size_t	dsize;
2433 	int	error;
2434 
2435 	ASSERT(DOOR_T_HELD(st));
2436 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2437 
2438 	ndid = ct->d_args.desc_num;
2439 	if (ndid > door_max_desc)
2440 		return (E2BIG);
2441 
2442 	/*
2443 	 * Get the stack layout, and fail now if it won't fit.
2444 	 */
2445 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2446 	if (error != 0)
2447 		return (error);
2448 
2449 	dsize = ndid * sizeof (door_desc_t);
2450 	if (ct->d_args.data_size != 0) {
2451 		if (ct->d_args.data_size <= door_max_arg) {
2452 			/*
2453 			 * Use a 2 copy method for small amounts of data
2454 			 *
2455 			 * Allocate a little more than we need for the
2456 			 * args, in the hope that the results will fit
2457 			 * without having to reallocate a buffer
2458 			 */
2459 			ASSERT(ct->d_buf == NULL);
2460 			ct->d_bufsize = roundup(ct->d_args.data_size,
2461 			    DOOR_ROUND);
2462 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2463 			if (copyin_nowatch(ct->d_args.data_ptr,
2464 			    ct->d_buf, ct->d_args.data_size) != 0) {
2465 				kmem_free(ct->d_buf, ct->d_bufsize);
2466 				ct->d_buf = NULL;
2467 				ct->d_bufsize = 0;
2468 				return (EFAULT);
2469 			}
2470 		} else {
2471 			struct as	*as;
2472 			caddr_t		src;
2473 			caddr_t		dest;
2474 			size_t		len = ct->d_args.data_size;
2475 			uintptr_t	base;
2476 
2477 			/*
2478 			 * Use a 1 copy method
2479 			 */
2480 			as = ttoproc(server)->p_as;
2481 			src = ct->d_args.data_ptr;
2482 
2483 			dest = st->d_layout.dl_datap;
2484 			base = (uintptr_t)dest;
2485 
2486 			/*
2487 			 * Copy data directly into server.  We proceed
2488 			 * downward from the top of the stack, to mimic
2489 			 * normal stack usage. This allows the guard page
2490 			 * to stop us before we corrupt anything.
2491 			 */
2492 			while (len != 0) {
2493 				uintptr_t start;
2494 				uintptr_t end;
2495 				uintptr_t offset;
2496 				size_t	amount;
2497 
2498 				/*
2499 				 * Locate the next part to copy.
2500 				 */
2501 				end = base + len;
2502 				start = P2ALIGN(end - 1, PAGESIZE);
2503 
2504 				/*
2505 				 * if we are on the final (first) page, fix
2506 				 * up the start position.
2507 				 */
2508 				if (P2ALIGN(base, PAGESIZE) == start)
2509 					start = base;
2510 
2511 				offset = start - base;	/* the copy offset */
2512 				amount = end - start;	/* # bytes to copy */
2513 
2514 				ASSERT(amount > 0 && amount <= len &&
2515 				    amount <= PAGESIZE);
2516 
2517 				error = door_copy(as, src + offset,
2518 				    dest + offset, amount);
2519 				if (error != 0)
2520 					return (error);
2521 				len -= amount;
2522 			}
2523 		}
2524 	}
2525 	/*
2526 	 * Copyin the door args and translate them into files
2527 	 */
2528 	if (ndid != 0) {
2529 		door_desc_t	*didpp;
2530 		door_desc_t	*start;
2531 		struct file	**fpp;
2532 
2533 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2534 
2535 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2536 			kmem_free(start, dsize);
2537 			return (EFAULT);
2538 		}
2539 		ct->d_fpp_size = ndid * sizeof (struct file *);
2540 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2541 		fpp = ct->d_fpp;
2542 		while (ndid--) {
2543 			struct file *fp;
2544 			int fd = didpp->d_data.d_desc.d_descriptor;
2545 
2546 			/* We only understand file descriptors as passed objs */
2547 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2548 			    (fp = getf(fd)) == NULL) {
2549 				/* close translated references */
2550 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2551 				/* close untranslated references */
2552 				door_fd_rele(didpp, ndid + 1, 0);
2553 				kmem_free(start, dsize);
2554 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2555 				ct->d_fpp = NULL;
2556 				ct->d_fpp_size = 0;
2557 				return (EINVAL);
2558 			}
2559 			/* Hold the fp */
2560 			mutex_enter(&fp->f_tlock);
2561 			fp->f_count++;
2562 			mutex_exit(&fp->f_tlock);
2563 
2564 			*fpp = fp;
2565 			releasef(fd);
2566 
2567 			if (didpp->d_attributes & DOOR_RELEASE) {
2568 				/* release passed reference */
2569 				(void) closeandsetf(fd, NULL);
2570 			}
2571 
2572 			fpp++; didpp++;
2573 		}
2574 		kmem_free(start, dsize);
2575 	}
2576 	return (0);
2577 }
2578 
2579 /*
2580  * Transfer arguments from a user client to a kernel server.  This copies in
2581  * descriptors and translates them into door handles.  It doesn't touch the
2582  * other data, letting the kernel server deal with that (to avoid needing
2583  * to copy the data twice).
2584  */
2585 static int
door_translate_in(void)2586 door_translate_in(void)
2587 {
2588 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2589 	uint_t	ndid;
2590 
2591 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2592 	ndid = ct->d_args.desc_num;
2593 	if (ndid > door_max_desc)
2594 		return (E2BIG);
2595 	/*
2596 	 * Copyin the door args and translate them into door handles.
2597 	 */
2598 	if (ndid != 0) {
2599 		door_desc_t	*didpp;
2600 		door_desc_t	*start;
2601 		size_t		dsize = ndid * sizeof (door_desc_t);
2602 		struct file	*fp;
2603 
2604 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2605 
2606 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2607 			kmem_free(start, dsize);
2608 			return (EFAULT);
2609 		}
2610 		while (ndid--) {
2611 			vnode_t	*vp;
2612 			int fd = didpp->d_data.d_desc.d_descriptor;
2613 
2614 			/*
2615 			 * We only understand file descriptors as passed objs
2616 			 */
2617 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2618 			    (fp = getf(fd)) != NULL) {
2619 				didpp->d_data.d_handle = FTODH(fp);
2620 				/* Hold the door */
2621 				door_ki_hold(didpp->d_data.d_handle);
2622 
2623 				releasef(fd);
2624 
2625 				if (didpp->d_attributes & DOOR_RELEASE) {
2626 					/* release passed reference */
2627 					(void) closeandsetf(fd, NULL);
2628 				}
2629 
2630 				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
2631 					vp = fp->f_vnode;
2632 
2633 				/* Set attributes */
2634 				didpp->d_attributes = DOOR_HANDLE |
2635 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2636 			} else {
2637 				/* close translated references */
2638 				door_fd_close(start, didpp - start);
2639 				/* close untranslated references */
2640 				door_fd_rele(didpp, ndid + 1, 0);
2641 				kmem_free(start, dsize);
2642 				return (EINVAL);
2643 			}
2644 			didpp++;
2645 		}
2646 		ct->d_args.desc_ptr = start;
2647 	}
2648 	return (0);
2649 }
2650 
2651 /*
2652  * Translate door arguments from kernel to user.  This copies the passed
2653  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2654  * and for data returned by a door_call to a kernel server.
2655  */
2656 static int
door_translate_out(void)2657 door_translate_out(void)
2658 {
2659 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2660 	uint_t	ndid;
2661 
2662 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2663 	ndid = ct->d_args.desc_num;
2664 	if (ndid > door_max_desc) {
2665 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2666 		return (E2BIG);
2667 	}
2668 	/*
2669 	 * Translate the door args into files
2670 	 */
2671 	if (ndid != 0) {
2672 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2673 		struct file	**fpp;
2674 
2675 		ct->d_fpp_size = ndid * sizeof (struct file *);
2676 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2677 		while (ndid--) {
2678 			struct file *fp = NULL;
2679 			int fd = -1;
2680 
2681 			/*
2682 			 * We understand file descriptors and door
2683 			 * handles as passed objs.
2684 			 */
2685 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2686 				fd = didpp->d_data.d_desc.d_descriptor;
2687 				fp = getf(fd);
2688 			} else if (didpp->d_attributes & DOOR_HANDLE)
2689 				fp = DHTOF(didpp->d_data.d_handle);
2690 			if (fp != NULL) {
2691 				/* Hold the fp */
2692 				mutex_enter(&fp->f_tlock);
2693 				fp->f_count++;
2694 				mutex_exit(&fp->f_tlock);
2695 
2696 				*fpp = fp;
2697 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2698 					releasef(fd);
2699 				if (didpp->d_attributes & DOOR_RELEASE) {
2700 					/* release passed reference */
2701 					if (fd >= 0)
2702 						(void) closeandsetf(fd, NULL);
2703 					else
2704 						(void) closef(fp);
2705 				}
2706 			} else {
2707 				/* close translated references */
2708 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2709 				/* close untranslated references */
2710 				door_fd_rele(didpp, ndid + 1, 1);
2711 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2712 				ct->d_fpp = NULL;
2713 				ct->d_fpp_size = 0;
2714 				return (EINVAL);
2715 			}
2716 			fpp++; didpp++;
2717 		}
2718 	}
2719 	return (0);
2720 }
2721 
2722 /*
2723  * Move the results from the server to the client
2724  */
2725 static int
door_results(kthread_t * caller,caddr_t data_ptr,size_t data_size,door_desc_t * desc_ptr,uint_t desc_num)2726 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2727 		door_desc_t *desc_ptr, uint_t desc_num)
2728 {
2729 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2730 	door_upcall_t	*dup = ct->d_upcall;
2731 	size_t		dsize;
2732 	size_t		rlen;
2733 	size_t		result_size;
2734 
2735 	ASSERT(DOOR_T_HELD(ct));
2736 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2737 
2738 	if (ct->d_noresults)
2739 		return (E2BIG);		/* No results expected */
2740 
2741 	if (desc_num > door_max_desc)
2742 		return (E2BIG);		/* Too many descriptors */
2743 
2744 	dsize = desc_num * sizeof (door_desc_t);
2745 	/*
2746 	 * Check if the results are bigger than the clients buffer
2747 	 */
2748 	if (dsize)
2749 		rlen = roundup(data_size, sizeof (door_desc_t));
2750 	else
2751 		rlen = data_size;
2752 	if ((result_size = rlen + dsize) == 0)
2753 		return (0);
2754 
2755 	if (dup != NULL) {
2756 		if (desc_num > dup->du_max_descs)
2757 			return (EMFILE);
2758 
2759 		if (data_size > dup->du_max_data)
2760 			return (E2BIG);
2761 
2762 		/*
2763 		 * Handle upcalls
2764 		 */
2765 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2766 			/*
2767 			 * If there's no return buffer or the buffer is too
2768 			 * small, allocate a new one.  The old buffer (if it
2769 			 * exists) will be freed by the upcall client.
2770 			 */
2771 			if (result_size > door_max_upcall_reply)
2772 				return (E2BIG);
2773 			ct->d_args.rsize = result_size;
2774 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2775 		}
2776 		ct->d_args.data_ptr = ct->d_args.rbuf;
2777 		if (data_size != 0 &&
2778 		    copyin_nowatch(data_ptr, ct->d_args.data_ptr,
2779 		    data_size) != 0)
2780 			return (EFAULT);
2781 	} else if (result_size > ct->d_args.rsize) {
2782 		return (door_overflow(caller, data_ptr, data_size,
2783 		    desc_ptr, desc_num));
2784 	} else if (data_size != 0) {
2785 		if (data_size <= door_max_arg) {
2786 			/*
2787 			 * Use a 2 copy method for small amounts of data
2788 			 */
2789 			if (ct->d_buf == NULL) {
2790 				ct->d_bufsize = data_size;
2791 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2792 			} else if (ct->d_bufsize < data_size) {
2793 				kmem_free(ct->d_buf, ct->d_bufsize);
2794 				ct->d_bufsize = data_size;
2795 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2796 			}
2797 			if (copyin_nowatch(data_ptr, ct->d_buf, data_size) != 0)
2798 				return (EFAULT);
2799 		} else {
2800 			struct as *as = ttoproc(caller)->p_as;
2801 			caddr_t	dest = ct->d_args.rbuf;
2802 			caddr_t	src = data_ptr;
2803 			size_t	len = data_size;
2804 
2805 			/* Copy data directly into client */
2806 			while (len != 0) {
2807 				uint_t	amount;
2808 				uint_t	max;
2809 				uint_t	off;
2810 				int	error;
2811 
2812 				off = (uintptr_t)dest & PAGEOFFSET;
2813 				if (off)
2814 					max = PAGESIZE - off;
2815 				else
2816 					max = PAGESIZE;
2817 				amount = len > max ? max : len;
2818 				error = door_copy(as, src, dest, amount);
2819 				if (error != 0)
2820 					return (error);
2821 				dest += amount;
2822 				src += amount;
2823 				len -= amount;
2824 			}
2825 		}
2826 	}
2827 
2828 	/*
2829 	 * Copyin the returned door ids and translate them into door_node_t
2830 	 */
2831 	if (desc_num != 0) {
2832 		door_desc_t *start;
2833 		door_desc_t *didpp;
2834 		struct file **fpp;
2835 		size_t	fpp_size;
2836 		uint_t	i;
2837 
2838 		/* First, check if we would overflow client */
2839 		if (!ufcanalloc(ttoproc(caller), desc_num))
2840 			return (EMFILE);
2841 
2842 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2843 		if (copyin_nowatch(desc_ptr, didpp, dsize)) {
2844 			kmem_free(start, dsize);
2845 			return (EFAULT);
2846 		}
2847 		fpp_size = desc_num * sizeof (struct file *);
2848 		if (fpp_size > ct->d_fpp_size) {
2849 			/* make more space */
2850 			if (ct->d_fpp_size)
2851 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2852 			ct->d_fpp_size = fpp_size;
2853 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2854 		}
2855 		fpp = ct->d_fpp;
2856 
2857 		for (i = 0; i < desc_num; i++) {
2858 			struct file *fp;
2859 			int fd = didpp->d_data.d_desc.d_descriptor;
2860 
2861 			/* Only understand file descriptor results */
2862 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2863 			    (fp = getf(fd)) == NULL) {
2864 				/* close translated references */
2865 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2866 				/* close untranslated references */
2867 				door_fd_rele(didpp, desc_num - i, 0);
2868 				kmem_free(start, dsize);
2869 				return (EINVAL);
2870 			}
2871 
2872 			mutex_enter(&fp->f_tlock);
2873 			fp->f_count++;
2874 			mutex_exit(&fp->f_tlock);
2875 
2876 			*fpp = fp;
2877 			releasef(fd);
2878 
2879 			if (didpp->d_attributes & DOOR_RELEASE) {
2880 				/* release passed reference */
2881 				(void) closeandsetf(fd, NULL);
2882 			}
2883 
2884 			fpp++; didpp++;
2885 		}
2886 		kmem_free(start, dsize);
2887 	}
2888 	return (0);
2889 }
2890 
2891 /*
2892  * Close all the descriptors.
2893  */
2894 static void
door_fd_close(door_desc_t * d,uint_t n)2895 door_fd_close(door_desc_t *d, uint_t n)
2896 {
2897 	uint_t	i;
2898 
2899 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2900 	for (i = 0; i < n; i++) {
2901 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2902 			(void) closeandsetf(
2903 			    d->d_data.d_desc.d_descriptor, NULL);
2904 		} else if (d->d_attributes & DOOR_HANDLE) {
2905 			door_ki_rele(d->d_data.d_handle);
2906 		}
2907 		d++;
2908 	}
2909 }
2910 
2911 /*
2912  * Close descriptors that have the DOOR_RELEASE attribute set.
2913  */
2914 void
door_fd_rele(door_desc_t * d,uint_t n,int from_kernel)2915 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2916 {
2917 	uint_t	i;
2918 
2919 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2920 	for (i = 0; i < n; i++) {
2921 		if (d->d_attributes & DOOR_RELEASE) {
2922 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2923 				(void) closeandsetf(
2924 				    d->d_data.d_desc.d_descriptor, NULL);
2925 			} else if (from_kernel &&
2926 			    (d->d_attributes & DOOR_HANDLE)) {
2927 				door_ki_rele(d->d_data.d_handle);
2928 			}
2929 		}
2930 		d++;
2931 	}
2932 }
2933 
2934 /*
2935  * Copy descriptors into the kernel so we can release any marked
2936  * DOOR_RELEASE.
2937  */
2938 int
door_release_fds(door_desc_t * desc_ptr,uint_t ndesc)2939 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2940 {
2941 	size_t dsize;
2942 	door_desc_t *didpp;
2943 	uint_t desc_num;
2944 
2945 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2946 	ASSERT(ndesc != 0);
2947 
2948 	desc_num = MIN(ndesc, door_max_desc);
2949 
2950 	dsize = desc_num * sizeof (door_desc_t);
2951 	didpp = kmem_alloc(dsize, KM_SLEEP);
2952 
2953 	while (ndesc > 0) {
2954 		uint_t count = MIN(ndesc, desc_num);
2955 
2956 		if (copyin_nowatch(desc_ptr, didpp,
2957 		    count * sizeof (door_desc_t))) {
2958 			kmem_free(didpp, dsize);
2959 			return (EFAULT);
2960 		}
2961 		door_fd_rele(didpp, count, 0);
2962 
2963 		ndesc -= count;
2964 		desc_ptr += count;
2965 	}
2966 	kmem_free(didpp, dsize);
2967 	return (0);
2968 }
2969 
2970 /*
2971  * Decrement ref count on all the files passed
2972  */
2973 static void
door_fp_close(struct file ** fp,uint_t n)2974 door_fp_close(struct file **fp, uint_t n)
2975 {
2976 	uint_t	i;
2977 
2978 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2979 
2980 	for (i = 0; i < n; i++)
2981 		(void) closef(fp[i]);
2982 }
2983 
2984 /*
2985  * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
2986  * bytes.
2987  *
2988  * Performs this using 1 mapin and 1 copy operation.
2989  *
2990  * We really should do more than 1 page at a time to improve
2991  * performance, but for now this is treated as an anomalous condition.
2992  */
2993 static int
door_copy(struct as * as,caddr_t src,caddr_t dest,uint_t len)2994 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2995 {
2996 	caddr_t	kaddr;
2997 	caddr_t	rdest;
2998 	uint_t	off;
2999 	page_t	**pplist;
3000 	page_t	*pp = NULL;
3001 	int	error = 0;
3002 
3003 	ASSERT(len <= PAGESIZE);
3004 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
3005 	rdest = (caddr_t)((uintptr_t)dest &
3006 	    (uintptr_t)PAGEMASK);	/* Page boundary */
3007 	ASSERT(off + len <= PAGESIZE);
3008 
3009 	/*
3010 	 * Lock down destination page.
3011 	 */
3012 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
3013 		return (E2BIG);
3014 	/*
3015 	 * Check if we have a shadow page list from as_pagelock. If not,
3016 	 * we took the slow path and have to find our page struct the hard
3017 	 * way.
3018 	 */
3019 	if (pplist == NULL) {
3020 		pfn_t	pfnum;
3021 
3022 		/* MMU mapping is already locked down */
3023 		AS_LOCK_ENTER(as, RW_READER);
3024 		pfnum = hat_getpfnum(as->a_hat, rdest);
3025 		AS_LOCK_EXIT(as);
3026 
3027 		/*
3028 		 * TODO: The pfn step should not be necessary - need
3029 		 * a hat_getpp() function.
3030 		 */
3031 		if (pf_is_memory(pfnum)) {
3032 			pp = page_numtopp_nolock(pfnum);
3033 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
3034 		} else
3035 			pp = NULL;
3036 		if (pp == NULL) {
3037 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3038 			return (E2BIG);
3039 		}
3040 	} else {
3041 		pp = *pplist;
3042 	}
3043 	/*
3044 	 * Map destination page into kernel address
3045 	 */
3046 	if (kpm_enable)
3047 		kaddr = (caddr_t)hat_kpm_mapin(pp, (struct kpme *)NULL);
3048 	else
3049 		kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE,
3050 		    (caddr_t)-1);
3051 
3052 	/*
3053 	 * Copy from src to dest
3054 	 */
3055 	if (copyin_nowatch(src, kaddr + off, len) != 0)
3056 		error = EFAULT;
3057 	/*
3058 	 * Unmap destination page from kernel
3059 	 */
3060 	if (kpm_enable)
3061 		hat_kpm_mapout(pp, (struct kpme *)NULL, kaddr);
3062 	else
3063 		ppmapout(kaddr);
3064 	/*
3065 	 * Unlock destination page
3066 	 */
3067 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3068 	return (error);
3069 }
3070 
3071 /*
3072  * General kernel upcall using doors
3073  *	Returns 0 on success, errno for failures.
3074  *	Caller must have a hold on the door based vnode, and on any
3075  *	references passed in desc_ptr.  The references are released
3076  *	in the event of an error, and passed without duplication
3077  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
3078  *	a 64-bit kernel, since it may be used to store door descriptors
3079  *	if they are returned by the server.  The caller is responsible
3080  *	for holding a reference to the cred passed in.
3081  */
3082 int
door_upcall(vnode_t * vp,door_arg_t * param,struct cred * cred,size_t max_data,uint_t max_descs)3083 door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred,
3084     size_t max_data, uint_t max_descs)
3085 {
3086 	/* Locals */
3087 	door_upcall_t	*dup;
3088 	door_node_t	*dp;
3089 	kthread_t	*server_thread;
3090 	int		error = 0;
3091 	klwp_t		*lwp;
3092 	door_client_t	*ct;		/* curthread door_data */
3093 	door_server_t	*st;		/* server thread door_data */
3094 	int		gotresults = 0;
3095 	int		cancel_pending;
3096 
3097 	if (vp->v_type != VDOOR) {
3098 		if (param->desc_num)
3099 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3100 		return (EINVAL);
3101 	}
3102 
3103 	lwp = ttolwp(curthread);
3104 	ct = door_my_client(1);
3105 	dp = VTOD(vp);	/* Convert to a door_node_t */
3106 
3107 	dup = kmem_zalloc(sizeof (*dup), KM_SLEEP);
3108 	dup->du_cred = (cred != NULL) ? cred : curthread->t_cred;
3109 	dup->du_max_data = max_data;
3110 	dup->du_max_descs = max_descs;
3111 
3112 	/*
3113 	 * This should be done in shuttle_resume(), just before going to
3114 	 * sleep, but we want to avoid overhead while holding door_knob.
3115 	 * prstop() is just a no-op if we don't really go to sleep.
3116 	 * We test not-kernel-address-space for the sake of clustering code.
3117 	 */
3118 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
3119 		prstop(PR_REQUESTED, 0);
3120 
3121 	mutex_enter(&door_knob);
3122 	if (DOOR_INVALID(dp)) {
3123 		mutex_exit(&door_knob);
3124 		if (param->desc_num)
3125 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3126 		error = EBADF;
3127 		goto out;
3128 	}
3129 
3130 	if (dp->door_target == &p0) {
3131 		/* Can't do an upcall to a kernel server */
3132 		mutex_exit(&door_knob);
3133 		if (param->desc_num)
3134 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3135 		error = EINVAL;
3136 		goto out;
3137 	}
3138 
3139 	error = door_check_limits(dp, param, 1);
3140 	if (error != 0) {
3141 		mutex_exit(&door_knob);
3142 		if (param->desc_num)
3143 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3144 		goto out;
3145 	}
3146 
3147 	/*
3148 	 * Get a server thread from the target domain
3149 	 */
3150 	if ((server_thread = door_get_server(dp)) == NULL) {
3151 		if (DOOR_INVALID(dp))
3152 			error = EBADF;
3153 		else
3154 			error = EAGAIN;
3155 		mutex_exit(&door_knob);
3156 		if (param->desc_num)
3157 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3158 		goto out;
3159 	}
3160 
3161 	st = DOOR_SERVER(server_thread->t_door);
3162 	ct->d_buf = param->data_ptr;
3163 	ct->d_bufsize = param->data_size;
3164 	ct->d_args = *param;	/* structure assignment */
3165 
3166 	if (ct->d_args.desc_num) {
3167 		/*
3168 		 * Move data from client to server
3169 		 */
3170 		DOOR_T_HOLD(st);
3171 		mutex_exit(&door_knob);
3172 		error = door_translate_out();
3173 		mutex_enter(&door_knob);
3174 		DOOR_T_RELEASE(st);
3175 		if (error) {
3176 			/*
3177 			 * We're not going to resume this thread after all
3178 			 */
3179 			door_release_server(dp, server_thread);
3180 			shuttle_sleep(server_thread);
3181 			mutex_exit(&door_knob);
3182 			goto out;
3183 		}
3184 	}
3185 
3186 	ct->d_upcall = dup;
3187 	if (param->rsize == 0)
3188 		ct->d_noresults = 1;
3189 	else
3190 		ct->d_noresults = 0;
3191 
3192 	dp->door_active++;
3193 
3194 	ct->d_error = DOOR_WAIT;
3195 	st->d_caller = curthread;
3196 	st->d_active = dp;
3197 
3198 	shuttle_resume(server_thread, &door_knob);
3199 
3200 	mutex_enter(&door_knob);
3201 shuttle_return:
3202 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3203 		/*
3204 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3205 		 */
3206 		mutex_exit(&door_knob);		/* May block in ISSIG */
3207 		cancel_pending = 0;
3208 		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3209 		    MUSTRETURN(curproc, curthread) ||
3210 		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
3211 			/* Signal, forkall, ... */
3212 			if (cancel_pending)
3213 				schedctl_cancel_eintr();
3214 			lwp->lwp_sysabort = 0;
3215 			mutex_enter(&door_knob);
3216 			error = EINTR;
3217 			/*
3218 			 * If the server has finished processing our call,
3219 			 * or exited (calling door_slam()), then d_error
3220 			 * will have changed.  If the server hasn't finished
3221 			 * yet, d_error will still be DOOR_WAIT, and we
3222 			 * let it know we are not interested in any
3223 			 * results by sending a SIGCANCEL, unless the door
3224 			 * is marked with DOOR_NO_CANCEL.
3225 			 */
3226 			if (ct->d_error == DOOR_WAIT &&
3227 			    st->d_caller == curthread) {
3228 				proc_t	*p = ttoproc(server_thread);
3229 
3230 				st->d_active = NULL;
3231 				st->d_caller = NULL;
3232 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3233 					DOOR_T_HOLD(st);
3234 					mutex_exit(&door_knob);
3235 
3236 					mutex_enter(&p->p_lock);
3237 					sigtoproc(p, server_thread, SIGCANCEL);
3238 					mutex_exit(&p->p_lock);
3239 
3240 					mutex_enter(&door_knob);
3241 					DOOR_T_RELEASE(st);
3242 				}
3243 			}
3244 		} else {
3245 			/*
3246 			 * Return from stop(), server exit...
3247 			 *
3248 			 * Note that the server could have done a
3249 			 * door_return while the client was in stop state
3250 			 * (ISSIG), in which case the error condition
3251 			 * is updated by the server.
3252 			 */
3253 			mutex_enter(&door_knob);
3254 			if (ct->d_error == DOOR_WAIT) {
3255 				/* Still waiting for a reply */
3256 				shuttle_swtch(&door_knob);
3257 				mutex_enter(&door_knob);
3258 				if (lwp)
3259 					lwp->lwp_asleep = 0;
3260 				goto	shuttle_return;
3261 			} else if (ct->d_error == DOOR_EXIT) {
3262 				/* Server exit */
3263 				error = EINTR;
3264 			} else {
3265 				/* Server did a door_return during ISSIG */
3266 				error = ct->d_error;
3267 			}
3268 		}
3269 		/*
3270 		 * Can't exit if the server is currently copying
3271 		 * results for me
3272 		 */
3273 		while (DOOR_T_HELD(ct))
3274 			cv_wait(&ct->d_cv, &door_knob);
3275 
3276 		/*
3277 		 * Find out if results were successfully copied.
3278 		 */
3279 		if (ct->d_error == 0)
3280 			gotresults = 1;
3281 	}
3282 	if (lwp) {
3283 		lwp->lwp_asleep = 0;		/* /proc */
3284 		lwp->lwp_sysabort = 0;		/* /proc */
3285 	}
3286 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3287 		door_deliver_unref(dp);
3288 	mutex_exit(&door_knob);
3289 
3290 	/*
3291 	 * Translate returned doors (if any)
3292 	 */
3293 
3294 	if (ct->d_noresults)
3295 		goto out;
3296 
3297 	if (error) {
3298 		/*
3299 		 * If server returned results successfully, then we've
3300 		 * been interrupted and may need to clean up.
3301 		 */
3302 		if (gotresults) {
3303 			ASSERT(error == EINTR);
3304 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3305 		}
3306 		goto out;
3307 	}
3308 
3309 	if (ct->d_args.desc_num) {
3310 		struct file	**fpp;
3311 		door_desc_t	*didpp;
3312 		vnode_t		*vp;
3313 		uint_t		n = ct->d_args.desc_num;
3314 
3315 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3316 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3317 		fpp = ct->d_fpp;
3318 
3319 		while (n--) {
3320 			struct file *fp;
3321 
3322 			fp = *fpp;
3323 			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3324 				vp = fp->f_vnode;
3325 
3326 			didpp->d_attributes = DOOR_HANDLE |
3327 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3328 			didpp->d_data.d_handle = FTODH(fp);
3329 
3330 			fpp++; didpp++;
3331 		}
3332 	}
3333 
3334 	/* on return data is in rbuf */
3335 	*param = ct->d_args;		/* structure assignment */
3336 
3337 out:
3338 	kmem_free(dup, sizeof (*dup));
3339 
3340 	if (ct->d_fpp) {
3341 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3342 		ct->d_fpp = NULL;
3343 		ct->d_fpp_size = 0;
3344 	}
3345 
3346 	ct->d_upcall = NULL;
3347 	ct->d_noresults = 0;
3348 	ct->d_buf = NULL;
3349 	ct->d_bufsize = 0;
3350 	return (error);
3351 }
3352 
3353 /*
3354  * Add a door to the per-process list of active doors for which the
3355  * process is a server.
3356  */
3357 static void
door_list_insert(door_node_t * dp)3358 door_list_insert(door_node_t *dp)
3359 {
3360 	proc_t *p = dp->door_target;
3361 
3362 	ASSERT(MUTEX_HELD(&door_knob));
3363 	dp->door_list = p->p_door_list;
3364 	p->p_door_list = dp;
3365 }
3366 
3367 /*
3368  * Remove a door from the per-process list of active doors.
3369  */
3370 void
door_list_delete(door_node_t * dp)3371 door_list_delete(door_node_t *dp)
3372 {
3373 	door_node_t **pp;
3374 
3375 	ASSERT(MUTEX_HELD(&door_knob));
3376 	/*
3377 	 * Find the door in the list.  If the door belongs to another process,
3378 	 * it's OK to use p_door_list since that process can't exit until all
3379 	 * doors have been taken off the list (see door_exit).
3380 	 */
3381 	pp = &(dp->door_target->p_door_list);
3382 	while (*pp != dp)
3383 		pp = &((*pp)->door_list);
3384 
3385 	/* found it, take it off the list */
3386 	*pp = dp->door_list;
3387 }
3388 
3389 
3390 /*
3391  * External kernel interfaces for doors.  These functions are available
3392  * outside the doorfs module for use in creating and using doors from
3393  * within the kernel.
3394  */
3395 
3396 /*
3397  * door_ki_upcall invokes a user-level door server from the kernel, with
3398  * the credentials associated with curthread.
3399  */
3400 int
door_ki_upcall(door_handle_t dh,door_arg_t * param)3401 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3402 {
3403 	return (door_ki_upcall_limited(dh, param, NULL, SIZE_MAX, UINT_MAX));
3404 }
3405 
3406 /*
3407  * door_ki_upcall_limited invokes a user-level door server from the
3408  * kernel with the given credentials and reply limits.  If the "cred"
3409  * argument is NULL, uses the credentials associated with current
3410  * thread.  max_data limits the maximum length of the returned data (the
3411  * client will get E2BIG if they go over), and max_desc limits the
3412  * number of returned descriptors (the client will get EMFILE if they
3413  * go over).
3414  */
3415 int
door_ki_upcall_limited(door_handle_t dh,door_arg_t * param,struct cred * cred,size_t max_data,uint_t max_desc)3416 door_ki_upcall_limited(door_handle_t dh, door_arg_t *param, struct cred *cred,
3417     size_t max_data, uint_t max_desc)
3418 {
3419 	file_t *fp = DHTOF(dh);
3420 	vnode_t *realvp;
3421 
3422 	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
3423 		realvp = fp->f_vnode;
3424 	return (door_upcall(realvp, param, cred, max_data, max_desc));
3425 }
3426 
3427 /*
3428  * Function call to create a "kernel" door server.  A kernel door
3429  * server provides a way for a user-level process to invoke a function
3430  * in the kernel through a door_call.  From the caller's point of
3431  * view, a kernel door server looks the same as a user-level one
3432  * (except the server pid is 0).  Unlike normal door calls, the
3433  * kernel door function is invoked via a normal function call in the
3434  * same thread and context as the caller.
3435  */
3436 int
door_ki_create(void (* pc_cookie)(),void * data_cookie,uint_t attributes,door_handle_t * dhp)3437 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3438     door_handle_t *dhp)
3439 {
3440 	int err;
3441 	file_t *fp;
3442 
3443 	/* no DOOR_PRIVATE */
3444 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3445 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3446 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3447 		return (EINVAL);
3448 
3449 	err = door_create_common(pc_cookie, data_cookie, attributes,
3450 	    1, NULL, &fp);
3451 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3452 	    p0.p_unref_thread == 0) {
3453 		/* need to create unref thread for process 0 */
3454 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3455 		    TS_RUN, minclsyspri);
3456 	}
3457 	if (err == 0) {
3458 		*dhp = FTODH(fp);
3459 	}
3460 	return (err);
3461 }
3462 
3463 void
door_ki_hold(door_handle_t dh)3464 door_ki_hold(door_handle_t dh)
3465 {
3466 	file_t *fp = DHTOF(dh);
3467 
3468 	mutex_enter(&fp->f_tlock);
3469 	fp->f_count++;
3470 	mutex_exit(&fp->f_tlock);
3471 }
3472 
3473 void
door_ki_rele(door_handle_t dh)3474 door_ki_rele(door_handle_t dh)
3475 {
3476 	file_t *fp = DHTOF(dh);
3477 
3478 	(void) closef(fp);
3479 }
3480 
3481 int
door_ki_open(char * pathname,door_handle_t * dhp)3482 door_ki_open(char *pathname, door_handle_t *dhp)
3483 {
3484 	file_t *fp;
3485 	vnode_t *vp;
3486 	int err;
3487 
3488 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3489 		return (err);
3490 	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
3491 		VN_RELE(vp);
3492 		return (err);
3493 	}
3494 	if (vp->v_type != VDOOR) {
3495 		VN_RELE(vp);
3496 		return (EINVAL);
3497 	}
3498 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3499 		VN_RELE(vp);
3500 		return (err);
3501 	}
3502 	/* falloc returns with f_tlock held on success */
3503 	mutex_exit(&fp->f_tlock);
3504 	*dhp = FTODH(fp);
3505 	return (0);
3506 }
3507 
3508 int
door_ki_info(door_handle_t dh,struct door_info * dip)3509 door_ki_info(door_handle_t dh, struct door_info *dip)
3510 {
3511 	file_t *fp = DHTOF(dh);
3512 	vnode_t *vp;
3513 
3514 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3515 		vp = fp->f_vnode;
3516 	if (vp->v_type != VDOOR)
3517 		return (EINVAL);
3518 	door_info_common(VTOD(vp), dip, fp);
3519 	return (0);
3520 }
3521 
3522 door_handle_t
door_ki_lookup(int did)3523 door_ki_lookup(int did)
3524 {
3525 	file_t *fp;
3526 	door_handle_t dh;
3527 
3528 	/* is the descriptor really a door? */
3529 	if (door_lookup(did, &fp) == NULL)
3530 		return (NULL);
3531 	/* got the door, put a hold on it and release the fd */
3532 	dh = FTODH(fp);
3533 	door_ki_hold(dh);
3534 	releasef(did);
3535 	return (dh);
3536 }
3537 
3538 int
door_ki_setparam(door_handle_t dh,int type,size_t val)3539 door_ki_setparam(door_handle_t dh, int type, size_t val)
3540 {
3541 	file_t *fp = DHTOF(dh);
3542 	vnode_t *vp;
3543 
3544 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3545 		vp = fp->f_vnode;
3546 	if (vp->v_type != VDOOR)
3547 		return (EINVAL);
3548 	return (door_setparam_common(VTOD(vp), 1, type, val));
3549 }
3550 
3551 int
door_ki_getparam(door_handle_t dh,int type,size_t * out)3552 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3553 {
3554 	file_t *fp = DHTOF(dh);
3555 	vnode_t *vp;
3556 
3557 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3558 		vp = fp->f_vnode;
3559 	if (vp->v_type != VDOOR)
3560 		return (EINVAL);
3561 	return (door_getparam_common(VTOD(vp), type, out));
3562 }
3563