xref: /illumos-gate/usr/src/uts/common/fs/doorfs/door_sys.c (revision 0250c53ad267726f2438e3c6556199a0bbf588a2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  * Copyright 2021 Tintri by DDN, Inc. All rights reserved.
26  */
27 
28 /*
29  * System call I/F to doors (outside of vnodes I/F) and misc support
30  * routines
31  */
32 #include <sys/types.h>
33 #include <sys/systm.h>
34 #include <sys/door.h>
35 #include <sys/door_data.h>
36 #include <sys/proc.h>
37 #include <sys/thread.h>
38 #include <sys/prsystm.h>
39 #include <sys/procfs.h>
40 #include <sys/class.h>
41 #include <sys/cred.h>
42 #include <sys/kmem.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stack.h>
45 #include <sys/debug.h>
46 #include <sys/cpuvar.h>
47 #include <sys/file.h>
48 #include <sys/fcntl.h>
49 #include <sys/vnode.h>
50 #include <sys/vfs.h>
51 #include <sys/vfs_opreg.h>
52 #include <sys/sobject.h>
53 #include <sys/schedctl.h>
54 #include <sys/callb.h>
55 #include <sys/ucred.h>
56 
57 #include <sys/mman.h>
58 #include <sys/sysmacros.h>
59 #include <sys/vmsystm.h>
60 #include <vm/as.h>
61 #include <vm/hat.h>
62 #include <vm/page.h>
63 #include <vm/seg.h>
64 #include <vm/seg_vn.h>
65 #include <vm/seg_vn.h>
66 #include <vm/seg_kpm.h>
67 
68 #include <sys/modctl.h>
69 #include <sys/syscall.h>
70 #include <sys/pathname.h>
71 #include <sys/rctl.h>
72 
73 /*
74  * The maximum amount of data (in bytes) that will be transferred using
75  * an intermediate kernel buffer.  For sizes greater than this we map
76  * in the destination pages and perform a 1-copy transfer.
77  */
78 size_t	door_max_arg = 16 * 1024;
79 
80 /*
81  * Maximum amount of data that will be transferred in a reply to a
82  * door_upcall.  Need to guard against a process returning huge amounts
83  * of data and getting the kernel stuck in kmem_alloc.
84  */
85 size_t	door_max_upcall_reply = 4 * 1024 * 1024;
86 
87 /*
88  * Maximum number of descriptors allowed to be passed in a single
89  * door_call or door_return.  We need to allocate kernel memory
90  * for all of them at once, so we can't let it scale without limit.
91  */
92 uint_t door_max_desc = 1024;
93 
94 /*
95  * Definition of a door handle, used by other kernel subsystems when
96  * calling door functions.  This is really a file structure but we
97  * want to hide that fact.
98  */
99 struct __door_handle {
100 	file_t dh_file;
101 };
102 
103 #define	DHTOF(dh) ((file_t *)(dh))
104 #define	FTODH(fp) ((door_handle_t)(fp))
105 
106 static int doorfs(long, long, long, long, long, long);
107 
108 static struct sysent door_sysent = {
109 	6,
110 	SE_ARGC | SE_NOUNLOAD,
111 	(int (*)())doorfs,
112 };
113 
114 static struct modlsys modlsys = {
115 	&mod_syscallops, "doors", &door_sysent
116 };
117 
118 #ifdef _SYSCALL32_IMPL
119 
120 static int
121 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
122     int32_t arg5, int32_t subcode);
123 
124 static struct sysent door_sysent32 = {
125 	6,
126 	SE_ARGC | SE_NOUNLOAD,
127 	(int (*)())doorfs32,
128 };
129 
130 static struct modlsys modlsys32 = {
131 	&mod_syscallops32,
132 	"32-bit door syscalls",
133 	&door_sysent32
134 };
135 #endif
136 
137 static struct modlinkage modlinkage = {
138 	MODREV_1,
139 	&modlsys,
140 #ifdef _SYSCALL32_IMPL
141 	&modlsys32,
142 #endif
143 	NULL
144 };
145 
146 dev_t	doordev;
147 
148 extern	struct vfs door_vfs;
149 extern	struct vnodeops *door_vnodeops;
150 
151 int
152 _init(void)
153 {
154 	static const fs_operation_def_t door_vfsops_template[] = {
155 		NULL, NULL
156 	};
157 	extern const fs_operation_def_t door_vnodeops_template[];
158 	vfsops_t *door_vfsops;
159 	major_t major;
160 	int error;
161 
162 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
163 	if ((major = getudev()) == (major_t)-1)
164 		return (ENXIO);
165 	doordev = makedevice(major, 0);
166 
167 	/* Create a dummy vfs */
168 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
169 	if (error != 0) {
170 		cmn_err(CE_WARN, "door init: bad vfs ops");
171 		return (error);
172 	}
173 	VFS_INIT(&door_vfs, door_vfsops, NULL);
174 	door_vfs.vfs_flag = VFS_RDONLY;
175 	door_vfs.vfs_dev = doordev;
176 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
177 
178 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
179 	if (error != 0) {
180 		vfs_freevfsops(door_vfsops);
181 		cmn_err(CE_WARN, "door init: bad vnode ops");
182 		return (error);
183 	}
184 	return (mod_install(&modlinkage));
185 }
186 
187 int
188 _info(struct modinfo *modinfop)
189 {
190 	return (mod_info(&modlinkage, modinfop));
191 }
192 
193 /* system call functions */
194 static int door_call(int, void *);
195 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
196 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
197     uint_t), void *data_cookie, uint_t);
198 static int door_revoke(int);
199 static int door_info(int, struct door_info *);
200 static int door_ucred(struct ucred_s *);
201 static int door_bind(int);
202 static int door_unbind(void);
203 static int door_unref(void);
204 static int door_getparam(int, int, size_t *);
205 static int door_setparam(int, int, size_t);
206 
207 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
208 
209 /*
210  * System call wrapper for all door related system calls
211  */
212 static int
213 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
214 {
215 	switch (subcode) {
216 	case DOOR_CALL:
217 		return (door_call(arg1, (void *)arg2));
218 	case DOOR_RETURN: {
219 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
220 
221 		if (drdp != NULL) {
222 			door_return_desc_t drd;
223 			if (copyin(drdp, &drd, sizeof (drd)))
224 				return (EFAULT);
225 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
226 			    drd.desc_num, (caddr_t)arg4, arg5));
227 		}
228 		return (door_return((caddr_t)arg1, arg2, NULL,
229 		    0, (caddr_t)arg4, arg5));
230 	}
231 	case DOOR_RETURN_OLD:
232 		/*
233 		 * In order to support the S10 runtime environment, we
234 		 * still respond to the old syscall subcode for door_return.
235 		 * We treat it as having no stack limits.  This code should
236 		 * be removed when such support is no longer needed.
237 		 */
238 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
239 		    arg4, (caddr_t)arg5, 0));
240 	case DOOR_CREATE:
241 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
242 	case DOOR_REVOKE:
243 		return (door_revoke(arg1));
244 	case DOOR_INFO:
245 		return (door_info(arg1, (struct door_info *)arg2));
246 	case DOOR_BIND:
247 		return (door_bind(arg1));
248 	case DOOR_UNBIND:
249 		return (door_unbind());
250 	case DOOR_UNREFSYS:
251 		return (door_unref());
252 	case DOOR_UCRED:
253 		return (door_ucred((struct ucred_s *)arg1));
254 	case DOOR_GETPARAM:
255 		return (door_getparam(arg1, arg2, (size_t *)arg3));
256 	case DOOR_SETPARAM:
257 		return (door_setparam(arg1, arg2, arg3));
258 	default:
259 		return (set_errno(EINVAL));
260 	}
261 }
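/*
 * Illustrative sketch (not part of this file): applications normally reach
 * the DOOR_* subcodes above through the libc door API rather than the raw
 * doorfs trap.  A minimal user-level client, assuming a server has already
 * fattach()ed a door at the hypothetical path /var/run/example_door:
 */
#include <door.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
example_client(void)
{
	char request[] = "ping";
	char reply[128];
	door_arg_t da;
	int fd;

	if ((fd = open("/var/run/example_door", O_RDONLY)) < 0)
		return (-1);

	da.data_ptr = request;		/* request bytes copied to the server */
	da.data_size = sizeof (request);
	da.desc_ptr = NULL;		/* no descriptors passed */
	da.desc_num = 0;
	da.rbuf = reply;		/* replies land here, or are mapped */
	da.rsize = sizeof (reply);	/* elsewhere if they do not fit */

	if (door_call(fd, &da) < 0) {	/* DOOR_CALL subcode under the hood */
		(void) close(fd);
		return (-1);
	}
	(void) printf("server replied with %zu bytes\n", da.data_size);
	(void) close(fd);
	return (0);
}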
262 
263 #ifdef _SYSCALL32_IMPL
264 /*
265  * System call wrapper for all door related system calls from 32-bit programs.
266  * Needed at the moment because of the casts - they undo some damage
267  * that truss causes (sign-extending the stack pointer) when truss'ing
268  * a 32-bit program using doors.
269  */
270 static int
271 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
272     int32_t arg4, int32_t arg5, int32_t subcode)
273 {
274 	switch (subcode) {
275 	case DOOR_CALL:
276 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
277 	case DOOR_RETURN: {
278 		door_return_desc32_t *drdp =
279 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
280 		if (drdp != NULL) {
281 			door_return_desc32_t drd;
282 			if (copyin(drdp, &drd, sizeof (drd)))
283 				return (EFAULT);
284 			return (door_return(
285 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
286 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
287 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
288 			    (size_t)(uintptr_t)(size32_t)arg5));
289 		}
290 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
291 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
292 		    (size_t)(uintptr_t)(size32_t)arg5));
293 	}
294 	case DOOR_RETURN_OLD:
295 		/*
296 		 * In order to support the S10 runtime environment, we
297 		 * still respond to the old syscall subcode for door_return.
298 		 * We treat it as having no stack limits.  This code should
299 		 * be removed when such support is no longer needed.
300 		 */
301 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
302 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
303 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
304 	case DOOR_CREATE:
305 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
306 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
307 	case DOOR_REVOKE:
308 		return (door_revoke(arg1));
309 	case DOOR_INFO:
310 		return (door_info(arg1,
311 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
312 	case DOOR_BIND:
313 		return (door_bind(arg1));
314 	case DOOR_UNBIND:
315 		return (door_unbind());
316 	case DOOR_UNREFSYS:
317 		return (door_unref());
318 	case DOOR_UCRED:
319 		return (door_ucred(
320 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
321 	case DOOR_GETPARAM:
322 		return (door_getparam(arg1, arg2,
323 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
324 	case DOOR_SETPARAM:
325 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
326 
327 	default:
328 		return (set_errno(EINVAL));
329 	}
330 }
331 #endif
332 
333 void shuttle_resume(kthread_t *, kmutex_t *);
334 void shuttle_swtch(kmutex_t *);
335 void shuttle_sleep(kthread_t *);
336 
337 /*
338  * Support routines
339  */
340 static int door_create_common(void (*)(), void *, uint_t, int, int *,
341     file_t **);
342 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_args(kthread_t *, int);
344 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
345 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
346 static void	door_server_exit(proc_t *, kthread_t *);
347 static void	door_release_server(door_node_t *, kthread_t *);
348 static kthread_t	*door_get_server(door_node_t *);
349 static door_node_t	*door_lookup(int, file_t **);
350 static int	door_translate_in(void);
351 static int	door_translate_out(void);
352 static void	door_fd_rele(door_desc_t *, uint_t, int);
353 static void	door_list_insert(door_node_t *);
354 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
355 static int	door_release_fds(door_desc_t *, uint_t);
356 static void	door_fd_close(door_desc_t *, uint_t);
357 static void	door_fp_close(struct file **, uint_t);
358 
359 static door_data_t *
360 door_my_data(int create_if_missing)
361 {
362 	door_data_t *ddp;
363 
364 	ddp = curthread->t_door;
365 	if (create_if_missing && ddp == NULL)
366 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
367 
368 	return (ddp);
369 }
370 
371 static door_server_t *
372 door_my_server(int create_if_missing)
373 {
374 	door_data_t *ddp = door_my_data(create_if_missing);
375 
376 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
377 }
378 
379 static door_client_t *
380 door_my_client(int create_if_missing)
381 {
382 	door_data_t *ddp = door_my_data(create_if_missing);
383 
384 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
385 }
386 
387 /*
388  * System call to create a door
389  */
390 int
391 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
392 {
393 	int fd;
394 	int err;
395 
396 	if ((attributes & ~DOOR_CREATE_MASK) ||
397 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
398 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
399 		return (set_errno(EINVAL));
400 
401 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
402 	    &fd, NULL)) != 0)
403 		return (set_errno(err));
404 
405 	f_setfd_or(fd, FD_CLOEXEC);
406 	return (fd);
407 }
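/*
 * Illustrative sketch (not part of this file): the user-level counterpart of
 * the door_create() system call above.  The server procedure answers each
 * invocation with door_return(3C), which does not return on success.  The
 * attach path and function names are hypothetical.
 */
#include <door.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>

/* ARGSUSED */
static void
example_server_proc(void *cookie, char *argp, size_t arg_size,
    door_desc_t *dp, uint_t n_desc)
{
	char reply[] = "pong";

	/* hand the reply back to the client blocked in door_call() */
	(void) door_return(reply, sizeof (reply), NULL, 0);
}

int
example_server(void)
{
	int dfd;

	/* DOOR_REFUSE_DESC mirrors the attribute checked in this file */
	if ((dfd = door_create(example_server_proc, NULL,
	    DOOR_REFUSE_DESC)) < 0)
		return (-1);

	/* publish the door in the file system namespace */
	(void) close(open("/var/run/example_door", O_CREAT | O_RDWR, 0644));
	if (fattach(dfd, "/var/run/example_door") < 0) {
		(void) door_revoke(dfd);
		return (-1);
	}
	(void) pause();			/* libc service threads handle calls */
	return (0);
}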
408 
409 /*
410  * Common code for creating user and kernel doors.  If a door was
411  * created, stores a file structure pointer in the location pointed
412  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
413  * pointer to a file descriptor is passed in as fdp, allocates a file
414  * descriptor representing the door.  If a door could not be created,
415  * returns an error.
416  */
417 static int
418 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
419     int from_kernel, int *fdp, file_t **fpp)
420 {
421 	door_node_t	*dp;
422 	vnode_t		*vp;
423 	struct file	*fp;
424 	static door_id_t index = 0;
425 	proc_t		*p = (from_kernel)? &p0 : curproc;
426 
427 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
428 
429 	dp->door_vnode = vn_alloc(KM_SLEEP);
430 	dp->door_target = p;
431 	dp->door_data = data_cookie;
432 	dp->door_pc = pc_cookie;
433 	dp->door_flags = attributes;
434 #ifdef _SYSCALL32_IMPL
435 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
436 		dp->door_data_max = UINT32_MAX;
437 	else
438 #endif
439 		dp->door_data_max = SIZE_MAX;
440 	dp->door_data_min = 0UL;
441 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
442 
443 	vp = DTOV(dp);
444 	vn_setops(vp, door_vnodeops);
445 	vp->v_type = VDOOR;
446 	vp->v_vfsp = &door_vfs;
447 	vp->v_data = (caddr_t)dp;
448 	mutex_enter(&door_knob);
449 	dp->door_index = index++;
450 	/* add to per-process door list */
451 	door_list_insert(dp);
452 	mutex_exit(&door_knob);
453 
454 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
455 		/*
456 		 * If the file table is full, remove the door from the
457 		 * per-process list, free the door, and return an error.
458 		 */
459 		mutex_enter(&door_knob);
460 		door_list_delete(dp);
461 		mutex_exit(&door_knob);
462 		vn_free(vp);
463 		kmem_free(dp, sizeof (door_node_t));
464 		return (EMFILE);
465 	}
466 	vn_exists(vp);
467 	if (fdp != NULL)
468 		setf(*fdp, fp);
469 	mutex_exit(&fp->f_tlock);
470 
471 	if (fpp != NULL)
472 		*fpp = fp;
473 	return (0);
474 }
475 
476 static int
477 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
478 {
479 	ASSERT(MUTEX_HELD(&door_knob));
480 
481 	/* we allow unref upcalls through, despite any minimum */
482 	if (da->data_size < dp->door_data_min &&
483 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
484 		return (ENOBUFS);
485 
486 	if (da->data_size > dp->door_data_max)
487 		return (ENOBUFS);
488 
489 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
490 		return (ENOTSUP);
491 
492 	if (da->desc_num > dp->door_desc_max)
493 		return (ENFILE);
494 
495 	return (0);
496 }
497 
498 /*
499  * Door invocation.
500  */
501 int
502 door_call(int did, void *args)
503 {
504 	/* Locals */
505 	door_node_t	*dp;
506 	kthread_t	*server_thread;
507 	int		error = 0;
508 	klwp_t		*lwp;
509 	door_client_t	*ct;		/* curthread door_data */
510 	door_server_t	*st;		/* server thread door_data */
511 	door_desc_t	*start = NULL;
512 	uint_t		ncopied = 0;
513 	size_t		dsize;
514 	/* destructor for data returned by a kernel server */
515 	void		(*destfn)() = NULL;
516 	void		*destarg;
517 	model_t		datamodel;
518 	int		gotresults = 0;
519 	int		needcleanup = 0;
520 	int		cancel_pending;
521 
522 	lwp = ttolwp(curthread);
523 	datamodel = lwp_getdatamodel(lwp);
524 
525 	ct = door_my_client(1);
526 
527 	/*
528 	 * Get the arguments
529 	 */
530 	if (args) {
531 		if (datamodel == DATAMODEL_NATIVE) {
532 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
533 				return (set_errno(EFAULT));
534 		} else {
535 			door_arg32_t    da32;
536 
537 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
538 				return (set_errno(EFAULT));
539 			ct->d_args.data_ptr =
540 			    (char *)(uintptr_t)da32.data_ptr;
541 			ct->d_args.data_size = da32.data_size;
542 			ct->d_args.desc_ptr =
543 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
544 			ct->d_args.desc_num = da32.desc_num;
545 			ct->d_args.rbuf =
546 			    (char *)(uintptr_t)da32.rbuf;
547 			ct->d_args.rsize = da32.rsize;
548 		}
549 	} else {
550 		/* No arguments, and no results allowed */
551 		ct->d_noresults = 1;
552 		ct->d_args.data_size = 0;
553 		ct->d_args.desc_num = 0;
554 		ct->d_args.rsize = 0;
555 	}
556 
557 	if ((dp = door_lookup(did, NULL)) == NULL)
558 		return (set_errno(EBADF));
559 
560 	/*
561 	 * We don't want to hold the door FD over the entire operation;
562 	 * instead, we put a hold on the door vnode and release the FD
563 	 * immediately
564 	 */
565 	VN_HOLD(DTOV(dp));
566 	releasef(did);
567 
568 	/*
569 	 * This should be done in shuttle_resume(), just before going to
570 	 * sleep, but we want to avoid overhead while holding door_knob.
571 	 * prstop() is just a no-op if we don't really go to sleep.
572 	 * We test not-kernel-address-space for the sake of clustering code.
573 	 */
574 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
575 		prstop(PR_REQUESTED, 0);
576 
577 	mutex_enter(&door_knob);
578 	if (DOOR_INVALID(dp)) {
579 		mutex_exit(&door_knob);
580 		error = EBADF;
581 		goto out;
582 	}
583 
584 	/*
585 	 * before we do anything, check that we are not overflowing the
586 	 * required limits.
587 	 */
588 	error = door_check_limits(dp, &ct->d_args, 0);
589 	if (error != 0) {
590 		mutex_exit(&door_knob);
591 		goto out;
592 	}
593 
594 	/*
595 	 * Check for in-kernel door server.
596 	 */
597 	if (dp->door_target == &p0) {
598 		caddr_t rbuf = ct->d_args.rbuf;
599 		size_t rsize = ct->d_args.rsize;
600 
601 		dp->door_active++;
602 		ct->d_kernel = 1;
603 		ct->d_error = DOOR_WAIT;
604 		mutex_exit(&door_knob);
605 		/* translate file descriptors to vnodes */
606 		if (ct->d_args.desc_num) {
607 			error = door_translate_in();
608 			if (error)
609 				goto out;
610 		}
611 		/*
612 		 * Call kernel door server.  Arguments are passed and
613 		 * returned as a door_arg pointer.  When called, data_ptr
614 		 * points to user data and desc_ptr points to a kernel list
615 		 * of door descriptors that have been converted to file
616 		 * structure pointers.  It's the server function's
617 		 * responsibility to copyin the data pointed to by data_ptr
618 		 * (this avoids extra copying in some cases).  On return,
619 		 * data_ptr points to a user buffer of data, and desc_ptr
620 		 * points to a kernel list of door descriptors representing
621 		 * files.  When a reference is passed to a kernel server,
622 		 * it is the server's responsibility to release the reference
623 		 * (by calling closef).  When the server includes a
624 		 * reference in its reply, it is released as part of the
625 		 * call (the server must duplicate the reference if
626 		 * it wants to retain a copy).  The destfn, if set to
627 		 * non-NULL, is a destructor to be called when the returned
628 		 * kernel data (if any) is no longer needed (has all been
629 		 * translated and copied to user level).
630 		 */
631 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
632 		    &destfn, &destarg, &error);
633 		mutex_enter(&door_knob);
634 		/* not implemented yet */
635 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
636 			door_deliver_unref(dp);
637 		mutex_exit(&door_knob);
638 		if (error)
639 			goto out;
640 
641 		/* translate vnodes to files */
642 		if (ct->d_args.desc_num) {
643 			error = door_translate_out();
644 			if (error)
645 				goto out;
646 		}
647 		ct->d_buf = ct->d_args.rbuf;
648 		ct->d_bufsize = ct->d_args.rsize;
649 		if (rsize < (ct->d_args.data_size +
650 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
651 			/* handle overflow */
652 			error = door_overflow(curthread, ct->d_args.data_ptr,
653 			    ct->d_args.data_size, ct->d_args.desc_ptr,
654 			    ct->d_args.desc_num);
655 			if (error)
656 				goto out;
657 			/* door_overflow sets d_args rbuf and rsize */
658 		} else {
659 			ct->d_args.rbuf = rbuf;
660 			ct->d_args.rsize = rsize;
661 		}
662 		goto results;
663 	}
664 
665 	/*
666 	 * Get a server thread from the target domain
667 	 */
668 	if ((server_thread = door_get_server(dp)) == NULL) {
669 		if (DOOR_INVALID(dp))
670 			error = EBADF;
671 		else
672 			error = EAGAIN;
673 		mutex_exit(&door_knob);
674 		goto out;
675 	}
676 
677 	st = DOOR_SERVER(server_thread->t_door);
678 	if (ct->d_args.desc_num || ct->d_args.data_size) {
679 		int is_private = (dp->door_flags & DOOR_PRIVATE);
680 		/*
681 		 * Move data from client to server
682 		 */
683 		DOOR_T_HOLD(st);
684 		mutex_exit(&door_knob);
685 		error = door_args(server_thread, is_private);
686 		mutex_enter(&door_knob);
687 		DOOR_T_RELEASE(st);
688 		if (error) {
689 			/*
690 			 * We're not going to resume this thread after all
691 			 */
692 			door_release_server(dp, server_thread);
693 			shuttle_sleep(server_thread);
694 			mutex_exit(&door_knob);
695 			goto out;
696 		}
697 	}
698 
699 	dp->door_active++;
700 	ct->d_error = DOOR_WAIT;
701 	ct->d_args_done = 0;
702 	st->d_caller = curthread;
703 	st->d_active = dp;
704 
705 	shuttle_resume(server_thread, &door_knob);
706 
707 	mutex_enter(&door_knob);
708 shuttle_return:
709 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
710 		/*
711 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
712 		 */
713 		mutex_exit(&door_knob);		/* May block in ISSIG */
714 		cancel_pending = 0;
715 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
716 		    MUSTRETURN(curproc, curthread) ||
717 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
718 			/* Signal, forkall, ... */
719 			lwp->lwp_sysabort = 0;
720 			if (cancel_pending)
721 				schedctl_cancel_eintr();
722 			mutex_enter(&door_knob);
723 			error = EINTR;
724 			/*
725 			 * If the server has finished processing our call,
726 			 * or exited (calling door_slam()), then d_error
727 			 * will have changed.  If the server hasn't finished
728 			 * yet, d_error will still be DOOR_WAIT, and we
729 			 * let it know we are not interested in any
730 			 * results by sending a SIGCANCEL, unless the door
731 			 * is marked with DOOR_NO_CANCEL.
732 			 */
733 			if (ct->d_error == DOOR_WAIT &&
734 			    st->d_caller == curthread) {
735 				proc_t	*p = ttoproc(server_thread);
736 
737 				st->d_active = NULL;
738 				st->d_caller = NULL;
739 
740 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
741 					DOOR_T_HOLD(st);
742 					mutex_exit(&door_knob);
743 
744 					mutex_enter(&p->p_lock);
745 					sigtoproc(p, server_thread, SIGCANCEL);
746 					mutex_exit(&p->p_lock);
747 
748 					mutex_enter(&door_knob);
749 					DOOR_T_RELEASE(st);
750 				}
751 			}
752 		} else {
753 			/*
754 			 * Return from stop(), server exit...
755 			 *
756 			 * Note that the server could have done a
757 			 * door_return while the client was in stop state
758 			 * (ISSIG), in which case the error condition
759 			 * is updated by the server.
760 			 */
761 			mutex_enter(&door_knob);
762 			if (ct->d_error == DOOR_WAIT) {
763 				/* Still waiting for a reply */
764 				shuttle_swtch(&door_knob);
765 				mutex_enter(&door_knob);
766 				lwp->lwp_asleep = 0;
767 				goto	shuttle_return;
768 			} else if (ct->d_error == DOOR_EXIT) {
769 				/* Server exit */
770 				error = EINTR;
771 			} else {
772 				/* Server did a door_return during ISSIG */
773 				error = ct->d_error;
774 			}
775 		}
776 		/*
777 		 * Can't exit if the server is currently copying
778 		 * results for me.
779 		 */
780 		while (DOOR_T_HELD(ct))
781 			cv_wait(&ct->d_cv, &door_knob);
782 
783 		/*
784 		 * If the server has not processed our message, free the
785 		 * descriptors.
786 		 */
787 		if (!ct->d_args_done) {
788 			needcleanup = 1;
789 			ct->d_args_done = 1;
790 		}
791 
792 		/*
793 		 * Find out if results were successfully copied.
794 		 */
795 		if (ct->d_error == 0)
796 			gotresults = 1;
797 	}
798 	ASSERT(ct->d_args_done);
799 	lwp->lwp_asleep = 0;		/* /proc */
800 	lwp->lwp_sysabort = 0;		/* /proc */
801 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
802 		door_deliver_unref(dp);
803 	mutex_exit(&door_knob);
804 
805 	if (needcleanup)
806 		door_fp_close(ct->d_fpp, ct->d_args.desc_num);
807 
808 results:
809 	/*
810 	 * Move the results to userland (if any)
811 	 */
812 
813 	if (ct->d_noresults)
814 		goto out;
815 
816 	if (error) {
817 		/*
818 		 * If server returned results successfully, then we've
819 		 * been interrupted and may need to clean up.
820 		 */
821 		if (gotresults) {
822 			ASSERT(error == EINTR);
823 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
824 		}
825 		goto out;
826 	}
827 
828 	/*
829 	 * Copy back data if we haven't caused an overflow (already
830 	 * handled) and we are using a 2 copy transfer, or we are
831 	 * returning data from a kernel server.
832 	 */
833 	if (ct->d_args.data_size) {
834 		ct->d_args.data_ptr = ct->d_args.rbuf;
835 		if (ct->d_kernel || (!ct->d_overflow &&
836 		    ct->d_args.data_size <= door_max_arg)) {
837 			if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
838 			    ct->d_args.data_size)) {
839 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
840 				error = EFAULT;
841 				goto out;
842 			}
843 		}
844 	}
845 
846 	/*
847 	 * stuff returned doors into our proc, copyout the descriptors
848 	 */
849 	if (ct->d_args.desc_num) {
850 		struct file	**fpp;
851 		door_desc_t	*didpp;
852 		uint_t		n = ct->d_args.desc_num;
853 
854 		dsize = n * sizeof (door_desc_t);
855 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
856 		fpp = ct->d_fpp;
857 
858 		while (n--) {
859 			if (door_insert(*fpp, didpp) == -1) {
860 				/* Close remaining files */
861 				door_fp_close(fpp, n + 1);
862 				error = EMFILE;
863 				goto out;
864 			}
865 			fpp++; didpp++; ncopied++;
866 		}
867 
868 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
869 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
870 
871 		if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {
872 			error = EFAULT;
873 			goto out;
874 		}
875 	}
876 
877 	/*
878 	 * Return the results
879 	 */
880 	if (datamodel == DATAMODEL_NATIVE) {
881 		if (copyout_nowatch(&ct->d_args, args,
882 		    sizeof (door_arg_t)) != 0)
883 			error = EFAULT;
884 	} else {
885 		door_arg32_t    da32;
886 
887 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
888 		da32.data_size = ct->d_args.data_size;
889 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
890 		da32.desc_num = ct->d_args.desc_num;
891 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
892 		da32.rsize = ct->d_args.rsize;
893 		if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {
894 			error = EFAULT;
895 		}
896 	}
897 
898 out:
899 	ct->d_noresults = 0;
900 
901 	/* clean up the overflow buffer if an error occurred */
902 	if (error != 0 && ct->d_overflow) {
903 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
904 		    ct->d_args.rsize);
905 	}
906 	ct->d_overflow = 0;
907 
908 	/* call destructor */
909 	if (destfn) {
910 		ASSERT(ct->d_kernel);
911 		(*destfn)(dp->door_data, destarg);
912 		ct->d_buf = NULL;
913 		ct->d_bufsize = 0;
914 	}
915 
916 	if (dp)
917 		VN_RELE(DTOV(dp));
918 
919 	if (ct->d_buf) {
920 		ASSERT(!ct->d_kernel);
921 		kmem_free(ct->d_buf, ct->d_bufsize);
922 		ct->d_buf = NULL;
923 		ct->d_bufsize = 0;
924 	}
925 	ct->d_kernel = 0;
926 
927 	/* clean up the descriptor copyout buffer */
928 	if (start != NULL) {
929 		if (error != 0)
930 			door_fd_close(start, ncopied);
931 		kmem_free(start, dsize);
932 	}
933 
934 	if (ct->d_fpp) {
935 		kmem_free(ct->d_fpp, ct->d_fpp_size);
936 		ct->d_fpp = NULL;
937 		ct->d_fpp_size = 0;
938 	}
939 
940 	if (error)
941 		return (set_errno(error));
942 
943 	return (0);
944 }
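/*
 * Illustrative sketch (not part of this file): the shape of an in-kernel
 * door server procedure, inferred from the call site above:
 * (*(dp->door_pc))(dp->door_data, &ct->d_args, &destfn, &destarg, &error).
 * The names and exact prototypes are hypothetical; such a procedure would
 * typically be registered with door_ki_create().
 */
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/door.h>
#include <sys/kmem.h>

#define	EXAMPLE_REPLY_LEN	32

static void example_kdoor_destroy(void *, void *);

/* ARGSUSED */
static void
example_kdoor_proc(void *cookie, door_arg_t *args,
    void (**destfnp)(void *, void *), void **destargp, int *errorp)
{
	char *rbuf;

	/*
	 * args->data_ptr still points at user memory; a real server would
	 * copyin() the request here.  The reply lives in kernel memory;
	 * door_call() copies it out and then invokes the destructor.
	 */
	rbuf = kmem_zalloc(EXAMPLE_REPLY_LEN, KM_SLEEP);
	bcopy("pong", rbuf, sizeof ("pong"));

	args->rbuf = rbuf;
	args->rsize = EXAMPLE_REPLY_LEN;
	args->data_ptr = rbuf;
	args->data_size = EXAMPLE_REPLY_LEN;
	args->desc_ptr = NULL;
	args->desc_num = 0;

	*destfnp = example_kdoor_destroy;	/* free rbuf once copied out */
	*destargp = rbuf;
	*errorp = 0;
}

/* ARGSUSED */
static void
example_kdoor_destroy(void *cookie, void *arg)
{
	kmem_free(arg, EXAMPLE_REPLY_LEN);
}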
945 
946 static int
947 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
948 {
949 	int error = 0;
950 
951 	mutex_enter(&door_knob);
952 
953 	if (DOOR_INVALID(dp)) {
954 		mutex_exit(&door_knob);
955 		return (EBADF);
956 	}
957 
958 	/*
959 	 * door_ki_setparam() can only affect kernel doors.
960 	 * door_setparam() can only affect doors attached to the current
961 	 * process.
962 	 */
963 	if ((from_kernel && dp->door_target != &p0) ||
964 	    (!from_kernel && dp->door_target != curproc)) {
965 		mutex_exit(&door_knob);
966 		return (EPERM);
967 	}
968 
969 	switch (type) {
970 	case DOOR_PARAM_DESC_MAX:
971 		if (val > INT_MAX)
972 			error = ERANGE;
973 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
974 			error = ENOTSUP;
975 		else
976 			dp->door_desc_max = (uint_t)val;
977 		break;
978 
979 	case DOOR_PARAM_DATA_MIN:
980 		if (val > dp->door_data_max)
981 			error = EINVAL;
982 		else
983 			dp->door_data_min = val;
984 		break;
985 
986 	case DOOR_PARAM_DATA_MAX:
987 		if (val < dp->door_data_min)
988 			error = EINVAL;
989 		else
990 			dp->door_data_max = val;
991 		break;
992 
993 	default:
994 		error = EINVAL;
995 		break;
996 	}
997 
998 	mutex_exit(&door_knob);
999 	return (error);
1000 }
1001 
1002 static int
1003 door_getparam_common(door_node_t *dp, int type, size_t *out)
1004 {
1005 	int error = 0;
1006 
1007 	mutex_enter(&door_knob);
1008 	switch (type) {
1009 	case DOOR_PARAM_DESC_MAX:
1010 		*out = (size_t)dp->door_desc_max;
1011 		break;
1012 	case DOOR_PARAM_DATA_MIN:
1013 		*out = dp->door_data_min;
1014 		break;
1015 	case DOOR_PARAM_DATA_MAX:
1016 		*out = dp->door_data_max;
1017 		break;
1018 	default:
1019 		error = EINVAL;
1020 		break;
1021 	}
1022 	mutex_exit(&door_knob);
1023 	return (error);
1024 }
1025 
1026 int
1027 door_setparam(int did, int type, size_t val)
1028 {
1029 	door_node_t *dp;
1030 	int error = 0;
1031 
1032 	if ((dp = door_lookup(did, NULL)) == NULL)
1033 		return (set_errno(EBADF));
1034 
1035 	error = door_setparam_common(dp, 0, type, val);
1036 
1037 	releasef(did);
1038 
1039 	if (error)
1040 		return (set_errno(error));
1041 
1042 	return (0);
1043 }
1044 
1045 int
1046 door_getparam(int did, int type, size_t *out)
1047 {
1048 	door_node_t *dp;
1049 	size_t val = 0;
1050 	int error = 0;
1051 
1052 	if ((dp = door_lookup(did, NULL)) == NULL)
1053 		return (set_errno(EBADF));
1054 
1055 	error = door_getparam_common(dp, type, &val);
1056 
1057 	releasef(did);
1058 
1059 	if (error)
1060 		return (set_errno(error));
1061 
1062 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1063 		if (copyout(&val, out, sizeof (val)))
1064 			return (set_errno(EFAULT));
1065 #ifdef _SYSCALL32_IMPL
1066 	} else {
1067 		size32_t val32 = (size32_t)val;
1068 
1069 		if (val != val32)
1070 			return (set_errno(EOVERFLOW));
1071 
1072 		if (copyout(&val32, out, sizeof (val32)))
1073 			return (set_errno(EFAULT));
1074 #endif /* _SYSCALL32_IMPL */
1075 	}
1076 
1077 	return (0);
1078 }
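/*
 * Illustrative sketch (not part of this file): a user-level server adjusting
 * the limits that door_check_limits() enforces, via door_setparam(3C) and
 * door_getparam(3C).  "dfd" is a descriptor from door_create(3C); the
 * values are arbitrary.
 */
#include <sys/types.h>
#include <door.h>

int
example_set_limits(int dfd)
{
	size_t desc_max;

	/* refuse requests larger than 64K before any data is copied in */
	if (door_setparam(dfd, DOOR_PARAM_DATA_MAX, 64 * 1024) < 0)
		return (-1);

	/* require at least a 4-byte request header */
	if (door_setparam(dfd, DOOR_PARAM_DATA_MIN, 4) < 0)
		return (-1);

	/* read back the descriptor limit (0 if DOOR_REFUSE_DESC was set) */
	if (door_getparam(dfd, DOOR_PARAM_DESC_MAX, &desc_max) < 0)
		return (-1);

	return (0);
}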
1079 
1080 /*
1081  * A copyout() which proceeds from high addresses to low addresses.  This way,
1082  * stack guard pages are effective.
1083  *
1084  * Note that we use copyout_nowatch();  this is called while the client is
1085  * held.
1086  */
1087 static int
1088 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1089 {
1090 	const char *kbase = (const char *)kaddr;
1091 	uintptr_t ubase = (uintptr_t)uaddr;
1092 	size_t pgsize = PAGESIZE;
1093 
1094 	if (count <= pgsize)
1095 		return (copyout_nowatch(kaddr, uaddr, count));
1096 
1097 	while (count > 0) {
1098 		uintptr_t start, end, offset, amount;
1099 
1100 		end = ubase + count;
1101 		start = P2ALIGN(end - 1, pgsize);
1102 		if (P2ALIGN(ubase, pgsize) == start)
1103 			start = ubase;
1104 
1105 		offset = start - ubase;
1106 		amount = end - start;
1107 
1108 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1109 
1110 		if (copyout_nowatch(kbase + offset, (void *)start, amount))
1111 			return (1);
1112 		count -= amount;
1113 	}
1114 	return (0);
1115 }
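/*
 * Worked example (not part of this file), assuming PAGESIZE is 0x1000:
 * copying 0x2800 bytes to uaddr 0x7fffa000 (so ending at 0x7fffc800) is
 * split by the loop above into three pieces, highest page first:
 *
 *	[0x7fffc000, 0x7fffc800), then
 *	[0x7fffb000, 0x7fffc000), then
 *	[0x7fffa000, 0x7fffb000)
 *
 * so the stack is touched a page at a time from the existing stack
 * downward, which is the access pattern the guard-page logic expects.
 */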
1116 
1117 /*
1118  * The IA32 ABI supplement 1.0 changed the required stack alignment to
1119  * 16 bytes (from 4 bytes), so that code can make use of SSE instructions.
1120  * This is already done for process entry, thread entry, and makecontext();
1121  * We need to do this for door_return as well. The stack will be aligned to
1122  * We need to do this for door_return as well.  The stack will be aligned
1123  * to the same boundary as the door_results structure.
1124  */
1125 #if defined(__amd64)
1126 #undef STACK_ALIGN32
1127 #define	STACK_ALIGN32 16
1128 #endif
1129 
1130 /*
1131  * Writes the stack layout for door_return() into the door_server_t of the
1132  * server thread.
1133  */
1134 static int
1135 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1136 {
1137 	door_server_t *st = DOOR_SERVER(tp->t_door);
1138 	door_layout_t *out = &st->d_layout;
1139 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1140 	size_t ssize = st->d_ssize;
1141 	size_t descsz;
1142 	uintptr_t descp, datap, infop, resultsp, finalsp;
1143 	size_t align = STACK_ALIGN;
1144 	size_t results_sz = sizeof (struct door_results);
1145 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1146 
1147 	ASSERT(!st->d_layout_done);
1148 
1149 #ifndef _STACK_GROWS_DOWNWARD
1150 #error stack does not grow downward, door_layout() must change
1151 #endif
1152 
1153 #ifdef _SYSCALL32_IMPL
1154 	if (datamodel != DATAMODEL_NATIVE) {
1155 		align = STACK_ALIGN32;
1156 		results_sz = sizeof (struct door_results32);
1157 	}
1158 #endif
1159 
1160 	descsz = ndesc * sizeof (door_desc_t);
1161 
1162 	/*
1163 	 * To speed up the overflow checking, we do an initial check
1164 	 * that the passed in data size won't cause us to wrap past
1165 	 * base_sp.  Since door_max_desc limits descsz, we can
1166 	 * safely use it here.  65535 is an arbitrary 'bigger than
1167 	 * we need, small enough to not cause trouble' constant;
1168 	 * the only constraint is that it must be > than:
1169 	 *
1170 	 *	5 * STACK_ALIGN +
1171 	 *	    sizeof (door_info_t) +
1172 	 *	    sizeof (door_results_t) +
1173 	 *	    (max adjustment from door_final_sp())
1174 	 *
1175 	 * After we compute the layout, we can safely do a "did we wrap
1176 	 * around" check, followed by a check against the recorded
1177 	 * stack size.
1178 	 */
1179 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1180 		return (E2BIG);		/* overflow */
1181 
1182 	descp = P2ALIGN(base_sp - descsz, align);
1183 	datap = P2ALIGN(descp - data_size, align);
1184 
1185 	if (info_needed)
1186 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1187 	else
1188 		infop = datap;
1189 
1190 	resultsp = P2ALIGN(infop - results_sz, align);
1191 	finalsp = door_final_sp(resultsp, align, datamodel);
1192 
1193 	if (finalsp > base_sp)
1194 		return (E2BIG);		/* overflow */
1195 
1196 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1197 		return (E2BIG);		/* doesn't fit in stack */
1198 
1199 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1200 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1201 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1202 	out->dl_resultsp = (caddr_t)resultsp;
1203 	out->dl_sp = (caddr_t)finalsp;
1204 
1205 	st->d_layout_done = 1;
1206 	return (0);
1207 }
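/*
 * Illustrative sketch (not part of this file): the stack image that
 * door_layout() above computes, from high to low addresses (each boundary
 * rounded down to the alignment in force for the data model):
 *
 *	base_sp ->  +---------------------------+
 *	            | door_desc_t[ndesc]        |  dl_descp
 *	            | argument data             |  dl_datap
 *	            | door_info_t (if needed)   |  dl_infop
 *	            | struct door_results       |  dl_resultsp
 *	dl_sp   ->  +---------------------------+  (from door_final_sp())
 *
 * door_server_dispatch() below fills these areas in the same top-down
 * order before resuming the server thread at dl_sp.
 */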
1208 
1209 static int
1210 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1211 {
1212 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1213 	door_layout_t *layout = &st->d_layout;
1214 	int error = 0;
1215 
1216 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1217 
1218 	door_pool_t *pool = (is_private)? &dp->door_servers :
1219 	    &curproc->p_server_threads;
1220 
1221 	int empty_pool = (pool->dp_threads == NULL);
1222 
1223 	caddr_t infop = NULL;
1224 	char *datap = NULL;
1225 	size_t datasize = 0;
1226 	size_t descsize;
1227 
1228 	file_t **fpp = ct->d_fpp;
1229 	door_desc_t *start = NULL;
1230 	uint_t ndesc = 0;
1231 	uint_t ncopied = 0;
1232 
1233 	if (ct != NULL) {
1234 		datap = ct->d_args.data_ptr;
1235 		datasize = ct->d_args.data_size;
1236 		ndesc = ct->d_args.desc_num;
1237 	}
1238 
1239 	descsize = ndesc * sizeof (door_desc_t);
1240 
1241 	/*
1242 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1243 	 * to let unref notifications through, though.
1244 	 */
1245 	if (datap == DOOR_UNREF_DATA) {
1246 		if (ct->d_upcall != NULL)
1247 			datasize = 0;
1248 		else
1249 			datap = NULL;
1250 	} else if (datasize == 0) {
1251 		datap = NULL;
1252 	}
1253 
1254 	/*
1255 	 * Get the stack layout, if it hasn't already been done.
1256 	 */
1257 	if (!st->d_layout_done) {
1258 		error = door_layout(curthread, datasize, ndesc,
1259 		    (is_private && empty_pool));
1260 		if (error != 0)
1261 			goto fail;
1262 	}
1263 
1264 	/*
1265 	 * fill out the stack, starting from the top.  Layout was already
1266 	 * filled in by door_args() or door_translate_out().
1267 	 */
1268 	if (layout->dl_descp != NULL) {
1269 		ASSERT(ndesc != 0);
1270 		start = kmem_alloc(descsize, KM_SLEEP);
1271 
1272 		while (ndesc > 0) {
1273 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1274 				error = EMFILE;
1275 				goto fail;
1276 			}
1277 			ndesc--;
1278 			ncopied++;
1279 			fpp++;
1280 		}
1281 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1282 			error = E2BIG;
1283 			goto fail;
1284 		}
1285 	}
1286 	fpp = NULL;			/* finished processing */
1287 
1288 	if (layout->dl_datap != NULL) {
1289 		ASSERT(datasize != 0);
1290 		datap = layout->dl_datap;
1291 		if (ct->d_upcall != NULL || datasize <= door_max_arg) {
1292 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1293 				error = E2BIG;
1294 				goto fail;
1295 			}
1296 		}
1297 	}
1298 
1299 	if (is_private && empty_pool) {
1300 		door_info_t di;
1301 
1302 		infop = layout->dl_infop;
1303 		ASSERT(infop != NULL);
1304 
1305 		di.di_target = curproc->p_pid;
1306 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1307 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1308 		di.di_uniquifier = dp->door_index;
1309 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1310 		    DOOR_LOCAL;
1311 
1312 		if (door_stack_copyout(&di, infop, sizeof (di))) {
1313 			error = E2BIG;
1314 			goto fail;
1315 		}
1316 	}
1317 
1318 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1319 		struct door_results dr;
1320 
1321 		dr.cookie = dp->door_data;
1322 		dr.data_ptr = datap;
1323 		dr.data_size = datasize;
1324 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1325 		dr.desc_num = ncopied;
1326 		dr.pc = dp->door_pc;
1327 		dr.nservers = !empty_pool;
1328 		dr.door_info = (door_info_t *)infop;
1329 
1330 		if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1331 			error = E2BIG;
1332 			goto fail;
1333 		}
1334 #ifdef _SYSCALL32_IMPL
1335 	} else {
1336 		struct door_results32 dr32;
1337 
1338 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1339 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1340 		dr32.data_size = (size32_t)datasize;
1341 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1342 		dr32.desc_num = ncopied;
1343 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1344 		dr32.nservers = !empty_pool;
1345 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1346 
1347 		if (door_stack_copyout(&dr32, layout->dl_resultsp,
1348 		    sizeof (dr32))) {
1349 			error = E2BIG;
1350 			goto fail;
1351 		}
1352 #endif
1353 	}
1354 
1355 	error = door_finish_dispatch(layout->dl_sp);
1356 fail:
1357 	if (start != NULL) {
1358 		if (error != 0)
1359 			door_fd_close(start, ncopied);
1360 		kmem_free(start, descsize);
1361 	}
1362 	if (fpp != NULL)
1363 		door_fp_close(fpp, ndesc);
1364 
1365 	return (error);
1366 }
1367 
1368 /*
1369  * Return the results (if any) to the caller (if any) and wait for the
1370  * next invocation on a door.
1371  */
1372 int
1373 door_return(caddr_t data_ptr, size_t data_size,
1374     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1375 {
1376 	kthread_t	*caller;
1377 	klwp_t		*lwp;
1378 	int		error = 0;
1379 	door_node_t	*dp;
1380 	door_server_t	*st;		/* curthread door_data */
1381 	door_client_t	*ct;		/* caller door_data */
1382 	int		cancel_pending;
1383 
1384 	st = door_my_server(1);
1385 
1386 	/*
1387 	 * If thread was bound to a door that no longer exists, return
1388 	 * an error.  This can happen if a thread is bound to a door
1389 	 * before the process calls forkall(); in the child, the door
1390 	 * doesn't exist and door_fork() sets the d_invbound flag.
1391 	 */
1392 	if (st->d_invbound)
1393 		return (set_errno(EINVAL));
1394 
1395 	st->d_sp = sp;			/* Save base of stack. */
1396 	st->d_ssize = ssize;		/* and its size */
1397 
1398 	/*
1399 	 * This should be done in shuttle_resume(), just before going to
1400 	 * sleep, but we want to avoid overhead while holding door_knob.
1401 	 * prstop() is just a no-op if we don't really go to sleep.
1402 	 * We test not-kernel-address-space for the sake of clustering code.
1403 	 */
1404 	lwp = ttolwp(curthread);
1405 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
1406 		prstop(PR_REQUESTED, 0);
1407 
1408 	/* Make sure the caller hasn't gone away */
1409 	mutex_enter(&door_knob);
1410 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1411 		if (desc_num != 0) {
1412 			/* close any DOOR_RELEASE descriptors */
1413 			mutex_exit(&door_knob);
1414 			error = door_release_fds(desc_ptr, desc_num);
1415 			if (error)
1416 				return (set_errno(error));
1417 			mutex_enter(&door_knob);
1418 		}
1419 		goto out;
1420 	}
1421 	ct = DOOR_CLIENT(caller->t_door);
1422 
1423 	ct->d_args.data_size = data_size;
1424 	ct->d_args.desc_num = desc_num;
1425 	/*
1426 	 * Transfer results, if any, to the client
1427 	 */
1428 	if (data_size != 0 || desc_num != 0) {
1429 		/*
1430 		 * Prevent the client from exiting until we have finished
1431 		 * moving results.
1432 		 */
1433 		DOOR_T_HOLD(ct);
1434 		mutex_exit(&door_knob);
1435 		error = door_results(caller, data_ptr, data_size,
1436 		    desc_ptr, desc_num);
1437 		mutex_enter(&door_knob);
1438 		DOOR_T_RELEASE(ct);
1439 		/*
1440 		 * Pass EOVERFLOW errors back to the client
1441 		 */
1442 		if (error && error != EOVERFLOW) {
1443 			mutex_exit(&door_knob);
1444 			return (set_errno(error));
1445 		}
1446 	}
1447 out:
1448 	/* Put ourselves on the available server thread list */
1449 	door_release_server(st->d_pool, curthread);
1450 
1451 	/*
1452 	 * Make sure the caller is still waiting to be resumed
1453 	 */
1454 	if (caller) {
1455 		disp_lock_t *tlp;
1456 
1457 		thread_lock(caller);
1458 		ct->d_error = error;		/* Return any errors */
1459 		if (caller->t_state == TS_SLEEP &&
1460 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1461 			cpu_t *cp = CPU;
1462 
1463 			tlp = caller->t_lockp;
1464 			/*
1465 			 * Setting t_disp_queue prevents erroneous preemptions
1466 			 * if this thread is still in execution on another
1467 			 * processor
1468 			 */
1469 			caller->t_disp_queue = cp->cpu_disp;
1470 			CL_ACTIVE(caller);
1471 			/*
1472 			 * We are calling thread_onproc() instead of
1473 			 * THREAD_ONPROC() because compiler can reorder
1474 			 * the two stores of t_state and t_lockp in
1475 			 * THREAD_ONPROC().
1476 			 */
1477 			thread_onproc(caller, cp);
1478 			disp_lock_exit_high(tlp);
1479 			shuttle_resume(caller, &door_knob);
1480 		} else {
1481 			/* May have been setrun or in stop state */
1482 			thread_unlock(caller);
1483 			shuttle_swtch(&door_knob);
1484 		}
1485 	} else {
1486 		shuttle_swtch(&door_knob);
1487 	}
1488 
1489 	/*
1490 	 * We've sprung to life. Determine if we are part of a door
1491 	 * invocation, or just interrupted
1492 	 */
1493 	mutex_enter(&door_knob);
1494 	if ((dp = st->d_active) != NULL) {
1495 		/*
1496 		 * Normal door invocation. Return any error condition
1497 		 * encountered while trying to pass args to the server
1498 		 * thread.
1499 		 */
1500 		lwp->lwp_asleep = 0;
1501 		/*
1502 		 * Prevent the caller from leaving us while we
1503 		 * are copying out the arguments from its buffer.
1504 		 */
1505 		ASSERT(st->d_caller != NULL);
1506 		ct = DOOR_CLIENT(st->d_caller->t_door);
1507 
1508 		DOOR_T_HOLD(ct);
1509 		mutex_exit(&door_knob);
1510 		error = door_server_dispatch(ct, dp);
1511 		mutex_enter(&door_knob);
1512 		DOOR_T_RELEASE(ct);
1513 
1514 		/* let the client know we have processed its message */
1515 		ct->d_args_done = 1;
1516 
1517 		if (error) {
1518 			caller = st->d_caller;
1519 			if (caller)
1520 				ct = DOOR_CLIENT(caller->t_door);
1521 			else
1522 				ct = NULL;
1523 			goto out;
1524 		}
1525 		mutex_exit(&door_knob);
1526 		return (0);
1527 	} else {
1528 		/*
1529 		 * We are not involved in a door_invocation.
1530 		 * Check for /proc related activity...
1531 		 */
1532 		st->d_caller = NULL;
1533 		door_server_exit(curproc, curthread);
1534 		mutex_exit(&door_knob);
1535 		cancel_pending = 0;
1536 		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1537 		    MUSTRETURN(curproc, curthread) ||
1538 		    (cancel_pending = schedctl_cancel_pending()) != 0) {
1539 			if (cancel_pending)
1540 				schedctl_cancel_eintr();
1541 			lwp->lwp_asleep = 0;
1542 			lwp->lwp_sysabort = 0;
1543 			return (set_errno(EINTR));
1544 		}
1545 		/* Go back and wait for another request */
1546 		lwp->lwp_asleep = 0;
1547 		mutex_enter(&door_knob);
1548 		caller = NULL;
1549 		goto out;
1550 	}
1551 }
1552 
1553 /*
1554  * Revoke any future invocations on this door
1555  */
1556 int
1557 door_revoke(int did)
1558 {
1559 	door_node_t	*d;
1560 	int		error;
1561 
1562 	if ((d = door_lookup(did, NULL)) == NULL)
1563 		return (set_errno(EBADF));
1564 
1565 	mutex_enter(&door_knob);
1566 	if (d->door_target != curproc) {
1567 		mutex_exit(&door_knob);
1568 		releasef(did);
1569 		return (set_errno(EPERM));
1570 	}
1571 	d->door_flags |= DOOR_REVOKED;
1572 	if (d->door_flags & DOOR_PRIVATE)
1573 		cv_broadcast(&d->door_servers.dp_cv);
1574 	else
1575 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1576 	mutex_exit(&door_knob);
1577 	releasef(did);
1578 	/* Invalidate the descriptor */
1579 	if ((error = closeandsetf(did, NULL)) != 0)
1580 		return (set_errno(error));
1581 	return (0);
1582 }
1583 
1584 int
1585 door_info(int did, struct door_info *d_info)
1586 {
1587 	door_node_t	*dp;
1588 	door_info_t	di;
1589 	door_server_t	*st;
1590 	file_t		*fp = NULL;
1591 
1592 	if (did == DOOR_QUERY) {
1593 		/* Get information on door current thread is bound to */
1594 		if ((st = door_my_server(0)) == NULL ||
1595 		    (dp = st->d_pool) == NULL)
1596 			/* Thread isn't bound to a door */
1597 			return (set_errno(EBADF));
1598 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1599 		/* Not a door */
1600 		return (set_errno(EBADF));
1601 	}
1602 
1603 	door_info_common(dp, &di, fp);
1604 
1605 	if (did != DOOR_QUERY)
1606 		releasef(did);
1607 
1608 	if (copyout(&di, d_info, sizeof (struct door_info)))
1609 		return (set_errno(EFAULT));
1610 	return (0);
1611 }
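/*
 * Illustrative sketch (not part of this file): querying a door with
 * door_info(3C).  Passing DOOR_QUERY as the descriptor asks about the door
 * the calling thread is bound to, matching the special case handled above.
 */
#include <door.h>
#include <stdio.h>

int
example_query(int dfd)
{
	door_info_t di;

	if (door_info(dfd, &di) < 0)
		return (-1);

	(void) printf("server pid %ld, %s, %s\n", (long)di.di_target,
	    (di.di_attributes & DOOR_LOCAL) ? "local" : "remote",
	    (di.di_attributes & DOOR_IS_UNREF) ? "unreferenced" : "referenced");
	return (0);
}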
1612 
1613 /*
1614  * Common code for getting information about a door either via the
1615  * door_info system call or the door_ki_info kernel call.
1616  */
1617 void
1618 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1619 {
1620 	int unref_count;
1621 
1622 	bzero(dip, sizeof (door_info_t));
1623 
1624 	mutex_enter(&door_knob);
1625 	if (dp->door_target == NULL)
1626 		dip->di_target = -1;
1627 	else
1628 		dip->di_target = dp->door_target->p_pid;
1629 
1630 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1631 	if (dp->door_target == curproc)
1632 		dip->di_attributes |= DOOR_LOCAL;
1633 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1634 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1635 	dip->di_uniquifier = dp->door_index;
1636 	/*
1637 	 * If this door is in the middle of having an unreferenced
1638 	 * notification delivered, don't count the VN_HOLD by
1639 	 * door_deliver_unref in determining if it is unreferenced.
1640 	 * This handles the case where door_info is called from the
1641 	 * thread delivering the unref notification.
1642 	 */
1643 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1644 		unref_count = 2;
1645 	else
1646 		unref_count = 1;
1647 	mutex_exit(&door_knob);
1648 
1649 	if (fp == NULL) {
1650 		/*
1651 		 * If this thread is bound to the door, then we can just
1652 		 * check the vnode; a ref count of 1 (or 2 if this is
1653 		 * handling an unref notification) means that the hold
1654 		 * from the door_bind is the only reference to the door
1655 		 * (no file descriptor refers to it).
1656 		 */
1657 		if (DTOV(dp)->v_count == unref_count)
1658 			dip->di_attributes |= DOOR_IS_UNREF;
1659 	} else {
1660 		/*
1661 		 * If we're working from a file descriptor or door handle
1662 		 * we need to look at the file structure count.  We don't
1663 		 * need to hold the vnode lock since this is just a snapshot.
1664 		 */
1665 		mutex_enter(&fp->f_tlock);
1666 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1667 			dip->di_attributes |= DOOR_IS_UNREF;
1668 		mutex_exit(&fp->f_tlock);
1669 	}
1670 }
1671 
1672 /*
1673  * Return credentials of the door caller (if any) for this invocation
1674  */
1675 int
1676 door_ucred(struct ucred_s *uch)
1677 {
1678 	kthread_t	*caller;
1679 	door_server_t	*st;
1680 	door_client_t	*ct;
1681 	door_upcall_t	*dup;
1682 	struct proc	*p;
1683 	struct ucred_s	*res;
1684 	int		err;
1685 
1686 	mutex_enter(&door_knob);
1687 	if ((st = door_my_server(0)) == NULL ||
1688 	    (caller = st->d_caller) == NULL) {
1689 		mutex_exit(&door_knob);
1690 		return (set_errno(EINVAL));
1691 	}
1692 
1693 	ASSERT(caller->t_door != NULL);
1694 	ct = DOOR_CLIENT(caller->t_door);
1695 
1696 	/* Prevent caller from exiting while we examine the cred */
1697 	DOOR_T_HOLD(ct);
1698 	mutex_exit(&door_knob);
1699 
1700 	p = ttoproc(caller);
1701 
1702 	/*
1703 	 * If the credentials are not specified by the client, get the one
1704 	 * associated with the calling process.
1705 	 */
1706 	if ((dup = ct->d_upcall) != NULL)
1707 		res = cred2ucred(dup->du_cred, p0.p_pid, NULL, CRED());
1708 	else
1709 		res = cred2ucred(caller->t_cred, p->p_pid, NULL, CRED());
1710 
1711 	mutex_enter(&door_knob);
1712 	DOOR_T_RELEASE(ct);
1713 	mutex_exit(&door_knob);
1714 
1715 	err = copyout(res, uch, res->uc_size);
1716 
1717 	kmem_free(res, res->uc_size);
1718 
1719 	if (err != 0)
1720 		return (set_errno(EFAULT));
1721 
1722 	return (0);
1723 }
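/*
 * Illustrative sketch (not part of this file): a server procedure fetching
 * its caller's credentials with door_ucred(3C), which lands in the
 * door_ucred() handler above.  The function names are hypothetical.
 */
#include <door.h>
#include <stdio.h>
#include <ucred.h>

/* ARGSUSED */
static void
example_authenticated_proc(void *cookie, char *argp, size_t arg_size,
    door_desc_t *dp, uint_t n_desc)
{
	ucred_t *uc = NULL;
	char ok = 1;

	if (door_ucred(&uc) == 0) {
		(void) printf("call from pid %ld, euid %d\n",
		    (long)ucred_getpid(uc), (int)ucred_geteuid(uc));
		ucred_free(uc);
	} else {
		ok = 0;
	}
	(void) door_return(&ok, sizeof (ok), NULL, 0);
}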
1724 
1725 /*
1726  * Bind the current lwp to the server thread pool associated with 'did'
1727  */
1728 int
1729 door_bind(int did)
1730 {
1731 	door_node_t	*dp;
1732 	door_server_t	*st;
1733 
1734 	if ((dp = door_lookup(did, NULL)) == NULL) {
1735 		/* Not a door */
1736 		return (set_errno(EBADF));
1737 	}
1738 
1739 	/*
1740 	 * Can't bind to a non-private door, and can't bind to a door
1741 	 * served by another process.
1742 	 */
1743 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1744 	    dp->door_target != curproc) {
1745 		releasef(did);
1746 		return (set_errno(EINVAL));
1747 	}
1748 
1749 	st = door_my_server(1);
1750 	if (st->d_pool)
1751 		door_unbind_thread(st->d_pool);
1752 	st->d_pool = dp;
1753 	st->d_invbound = 0;
1754 	door_bind_thread(dp);
1755 	releasef(did);
1756 
1757 	return (0);
1758 }
1759 
1760 /*
1761  * Unbind the current lwp from its server thread pool
1762  */
1763 int
1764 door_unbind(void)
1765 {
1766 	door_server_t *st;
1767 
1768 	if ((st = door_my_server(0)) == NULL)
1769 		return (set_errno(EBADF));
1770 
1771 	if (st->d_invbound) {
1772 		ASSERT(st->d_pool == NULL);
1773 		st->d_invbound = 0;
1774 		return (0);
1775 	}
1776 	if (st->d_pool == NULL)
1777 		return (set_errno(EBADF));
1778 	door_unbind_thread(st->d_pool);
1779 	st->d_pool = NULL;
1780 	return (0);
1781 }
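/*
 * Illustrative sketch (not part of this file): how a process feeds the
 * private pool that door_bind() above attaches threads to.  A door created
 * with DOOR_PRIVATE gets its service threads from a door_server_create(3C)
 * handler, and each new thread binds itself before parking in
 * door_return(3C).  Names and synchronization details are hypothetical.
 */
#include <door.h>
#include <pthread.h>

static pthread_mutex_t dfd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dfd_cv = PTHREAD_COND_INITIALIZER;
static int private_dfd = -1;

static void *
example_pool_thread(void *arg)
{
	/* wait until the door descriptor is known, then join its pool */
	(void) pthread_mutex_lock(&dfd_lock);
	while (private_dfd == -1)
		(void) pthread_cond_wait(&dfd_cv, &dfd_lock);
	(void) pthread_mutex_unlock(&dfd_lock);

	if (door_bind(private_dfd) == 0)
		(void) door_return(NULL, 0, NULL, 0);	/* wait for calls */
	return (NULL);
}

/* called by libc whenever this door wants another server thread */
/* ARGSUSED */
static void
example_create_proc(door_info_t *dip)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, example_pool_thread, NULL) == 0)
		(void) pthread_detach(tid);
}

int
example_private_door(void (*proc)(void *, char *, size_t, door_desc_t *,
    uint_t))
{
	int fd;

	(void) door_server_create(example_create_proc);
	if ((fd = door_create(proc, NULL, DOOR_PRIVATE)) < 0)
		return (-1);

	(void) pthread_mutex_lock(&dfd_lock);
	private_dfd = fd;
	(void) pthread_cond_broadcast(&dfd_cv);
	(void) pthread_mutex_unlock(&dfd_lock);
	return (fd);
}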
1782 
1783 /*
1784  * Create a descriptor for the associated file and fill in the
1785  * attributes associated with it.
1786  *
1787  * Return 0 for success, -1 otherwise.
1788  */
1789 int
1790 door_insert(struct file *fp, door_desc_t *dp)
1791 {
1792 	struct vnode *vp;
1793 	int	fd;
1794 	door_attr_t attributes = DOOR_DESCRIPTOR;
1795 
1796 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1797 	if ((fd = ufalloc(0)) == -1)
1798 		return (-1);
1799 	setf(fd, fp);
1800 	dp->d_data.d_desc.d_descriptor = fd;
1801 
1802 	/* Fill in the attributes */
1803 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1804 		vp = fp->f_vnode;
1805 	if (vp && vp->v_type == VDOOR) {
1806 		if (VTOD(vp)->door_target == curproc)
1807 			attributes |= DOOR_LOCAL;
1808 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1809 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1810 	}
1811 	dp->d_attributes = attributes;
1812 	return (0);
1813 }
1814 
1815 /*
1816  * Return an available thread for this server.  A NULL return value indicates
1817  * that either:
1818  *	The door has been revoked, or
1819  *	a signal was received.
1820  * The two conditions can be differentiated using DOOR_INVALID(dp).
1821  */
1822 static kthread_t *
1823 door_get_server(door_node_t *dp)
1824 {
1825 	kthread_t **ktp;
1826 	kthread_t *server_t;
1827 	door_pool_t *pool;
1828 	door_server_t *st;
1829 	int signalled;
1830 
1831 	disp_lock_t *tlp;
1832 	cpu_t *cp;
1833 
1834 	ASSERT(MUTEX_HELD(&door_knob));
1835 
1836 	if (dp->door_flags & DOOR_PRIVATE)
1837 		pool = &dp->door_servers;
1838 	else
1839 		pool = &dp->door_target->p_server_threads;
1840 
1841 	for (;;) {
1842 		/*
1843 		 * We search the thread pool, looking for a server thread
1844 		 * ready to take an invocation (i.e. one which is still
1845 		 * sleeping on a shuttle object).  If none are available,
1846 		 * we sleep on the pool's CV, and will be signaled when a
1847 		 * thread is added to the pool.
1848 		 *
1849 		 * This relies on the fact that once a thread in the thread
1850 		 * pool wakes up, it *must* remove and add itself to the pool
1851 		 * before it can receive door calls.
1852 		 */
1853 		if (DOOR_INVALID(dp))
1854 			return (NULL);	/* Target has become invalid */
1855 
1856 		for (ktp = &pool->dp_threads;
1857 		    (server_t = *ktp) != NULL;
1858 		    ktp = &st->d_servers) {
1859 			st = DOOR_SERVER(server_t->t_door);
1860 
1861 			thread_lock(server_t);
1862 			if (server_t->t_state == TS_SLEEP &&
1863 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1864 				break;
1865 			thread_unlock(server_t);
1866 		}
1867 		if (server_t != NULL)
1868 			break;		/* we've got a live one! */
1869 
1870 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1871 		    &signalled)) {
1872 			/*
1873 			 * If we were signaled and the door is still
1874 			 * valid, pass the signal on to another waiter.
1875 			 */
1876 			if (signalled && !DOOR_INVALID(dp))
1877 				cv_signal(&pool->dp_cv);
1878 			return (NULL);	/* Got a signal */
1879 		}
1880 	}
1881 
1882 	/*
1883 	 * We've got a thread_lock()ed thread which is still on the
1884 	 * shuttle.  Take it off the list of available server threads
1885 	 * and mark it as ONPROC.  We are committed to resuming this
1886 	 * thread now.
1887 	 */
1888 	tlp = server_t->t_lockp;
1889 	cp = CPU;
1890 
1891 	*ktp = st->d_servers;
1892 	st->d_servers = NULL;
1893 	/*
1894 	 * Setting t_disp_queue prevents erroneous preemptions
1895 	 * if this thread is still in execution on another processor
1896 	 */
1897 	server_t->t_disp_queue = cp->cpu_disp;
1898 	CL_ACTIVE(server_t);
1899 	/*
1900 	 * We are calling thread_onproc() instead of
1901 	 * THREAD_ONPROC() because compiler can reorder
1902 	 * the two stores of t_state and t_lockp in
1903 	 * THREAD_ONPROC().
1904 	 */
1905 	thread_onproc(server_t, cp);
1906 	disp_lock_exit(tlp);
1907 	return (server_t);
1908 }
1909 
1910 /*
1911  * Put a server thread back in the pool.
1912  */
1913 static void
1914 door_release_server(door_node_t *dp, kthread_t *t)
1915 {
1916 	door_server_t *st = DOOR_SERVER(t->t_door);
1917 	door_pool_t *pool;
1918 
1919 	ASSERT(MUTEX_HELD(&door_knob));
1920 	st->d_active = NULL;
1921 	st->d_caller = NULL;
1922 	st->d_layout_done = 0;
1923 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1924 		ASSERT(dp->door_target == NULL ||
1925 		    dp->door_target == ttoproc(t));
1926 		pool = &dp->door_servers;
1927 	} else {
1928 		pool = &ttoproc(t)->p_server_threads;
1929 	}
1930 
1931 	st->d_servers = pool->dp_threads;
1932 	pool->dp_threads = t;
1933 
1934 	/* If someone is waiting for a server thread, wake it up */
1935 	cv_signal(&pool->dp_cv);
1936 }
1937 
1938 /*
1939  * Remove a server thread from the pool if present.
1940  */
1941 static void
1942 door_server_exit(proc_t *p, kthread_t *t)
1943 {
1944 	door_pool_t *pool;
1945 	kthread_t **next;
1946 	door_server_t *st = DOOR_SERVER(t->t_door);
1947 
1948 	ASSERT(MUTEX_HELD(&door_knob));
1949 	if (st->d_pool != NULL) {
1950 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1951 		pool = &st->d_pool->door_servers;
1952 	} else {
1953 		pool = &p->p_server_threads;
1954 	}
1955 
1956 	next = &pool->dp_threads;
1957 	while (*next != NULL) {
1958 		if (*next == t) {
1959 			*next = DOOR_SERVER(t->t_door)->d_servers;
1960 			return;
1961 		}
1962 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1963 	}
1964 }
1965 
1966 /*
1967  * Lookup the door descriptor. Caller must call releasef when finished
1968  * with associated door.
1969  */
1970 static door_node_t *
1971 door_lookup(int did, file_t **fpp)
1972 {
1973 	vnode_t	*vp;
1974 	file_t *fp;
1975 
1976 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1977 	if ((fp = getf(did)) == NULL)
1978 		return (NULL);
1979 	/*
1980 	 * Use the underlying vnode (we may be namefs mounted)
1981 	 */
1982 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
1983 		vp = fp->f_vnode;
1984 
1985 	if (vp == NULL || vp->v_type != VDOOR) {
1986 		releasef(did);
1987 		return (NULL);
1988 	}
1989 
1990 	if (fpp)
1991 		*fpp = fp;
1992 
1993 	return (VTOD(vp));
1994 }
1995 
1996 /*
1997  * The current thread is exiting, so clean up any pending
1998  * invocation details
1999  */
2000 void
2001 door_slam(void)
2002 {
2003 	door_node_t *dp;
2004 	door_data_t *dt;
2005 	door_client_t *ct;
2006 	door_server_t *st;
2007 
2008 	/*
2009 	 * If we are an active door server, notify our
2010 	 * client that we are exiting and revoke our door.
2011 	 */
2012 	if ((dt = door_my_data(0)) == NULL)
2013 		return;
2014 	ct = DOOR_CLIENT(dt);
2015 	st = DOOR_SERVER(dt);
2016 
2017 	mutex_enter(&door_knob);
2018 	for (;;) {
2019 		if (DOOR_T_HELD(ct))
2020 			cv_wait(&ct->d_cv, &door_knob);
2021 		else if (DOOR_T_HELD(st))
2022 			cv_wait(&st->d_cv, &door_knob);
2023 		else
2024 			break;			/* neither flag is set */
2025 	}
2026 	curthread->t_door = NULL;
2027 	if ((dp = st->d_active) != NULL) {
2028 		kthread_t *t = st->d_caller;
2029 		proc_t *p = curproc;
2030 
2031 		/* Revoke our door if the process is exiting */
2032 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
2033 			door_list_delete(dp);
2034 			dp->door_target = NULL;
2035 			dp->door_flags |= DOOR_REVOKED;
2036 			if (dp->door_flags & DOOR_PRIVATE)
2037 				cv_broadcast(&dp->door_servers.dp_cv);
2038 			else
2039 				cv_broadcast(&p->p_server_threads.dp_cv);
2040 		}
2041 
2042 		if (t != NULL) {
2043 			/*
2044 			 * Let the caller know we are gone
2045 			 */
2046 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
2047 			thread_lock(t);
2048 			if (t->t_state == TS_SLEEP &&
2049 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
2050 				setrun_locked(t);
2051 			thread_unlock(t);
2052 		}
2053 	}
2054 	mutex_exit(&door_knob);
2055 	if (st->d_pool)
2056 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
2057 	kmem_free(dt, sizeof (door_data_t));
2058 }
2059 
2060 /*
2061  * Set DOOR_REVOKED for all doors of the current process. This is called
2062  * on exit, before all lwps are terminated, so that door calls will
2063  * return with an error.
2064  */
2065 void
2066 door_revoke_all()
2067 {
2068 	door_node_t *dp;
2069 	proc_t *p = ttoproc(curthread);
2070 
2071 	mutex_enter(&door_knob);
2072 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2073 		ASSERT(dp->door_target == p);
2074 		dp->door_flags |= DOOR_REVOKED;
2075 		if (dp->door_flags & DOOR_PRIVATE)
2076 			cv_broadcast(&dp->door_servers.dp_cv);
2077 	}
2078 	cv_broadcast(&p->p_server_threads.dp_cv);
2079 	mutex_exit(&door_knob);
2080 }
2081 
2082 /*
2083  * The process is exiting, and all doors it created need to be revoked.
2084  */
2085 void
2086 door_exit(void)
2087 {
2088 	door_node_t *dp;
2089 	proc_t *p = ttoproc(curthread);
2090 
2091 	ASSERT(p->p_lwpcnt == 1);
2092 	/*
2093 	 * Walk the list of active doors created by this process and
2094 	 * revoke them all.
2095 	 */
2096 	mutex_enter(&door_knob);
2097 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2098 		dp->door_target = NULL;
2099 		dp->door_flags |= DOOR_REVOKED;
2100 		if (dp->door_flags & DOOR_PRIVATE)
2101 			cv_broadcast(&dp->door_servers.dp_cv);
2102 	}
2103 	cv_broadcast(&p->p_server_threads.dp_cv);
2104 	/* Clear the list */
2105 	p->p_door_list = NULL;
2106 
2107 	/* Clean up the unref list */
2108 	while ((dp = p->p_unref_list) != NULL) {
2109 		p->p_unref_list = dp->door_ulist;
2110 		dp->door_ulist = NULL;
2111 		mutex_exit(&door_knob);
2112 		VN_RELE(DTOV(dp));
2113 		mutex_enter(&door_knob);
2114 	}
2115 	mutex_exit(&door_knob);
2116 }
2117 
2118 
2119 /*
2120  * The process is executing forkall(), and we need to flag threads that
2121  * are bound to a door in the child.  This makes door_return fail in the
2122  * child threads unless they call door_unbind first.
2123  */
2124 void
2125 door_fork(kthread_t *parent, kthread_t *child)
2126 {
2127 	door_data_t *pt = parent->t_door;
2128 	door_server_t *st = DOOR_SERVER(pt);
2129 	door_data_t *dt;
2130 
2131 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2132 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2133 		/* parent thread is bound to a door */
2134 		dt = child->t_door =
2135 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2136 		DOOR_SERVER(dt)->d_invbound = 1;
2137 	}
2138 }
2139 
2140 /*
2141  * Deliver queued unrefs to appropriate door server.
2142  */
2143 static int
2144 door_unref(void)
2145 {
2146 	door_node_t	*dp;
2147 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2148 	proc_t *p = ttoproc(curthread);
2149 
2150 	/* make sure there's only one unref thread per process */
2151 	mutex_enter(&door_knob);
2152 	if (p->p_unref_thread) {
2153 		mutex_exit(&door_knob);
2154 		return (set_errno(EALREADY));
2155 	}
2156 	p->p_unref_thread = 1;
2157 	mutex_exit(&door_knob);
2158 
2159 	(void) door_my_data(1);			/* create info, if necessary */
2160 
2161 	for (;;) {
2162 		mutex_enter(&door_knob);
2163 
2164 		/* Grab a queued request */
2165 		while ((dp = p->p_unref_list) == NULL) {
2166 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2167 				/*
2168 				 * Interrupted.
2169 				 * Return so we can finish forkall() or exit().
2170 				 */
2171 				p->p_unref_thread = 0;
2172 				mutex_exit(&door_knob);
2173 				return (set_errno(EINTR));
2174 			}
2175 		}
2176 		p->p_unref_list = dp->door_ulist;
2177 		dp->door_ulist = NULL;
2178 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2179 		mutex_exit(&door_knob);
2180 
2181 		(void) door_upcall(DTOV(dp), &unref_args, NULL, SIZE_MAX, 0);
2182 
2183 		if (unref_args.rbuf != 0) {
2184 			kmem_free(unref_args.rbuf, unref_args.rsize);
2185 			unref_args.rbuf = NULL;
2186 			unref_args.rsize = 0;
2187 		}
2188 
2189 		mutex_enter(&door_knob);
2190 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2191 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2192 		mutex_exit(&door_knob);
2193 		VN_RELE(DTOV(dp));
2194 	}
2195 }
2196 
2197 
2198 /*
2199  * Deliver queued unrefs to kernel door server.
2200  */
2201 /* ARGSUSED */
2202 static void
2203 door_unref_kernel(caddr_t arg)
2204 {
2205 	door_node_t	*dp;
2206 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2207 	proc_t *p = ttoproc(curthread);
2208 	callb_cpr_t cprinfo;
2209 
2210 	/* should only be one of these */
2211 	mutex_enter(&door_knob);
2212 	if (p->p_unref_thread) {
2213 		mutex_exit(&door_knob);
2214 		return;
2215 	}
2216 	p->p_unref_thread = 1;
2217 	mutex_exit(&door_knob);
2218 
2219 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2220 
2221 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2222 	for (;;) {
2223 		mutex_enter(&door_knob);
2224 		/* Grab a queued request */
2225 		while ((dp = p->p_unref_list) == NULL) {
2226 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2227 			cv_wait(&p->p_unref_cv, &door_knob);
2228 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2229 		}
2230 		p->p_unref_list = dp->door_ulist;
2231 		dp->door_ulist = NULL;
2232 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2233 		mutex_exit(&door_knob);
2234 
2235 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2236 
2237 		mutex_enter(&door_knob);
2238 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2239 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2240 		mutex_exit(&door_knob);
2241 		VN_RELE(DTOV(dp));
2242 	}
2243 }
2244 
2245 
2246 /*
2247  * Queue an unref invocation for processing by the door's server process.
2248  * The door may or may not be revoked at this point.
2249  */
2250 void
2251 door_deliver_unref(door_node_t *d)
2252 {
2253 	struct proc *server = d->door_target;
2254 
2255 	ASSERT(MUTEX_HELD(&door_knob));
2256 	ASSERT(d->door_active == 0);
2257 
2258 	if (server == NULL)
2259 		return;
2260 	/*
2261 	 * Create a lwp to deliver unref calls if one isn't already running.
2262 	 *
2263 	 * A separate thread is used to deliver unrefs since the current
2264 	 * thread may be holding resources (e.g. locks) in user land that
2265 	 * may be needed by the unref processing. This would cause a
2266 	 * deadlock.
2267 	 */
2268 	if (d->door_flags & DOOR_UNREF_MULTI) {
2269 		/* multiple unrefs */
2270 		d->door_flags &= ~DOOR_DELAY;
2271 	} else {
2272 		/* Only 1 unref per door */
2273 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2274 	}
2275 	mutex_exit(&door_knob);
2276 
2277 	/*
2278 	 * Need to bump the vnode count before putting the door on the
2279 	 * list so it doesn't get prematurely released by door_unref.
2280 	 */
2281 	VN_HOLD(DTOV(d));
2282 
2283 	mutex_enter(&door_knob);
2284 	/* is this door already on the unref list? */
2285 	if (d->door_flags & DOOR_UNREF_MULTI) {
2286 		door_node_t *dp;
2287 		for (dp = server->p_unref_list; dp != NULL;
2288 		    dp = dp->door_ulist) {
2289 			if (d == dp) {
2290 				/* already there, don't need to add another */
2291 				mutex_exit(&door_knob);
2292 				VN_RELE(DTOV(d));
2293 				mutex_enter(&door_knob);
2294 				return;
2295 			}
2296 		}
2297 	}
2298 	ASSERT(d->door_ulist == NULL);
2299 	d->door_ulist = server->p_unref_list;
2300 	server->p_unref_list = d;
2301 	cv_broadcast(&server->p_unref_cv);
2302 }
2303 
2304 /*
2305  * The caller's buffer isn't big enough for all of the data/fds.  Allocate
2306  * space in the caller's address space for the results and copy the data
2307  * there.
2308  *
2309  * For EOVERFLOW, we must clean up the server's door descriptors.
2310  */
2311 static int
2312 door_overflow(
2313 	kthread_t	*caller,
2314 	caddr_t		data_ptr,	/* data location */
2315 	size_t		data_size,	/* data size */
2316 	door_desc_t	*desc_ptr,	/* descriptor location */
2317 	uint_t		desc_num)	/* descriptor size */
2318 {
2319 	proc_t *callerp = ttoproc(caller);
2320 	struct as *as = callerp->p_as;
2321 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2322 	caddr_t	addr;			/* Resulting address in target */
2323 	size_t	rlen;			/* Rounded len */
2324 	size_t	len;
2325 	uint_t	i;
2326 	size_t	ds = desc_num * sizeof (door_desc_t);
2327 
2328 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2329 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2330 
2331 	/* Do initial overflow check */
2332 	if (!ufcanalloc(callerp, desc_num))
2333 		return (EMFILE);
2334 
2335 	/*
2336 	 * Allocate space for the results in the caller's address space
2337 	 */
2338 	rlen = roundup(data_size + ds, PAGESIZE);
2339 	as_rangelock(as);
2340 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2341 	if (addr == NULL ||
2342 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2343 		/* No virtual memory available, or anon mapping failed */
2344 		as_rangeunlock(as);
2345 		if (!ct->d_kernel && desc_num > 0) {
2346 			int error = door_release_fds(desc_ptr, desc_num);
2347 			if (error)
2348 				return (error);
2349 		}
2350 		return (EOVERFLOW);
2351 	}
2352 	as_rangeunlock(as);
2353 
2354 	if (ct->d_kernel)
2355 		goto out;
2356 
2357 	if (data_size != 0) {
2358 		caddr_t	src = data_ptr;
2359 		caddr_t saddr = addr;
2360 
2361 		/* Copy any data */
2362 		len = data_size;
2363 		while (len != 0) {
2364 			int	amount;
2365 			int	error;
2366 
2367 			amount = len > PAGESIZE ? PAGESIZE : len;
2368 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2369 				(void) as_unmap(as, addr, rlen);
2370 				return (error);
2371 			}
2372 			saddr += amount;
2373 			src += amount;
2374 			len -= amount;
2375 		}
2376 	}
2377 	/* Copy any fd's */
2378 	if (desc_num != 0) {
2379 		door_desc_t	*didpp, *start;
2380 		struct file	**fpp;
2381 		int		fpp_size;
2382 
2383 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2384 		if (copyin_nowatch(desc_ptr, didpp, ds)) {
2385 			kmem_free(start, ds);
2386 			(void) as_unmap(as, addr, rlen);
2387 			return (EFAULT);
2388 		}
2389 
2390 		fpp_size = desc_num * sizeof (struct file *);
2391 		if (fpp_size > ct->d_fpp_size) {
2392 			/* make more space */
2393 			if (ct->d_fpp_size)
2394 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2395 			ct->d_fpp_size = fpp_size;
2396 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2397 		}
2398 		fpp = ct->d_fpp;
2399 
2400 		for (i = 0; i < desc_num; i++) {
2401 			struct file *fp;
2402 			int fd = didpp->d_data.d_desc.d_descriptor;
2403 
2404 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2405 			    (fp = getf(fd)) == NULL) {
2406 				/* close translated references */
2407 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2408 				/* close untranslated references */
2409 				door_fd_rele(didpp, desc_num - i, 0);
2410 				kmem_free(start, ds);
2411 				(void) as_unmap(as, addr, rlen);
2412 				return (EINVAL);
2413 			}
2414 			mutex_enter(&fp->f_tlock);
2415 			fp->f_count++;
2416 			mutex_exit(&fp->f_tlock);
2417 
2418 			*fpp = fp;
2419 			releasef(fd);
2420 
2421 			if (didpp->d_attributes & DOOR_RELEASE) {
2422 				/* release passed reference */
2423 				(void) closeandsetf(fd, NULL);
2424 			}
2425 
2426 			fpp++; didpp++;
2427 		}
2428 		kmem_free(start, ds);
2429 	}
2430 
2431 out:
2432 	ct->d_overflow = 1;
2433 	ct->d_args.rbuf = addr;
2434 	ct->d_args.rsize = rlen;
2435 	return (0);
2436 }
2437 
2438 /*
2439  * Transfer arguments from the client to the server.
2440  */
2441 static int
2442 door_args(kthread_t *server, int is_private)
2443 {
2444 	door_server_t *st = DOOR_SERVER(server->t_door);
2445 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2446 	uint_t	ndid;
2447 	size_t	dsize;
2448 	int	error;
2449 
2450 	ASSERT(DOOR_T_HELD(st));
2451 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2452 
2453 	ndid = ct->d_args.desc_num;
2454 	if (ndid > door_max_desc)
2455 		return (E2BIG);
2456 
2457 	/*
2458 	 * Get the stack layout, and fail now if it won't fit.
2459 	 */
2460 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2461 	if (error != 0)
2462 		return (error);
2463 
2464 	dsize = ndid * sizeof (door_desc_t);
2465 	if (ct->d_args.data_size != 0) {
2466 		if (ct->d_args.data_size <= door_max_arg) {
2467 			/*
2468 			 * Use a 2 copy method for small amounts of data
2469 			 *
2470 			 * Allocate a little more than we need for the
2471 			 * args, in the hope that the results will fit
2472 			 * without having to reallocate a buffer
2473 			 */
2474 			ASSERT(ct->d_buf == NULL);
2475 			ct->d_bufsize = roundup(ct->d_args.data_size,
2476 			    DOOR_ROUND);
2477 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2478 			if (copyin_nowatch(ct->d_args.data_ptr,
2479 			    ct->d_buf, ct->d_args.data_size) != 0) {
2480 				kmem_free(ct->d_buf, ct->d_bufsize);
2481 				ct->d_buf = NULL;
2482 				ct->d_bufsize = 0;
2483 				return (EFAULT);
2484 			}
2485 		} else {
2486 			struct as	*as;
2487 			caddr_t		src;
2488 			caddr_t		dest;
2489 			size_t		len = ct->d_args.data_size;
2490 			uintptr_t	base;
2491 
2492 			/*
2493 			 * Use a 1 copy method
2494 			 */
2495 			as = ttoproc(server)->p_as;
2496 			src = ct->d_args.data_ptr;
2497 
2498 			dest = st->d_layout.dl_datap;
2499 			base = (uintptr_t)dest;
2500 
2501 			/*
2502 			 * Copy data directly into server.  We proceed
2503 			 * downward from the top of the stack, to mimic
2504 			 * normal stack usage. This allows the guard page
2505 			 * to stop us before we corrupt anything.
2506 			 */
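			/*
			 * Illustrative walk-through (not part of the original
			 * source): with an assumed PAGESIZE of 0x1000, a base
			 * of 0x2f40 and a len of 0x2200, the loop below copies
			 * [0x5000..0x5140), then [0x4000..0x5000), then
			 * [0x3000..0x4000), and finally [0x2f40..0x3000),
			 * fixing 'start' up to 'base' on that last iteration.
			 */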
2507 			while (len != 0) {
2508 				uintptr_t start;
2509 				uintptr_t end;
2510 				uintptr_t offset;
2511 				size_t	amount;
2512 
2513 				/*
2514 				 * Locate the next part to copy.
2515 				 */
2516 				end = base + len;
2517 				start = P2ALIGN(end - 1, PAGESIZE);
2518 
2519 				/*
2520 				 * if we are on the final (first) page, fix
2521 				 * up the start position.
2522 				 */
2523 				if (P2ALIGN(base, PAGESIZE) == start)
2524 					start = base;
2525 
2526 				offset = start - base;	/* the copy offset */
2527 				amount = end - start;	/* # bytes to copy */
2528 
2529 				ASSERT(amount > 0 && amount <= len &&
2530 				    amount <= PAGESIZE);
2531 
2532 				error = door_copy(as, src + offset,
2533 				    dest + offset, amount);
2534 				if (error != 0)
2535 					return (error);
2536 				len -= amount;
2537 			}
2538 		}
2539 	}
2540 	/*
2541 	 * Copyin the door args and translate them into files
2542 	 */
2543 	if (ndid != 0) {
2544 		door_desc_t	*didpp;
2545 		door_desc_t	*start;
2546 		struct file	**fpp;
2547 
2548 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2549 
2550 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2551 			kmem_free(start, dsize);
2552 			return (EFAULT);
2553 		}
2554 		ct->d_fpp_size = ndid * sizeof (struct file *);
2555 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2556 		fpp = ct->d_fpp;
2557 		while (ndid--) {
2558 			struct file *fp;
2559 			int fd = didpp->d_data.d_desc.d_descriptor;
2560 
2561 			/* We only understand file descriptors as passed objs */
2562 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2563 			    (fp = getf(fd)) == NULL) {
2564 				/* close translated references */
2565 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2566 				/* close untranslated references */
2567 				door_fd_rele(didpp, ndid + 1, 0);
2568 				kmem_free(start, dsize);
2569 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2570 				ct->d_fpp = NULL;
2571 				ct->d_fpp_size = 0;
2572 				return (EINVAL);
2573 			}
2574 			/* Hold the fp */
2575 			mutex_enter(&fp->f_tlock);
2576 			fp->f_count++;
2577 			mutex_exit(&fp->f_tlock);
2578 
2579 			*fpp = fp;
2580 			releasef(fd);
2581 
2582 			if (didpp->d_attributes & DOOR_RELEASE) {
2583 				/* release passed reference */
2584 				(void) closeandsetf(fd, NULL);
2585 			}
2586 
2587 			fpp++; didpp++;
2588 		}
2589 		kmem_free(start, dsize);
2590 	}
2591 	return (0);
2592 }
2593 
2594 /*
2595  * Transfer arguments from a user client to a kernel server.  This copies in
2596  * descriptors and translates them into door handles.  It doesn't touch the
2597  * other data, letting the kernel server deal with that (to avoid needing
2598  * to copy the data twice).
2599  */
2600 static int
2601 door_translate_in(void)
2602 {
2603 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2604 	uint_t	ndid;
2605 
2606 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2607 	ndid = ct->d_args.desc_num;
2608 	if (ndid > door_max_desc)
2609 		return (E2BIG);
2610 	/*
2611 	 * Copyin the door args and translate them into door handles.
2612 	 */
2613 	if (ndid != 0) {
2614 		door_desc_t	*didpp;
2615 		door_desc_t	*start;
2616 		size_t		dsize = ndid * sizeof (door_desc_t);
2617 		struct file	*fp;
2618 
2619 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2620 
2621 		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2622 			kmem_free(start, dsize);
2623 			return (EFAULT);
2624 		}
2625 		while (ndid--) {
2626 			vnode_t	*vp;
2627 			int fd = didpp->d_data.d_desc.d_descriptor;
2628 
2629 			/*
2630 			 * We only understand file descriptors as passed objs
2631 			 */
2632 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2633 			    (fp = getf(fd)) != NULL) {
2634 				didpp->d_data.d_handle = FTODH(fp);
2635 				/* Hold the door */
2636 				door_ki_hold(didpp->d_data.d_handle);
2637 
2638 				releasef(fd);
2639 
2640 				if (didpp->d_attributes & DOOR_RELEASE) {
2641 					/* release passed reference */
2642 					(void) closeandsetf(fd, NULL);
2643 				}
2644 
2645 				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
2646 					vp = fp->f_vnode;
2647 
2648 				/* Set attributes */
2649 				didpp->d_attributes = DOOR_HANDLE |
2650 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2651 			} else {
2652 				/* close translated references */
2653 				door_fd_close(start, didpp - start);
2654 				/* close untranslated references */
2655 				door_fd_rele(didpp, ndid + 1, 0);
2656 				kmem_free(start, dsize);
2657 				return (EINVAL);
2658 			}
2659 			didpp++;
2660 		}
2661 		ct->d_args.desc_ptr = start;
2662 	}
2663 	return (0);
2664 }
2665 
2666 /*
2667  * Translate door arguments from kernel to user.  This copies the passed
2668  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2669  * and for data returned by a door_call to a kernel server.
2670  */
2671 static int
2672 door_translate_out(void)
2673 {
2674 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2675 	uint_t	ndid;
2676 
2677 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2678 	ndid = ct->d_args.desc_num;
2679 	if (ndid > door_max_desc) {
2680 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2681 		return (E2BIG);
2682 	}
2683 	/*
2684 	 * Translate the door args into files
2685 	 */
2686 	if (ndid != 0) {
2687 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2688 		struct file	**fpp;
2689 
2690 		ct->d_fpp_size = ndid * sizeof (struct file *);
2691 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2692 		while (ndid--) {
2693 			struct file *fp = NULL;
2694 			int fd = -1;
2695 
2696 			/*
2697 			 * We understand file descriptors and door
2698 			 * handles as passed objs.
2699 			 */
2700 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2701 				fd = didpp->d_data.d_desc.d_descriptor;
2702 				fp = getf(fd);
2703 			} else if (didpp->d_attributes & DOOR_HANDLE)
2704 				fp = DHTOF(didpp->d_data.d_handle);
2705 			if (fp != NULL) {
2706 				/* Hold the fp */
2707 				mutex_enter(&fp->f_tlock);
2708 				fp->f_count++;
2709 				mutex_exit(&fp->f_tlock);
2710 
2711 				*fpp = fp;
2712 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2713 					releasef(fd);
2714 				if (didpp->d_attributes & DOOR_RELEASE) {
2715 					/* release passed reference */
2716 					if (fd >= 0)
2717 						(void) closeandsetf(fd, NULL);
2718 					else
2719 						(void) closef(fp);
2720 				}
2721 			} else {
2722 				/* close translated references */
2723 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2724 				/* close untranslated references */
2725 				door_fd_rele(didpp, ndid + 1, 1);
2726 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2727 				ct->d_fpp = NULL;
2728 				ct->d_fpp_size = 0;
2729 				return (EINVAL);
2730 			}
2731 			fpp++; didpp++;
2732 		}
2733 	}
2734 	return (0);
2735 }
2736 
2737 /*
2738  * Move the results from the server to the client
2739  */
2740 static int
2741 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2742     door_desc_t *desc_ptr, uint_t desc_num)
2743 {
2744 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2745 	door_upcall_t	*dup = ct->d_upcall;
2746 	size_t		dsize;
2747 	size_t		rlen;
2748 	size_t		result_size;
2749 
2750 	ASSERT(DOOR_T_HELD(ct));
2751 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2752 
2753 	if (ct->d_noresults)
2754 		return (E2BIG);		/* No results expected */
2755 
2756 	if (desc_num > door_max_desc)
2757 		return (E2BIG);		/* Too many descriptors */
2758 
2759 	dsize = desc_num * sizeof (door_desc_t);
2760 	/*
2761 	 * Check if the results are bigger than the client's buffer
2762 	 */
2763 	if (dsize)
2764 		rlen = roundup(data_size, sizeof (door_desc_t));
2765 	else
2766 		rlen = data_size;
2767 	if ((result_size = rlen + dsize) == 0)
2768 		return (0);
2769 
2770 	if (dup != NULL) {
2771 		if (desc_num > dup->du_max_descs)
2772 			return (EMFILE);
2773 
2774 		if (data_size > dup->du_max_data)
2775 			return (E2BIG);
2776 
2777 		/*
2778 		 * Handle upcalls
2779 		 */
2780 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2781 			/*
2782 			 * If there's no return buffer or the buffer is too
2783 			 * small, allocate a new one.  The old buffer (if it
2784 			 * exists) will be freed by the upcall client.
2785 			 */
2786 			if (result_size > door_max_upcall_reply)
2787 				return (E2BIG);
2788 			ct->d_args.rsize = result_size;
2789 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2790 		}
2791 		ct->d_args.data_ptr = ct->d_args.rbuf;
2792 		if (data_size != 0 &&
2793 		    copyin_nowatch(data_ptr, ct->d_args.data_ptr,
2794 		    data_size) != 0)
2795 			return (EFAULT);
2796 	} else if (result_size > ct->d_args.rsize) {
2797 		return (door_overflow(caller, data_ptr, data_size,
2798 		    desc_ptr, desc_num));
2799 	} else if (data_size != 0) {
2800 		if (data_size <= door_max_arg) {
2801 			/*
2802 			 * Use a 2 copy method for small amounts of data
2803 			 */
2804 			if (ct->d_buf == NULL) {
2805 				ct->d_bufsize = data_size;
2806 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2807 			} else if (ct->d_bufsize < data_size) {
2808 				kmem_free(ct->d_buf, ct->d_bufsize);
2809 				ct->d_bufsize = data_size;
2810 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2811 			}
2812 			if (copyin_nowatch(data_ptr, ct->d_buf, data_size) != 0)
2813 				return (EFAULT);
2814 		} else {
2815 			struct as *as = ttoproc(caller)->p_as;
2816 			caddr_t	dest = ct->d_args.rbuf;
2817 			caddr_t	src = data_ptr;
2818 			size_t	len = data_size;
2819 
2820 			/* Copy data directly into client */
2821 			while (len != 0) {
2822 				uint_t	amount;
2823 				uint_t	max;
2824 				uint_t	off;
2825 				int	error;
2826 
2827 				off = (uintptr_t)dest & PAGEOFFSET;
2828 				if (off)
2829 					max = PAGESIZE - off;
2830 				else
2831 					max = PAGESIZE;
2832 				amount = len > max ? max : len;
2833 				error = door_copy(as, src, dest, amount);
2834 				if (error != 0)
2835 					return (error);
2836 				dest += amount;
2837 				src += amount;
2838 				len -= amount;
2839 			}
2840 		}
2841 	}
2842 
2843 	/*
2844 	 * Copyin the returned door ids and translate them into door_node_t
2845 	 */
2846 	if (desc_num != 0) {
2847 		door_desc_t *start;
2848 		door_desc_t *didpp;
2849 		struct file **fpp;
2850 		size_t	fpp_size;
2851 		uint_t	i;
2852 
2853 		/* First, check if we would overflow client */
2854 		if (!ufcanalloc(ttoproc(caller), desc_num))
2855 			return (EMFILE);
2856 
2857 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2858 		if (copyin_nowatch(desc_ptr, didpp, dsize)) {
2859 			kmem_free(start, dsize);
2860 			return (EFAULT);
2861 		}
2862 		fpp_size = desc_num * sizeof (struct file *);
2863 		if (fpp_size > ct->d_fpp_size) {
2864 			/* make more space */
2865 			if (ct->d_fpp_size)
2866 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2867 			ct->d_fpp_size = fpp_size;
2868 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2869 		}
2870 		fpp = ct->d_fpp;
2871 
2872 		for (i = 0; i < desc_num; i++) {
2873 			struct file *fp;
2874 			int fd = didpp->d_data.d_desc.d_descriptor;
2875 
2876 			/* Only understand file descriptor results */
2877 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2878 			    (fp = getf(fd)) == NULL) {
2879 				/* close translated references */
2880 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2881 				/* close untranslated references */
2882 				door_fd_rele(didpp, desc_num - i, 0);
2883 				kmem_free(start, dsize);
2884 				return (EINVAL);
2885 			}
2886 
2887 			mutex_enter(&fp->f_tlock);
2888 			fp->f_count++;
2889 			mutex_exit(&fp->f_tlock);
2890 
2891 			*fpp = fp;
2892 			releasef(fd);
2893 
2894 			if (didpp->d_attributes & DOOR_RELEASE) {
2895 				/* release passed reference */
2896 				(void) closeandsetf(fd, NULL);
2897 			}
2898 
2899 			fpp++; didpp++;
2900 		}
2901 		kmem_free(start, dsize);
2902 	}
2903 	return (0);
2904 }
2905 
2906 /*
2907  * Close all the descriptors.
2908  */
2909 static void
2910 door_fd_close(door_desc_t *d, uint_t n)
2911 {
2912 	uint_t	i;
2913 
2914 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2915 	for (i = 0; i < n; i++) {
2916 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2917 			(void) closeandsetf(
2918 			    d->d_data.d_desc.d_descriptor, NULL);
2919 		} else if (d->d_attributes & DOOR_HANDLE) {
2920 			door_ki_rele(d->d_data.d_handle);
2921 		}
2922 		d++;
2923 	}
2924 }
2925 
2926 /*
2927  * Close descriptors that have the DOOR_RELEASE attribute set.
2928  */
2929 void
2930 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2931 {
2932 	uint_t	i;
2933 
2934 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2935 	for (i = 0; i < n; i++) {
2936 		if (d->d_attributes & DOOR_RELEASE) {
2937 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2938 				(void) closeandsetf(
2939 				    d->d_data.d_desc.d_descriptor, NULL);
2940 			} else if (from_kernel &&
2941 			    (d->d_attributes & DOOR_HANDLE)) {
2942 				door_ki_rele(d->d_data.d_handle);
2943 			}
2944 		}
2945 		d++;
2946 	}
2947 }
2948 
2949 /*
2950  * Copy descriptors into the kernel so we can release any marked
2951  * DOOR_RELEASE.
2952  */
2953 int
2954 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2955 {
2956 	size_t dsize;
2957 	door_desc_t *didpp;
2958 	uint_t desc_num;
2959 
2960 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2961 	ASSERT(ndesc != 0);
2962 
2963 	desc_num = MIN(ndesc, door_max_desc);
2964 
2965 	dsize = desc_num * sizeof (door_desc_t);
2966 	didpp = kmem_alloc(dsize, KM_SLEEP);
2967 
2968 	while (ndesc > 0) {
2969 		uint_t count = MIN(ndesc, desc_num);
2970 
2971 		if (copyin_nowatch(desc_ptr, didpp,
2972 		    count * sizeof (door_desc_t))) {
2973 			kmem_free(didpp, dsize);
2974 			return (EFAULT);
2975 		}
2976 		door_fd_rele(didpp, count, 0);
2977 
2978 		ndesc -= count;
2979 		desc_ptr += count;
2980 	}
2981 	kmem_free(didpp, dsize);
2982 	return (0);
2983 }
2984 
2985 /*
2986  * Decrement ref count on all the files passed
2987  */
2988 static void
2989 door_fp_close(struct file **fp, uint_t n)
2990 {
2991 	uint_t	i;
2992 
2993 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2994 
2995 	for (i = 0; i < n; i++)
2996 		(void) closef(fp[i]);
2997 }
2998 
2999 /*
3000  * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
3001  * bytes.
3002  *
3003  * Performs this using 1 mapin and 1 copy operation.
3004  *
3005  * We really should do more than 1 page at a time to improve
3006  * performance, but for now this is treated as an anomalous condition.
3007  */
3008 static int
3009 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
3010 {
3011 	caddr_t	kaddr;
3012 	caddr_t	rdest;
3013 	uint_t	off;
3014 	page_t	**pplist;
3015 	page_t	*pp = NULL;
3016 	int	error = 0;
3017 
3018 	ASSERT(len <= PAGESIZE);
3019 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
3020 	rdest = (caddr_t)((uintptr_t)dest &
3021 	    (uintptr_t)PAGEMASK);	/* Page boundary */
3022 	ASSERT(off + len <= PAGESIZE);
3023 
3024 	/*
3025 	 * Lock down destination page.
3026 	 */
3027 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
3028 		return (E2BIG);
3029 	/*
3030 	 * Check if we have a shadow page list from as_pagelock. If not,
3031 	 * we took the slow path and have to find our page struct the hard
3032 	 * way.
3033 	 */
3034 	if (pplist == NULL) {
3035 		pfn_t	pfnum;
3036 
3037 		/* MMU mapping is already locked down */
3038 		AS_LOCK_ENTER(as, RW_READER);
3039 		pfnum = hat_getpfnum(as->a_hat, rdest);
3040 		AS_LOCK_EXIT(as);
3041 
3042 		/*
3043 		 * TODO: The pfn step should not be necessary - need
3044 		 * a hat_getpp() function.
3045 		 */
3046 		if (pf_is_memory(pfnum)) {
3047 			pp = page_numtopp_nolock(pfnum);
3048 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
3049 		} else
3050 			pp = NULL;
3051 		if (pp == NULL) {
3052 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3053 			return (E2BIG);
3054 		}
3055 	} else {
3056 		pp = *pplist;
3057 	}
3058 	/*
3059 	 * Map destination page into kernel address
3060 	 */
3061 	if (kpm_enable)
3062 		kaddr = (caddr_t)hat_kpm_mapin(pp, (struct kpme *)NULL);
3063 	else
3064 		kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE,
3065 		    (caddr_t)-1);
3066 
3067 	/*
3068 	 * Copy from src to dest
3069 	 */
3070 	if (copyin_nowatch(src, kaddr + off, len) != 0)
3071 		error = EFAULT;
3072 	/*
3073 	 * Unmap destination page from kernel
3074 	 */
3075 	if (kpm_enable)
3076 		hat_kpm_mapout(pp, (struct kpme *)NULL, kaddr);
3077 	else
3078 		ppmapout(kaddr);
3079 	/*
3080 	 * Unlock destination page
3081 	 */
3082 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3083 	return (error);
3084 }
3085 
3086 /*
3087  * General kernel upcall using doors
3088  *	Returns 0 on success, errno for failures.
3089  *	Caller must have a hold on the door based vnode, and on any
3090  *	references passed in desc_ptr.  The references are released
3091  *	in the event of an error, and passed without duplication
3092  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
3093  *	a 64-bit kernel, since it may be used to store door descriptors
3094  *	if they are returned by the server.  The caller is responsible
3095  *	for holding a reference to the cred passed in.
3096  */
3097 int
3098 door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred,
3099     size_t max_data, uint_t max_descs)
3100 {
3101 	/* Locals */
3102 	door_upcall_t	*dup;
3103 	door_node_t	*dp;
3104 	kthread_t	*server_thread;
3105 	int		error = 0;
3106 	klwp_t		*lwp;
3107 	door_client_t	*ct;		/* curthread door_data */
3108 	door_server_t	*st;		/* server thread door_data */
3109 	int		gotresults = 0;
3110 	int		cancel_pending;
3111 
3112 	if (vp->v_type != VDOOR) {
3113 		if (param->desc_num)
3114 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3115 		return (EINVAL);
3116 	}
3117 
3118 	lwp = ttolwp(curthread);
3119 	ct = door_my_client(1);
3120 	dp = VTOD(vp);	/* Convert to a door_node_t */
3121 
3122 	dup = kmem_zalloc(sizeof (*dup), KM_SLEEP);
3123 	dup->du_cred = (cred != NULL) ? cred : curthread->t_cred;
3124 	dup->du_max_data = max_data;
3125 	dup->du_max_descs = max_descs;
3126 
3127 	/*
3128 	 * This should be done in shuttle_resume(), just before going to
3129 	 * sleep, but we want to avoid overhead while holding door_knob.
3130 	 * prstop() is just a no-op if we don't really go to sleep.
3131 	 * We test not-kernel-address-space for the sake of clustering code.
3132 	 */
3133 	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
3134 		prstop(PR_REQUESTED, 0);
3135 
3136 	mutex_enter(&door_knob);
3137 	if (DOOR_INVALID(dp)) {
3138 		mutex_exit(&door_knob);
3139 		if (param->desc_num)
3140 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3141 		error = EBADF;
3142 		goto out;
3143 	}
3144 
3145 	if (dp->door_target == &p0) {
3146 		/* Can't do an upcall to a kernel server */
3147 		mutex_exit(&door_knob);
3148 		if (param->desc_num)
3149 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3150 		error = EINVAL;
3151 		goto out;
3152 	}
3153 
3154 	error = door_check_limits(dp, param, 1);
3155 	if (error != 0) {
3156 		mutex_exit(&door_knob);
3157 		if (param->desc_num)
3158 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3159 		goto out;
3160 	}
3161 
3162 	/*
3163 	 * Get a server thread from the target domain
3164 	 */
3165 	if ((server_thread = door_get_server(dp)) == NULL) {
3166 		if (DOOR_INVALID(dp))
3167 			error = EBADF;
3168 		else
3169 			error = EAGAIN;
3170 		mutex_exit(&door_knob);
3171 		if (param->desc_num)
3172 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3173 		goto out;
3174 	}
3175 
3176 	st = DOOR_SERVER(server_thread->t_door);
3177 	ct->d_buf = param->data_ptr;
3178 	ct->d_bufsize = param->data_size;
3179 	ct->d_args = *param;	/* structure assignment */
3180 
3181 	if (ct->d_args.desc_num) {
3182 		/*
3183 		 * Move data from client to server
3184 		 */
3185 		DOOR_T_HOLD(st);
3186 		mutex_exit(&door_knob);
3187 		error = door_translate_out();
3188 		mutex_enter(&door_knob);
3189 		DOOR_T_RELEASE(st);
3190 		if (error) {
3191 			/*
3192 			 * We're not going to resume this thread after all
3193 			 */
3194 			door_release_server(dp, server_thread);
3195 			shuttle_sleep(server_thread);
3196 			mutex_exit(&door_knob);
3197 			goto out;
3198 		}
3199 	}
3200 
3201 	ct->d_upcall = dup;
3202 	if (param->rsize == 0)
3203 		ct->d_noresults = 1;
3204 	else
3205 		ct->d_noresults = 0;
3206 
3207 	dp->door_active++;
3208 
3209 	ct->d_error = DOOR_WAIT;
3210 	st->d_caller = curthread;
3211 	st->d_active = dp;
3212 
3213 	shuttle_resume(server_thread, &door_knob);
3214 
3215 	mutex_enter(&door_knob);
3216 shuttle_return:
3217 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3218 		/*
3219 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3220 		 */
3221 		mutex_exit(&door_knob);		/* May block in ISSIG */
3222 		cancel_pending = 0;
3223 		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3224 		    MUSTRETURN(curproc, curthread) ||
3225 		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
3226 			/* Signal, forkall, ... */
3227 			if (cancel_pending)
3228 				schedctl_cancel_eintr();
3229 			lwp->lwp_sysabort = 0;
3230 			mutex_enter(&door_knob);
3231 			error = EINTR;
3232 			/*
3233 			 * If the server has finished processing our call,
3234 			 * or exited (calling door_slam()), then d_error
3235 			 * will have changed.  If the server hasn't finished
3236 			 * yet, d_error will still be DOOR_WAIT, and we
3237 			 * let it know we are not interested in any
3238 			 * results by sending a SIGCANCEL, unless the door
3239 			 * is marked with DOOR_NO_CANCEL.
3240 			 */
3241 			if (ct->d_error == DOOR_WAIT &&
3242 			    st->d_caller == curthread) {
3243 				proc_t	*p = ttoproc(server_thread);
3244 
3245 				st->d_active = NULL;
3246 				st->d_caller = NULL;
3247 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3248 					DOOR_T_HOLD(st);
3249 					mutex_exit(&door_knob);
3250 
3251 					mutex_enter(&p->p_lock);
3252 					sigtoproc(p, server_thread, SIGCANCEL);
3253 					mutex_exit(&p->p_lock);
3254 
3255 					mutex_enter(&door_knob);
3256 					DOOR_T_RELEASE(st);
3257 				}
3258 			}
3259 		} else {
3260 			/*
3261 			 * Return from stop(), server exit...
3262 			 *
3263 			 * Note that the server could have done a
3264 			 * door_return while the client was in stop state
3265 			 * (ISSIG), in which case the error condition
3266 			 * is updated by the server.
3267 			 */
3268 			mutex_enter(&door_knob);
3269 			if (ct->d_error == DOOR_WAIT) {
3270 				/* Still waiting for a reply */
3271 				shuttle_swtch(&door_knob);
3272 				mutex_enter(&door_knob);
3273 				if (lwp)
3274 					lwp->lwp_asleep = 0;
3275 				goto	shuttle_return;
3276 			} else if (ct->d_error == DOOR_EXIT) {
3277 				/* Server exit */
3278 				error = EINTR;
3279 			} else {
3280 				/* Server did a door_return during ISSIG */
3281 				error = ct->d_error;
3282 			}
3283 		}
3284 		/*
3285 		 * Can't exit if the server is currently copying
3286 		 * results for me
3287 		 */
3288 		while (DOOR_T_HELD(ct))
3289 			cv_wait(&ct->d_cv, &door_knob);
3290 
3291 		/*
3292 		 * Find out if results were successfully copied.
3293 		 */
3294 		if (ct->d_error == 0)
3295 			gotresults = 1;
3296 	}
3297 	if (lwp) {
3298 		lwp->lwp_asleep = 0;		/* /proc */
3299 		lwp->lwp_sysabort = 0;		/* /proc */
3300 	}
3301 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3302 		door_deliver_unref(dp);
3303 	mutex_exit(&door_knob);
3304 
3305 	/*
3306 	 * Translate returned doors (if any)
3307 	 */
3308 
3309 	if (ct->d_noresults)
3310 		goto out;
3311 
3312 	if (error) {
3313 		/*
3314 		 * If server returned results successfully, then we've
3315 		 * been interrupted and may need to clean up.
3316 		 */
3317 		if (gotresults) {
3318 			ASSERT(error == EINTR);
3319 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3320 		}
3321 		goto out;
3322 	}
3323 
3324 	if (ct->d_args.desc_num) {
3325 		struct file	**fpp;
3326 		door_desc_t	*didpp;
3327 		vnode_t		*vp;
3328 		uint_t		n = ct->d_args.desc_num;
3329 
3330 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3331 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3332 		fpp = ct->d_fpp;
3333 
3334 		while (n--) {
3335 			struct file *fp;
3336 
3337 			fp = *fpp;
3338 			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3339 				vp = fp->f_vnode;
3340 
3341 			didpp->d_attributes = DOOR_HANDLE |
3342 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3343 			didpp->d_data.d_handle = FTODH(fp);
3344 
3345 			fpp++; didpp++;
3346 		}
3347 	}
3348 
3349 	/* on return data is in rbuf */
3350 	*param = ct->d_args;		/* structure assignment */
3351 
3352 out:
3353 	kmem_free(dup, sizeof (*dup));
3354 
3355 	if (ct->d_fpp) {
3356 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3357 		ct->d_fpp = NULL;
3358 		ct->d_fpp_size = 0;
3359 	}
3360 
3361 	ct->d_upcall = NULL;
3362 	ct->d_noresults = 0;
3363 	ct->d_buf = NULL;
3364 	ct->d_bufsize = 0;
3365 	return (error);
3366 }
3367 
3368 /*
3369  * Add a door to the per-process list of active doors for which the
3370  * process is a server.
3371  */
3372 static void
3373 door_list_insert(door_node_t *dp)
3374 {
3375 	proc_t *p = dp->door_target;
3376 
3377 	ASSERT(MUTEX_HELD(&door_knob));
3378 	dp->door_list = p->p_door_list;
3379 	p->p_door_list = dp;
3380 }
3381 
3382 /*
3383  * Remove a door from the per-process list of active doors.
3384  */
3385 void
3386 door_list_delete(door_node_t *dp)
3387 {
3388 	door_node_t **pp;
3389 
3390 	ASSERT(MUTEX_HELD(&door_knob));
3391 	/*
3392 	 * Find the door in the list.  If the door belongs to another process,
3393 	 * it's OK to use p_door_list since that process can't exit until all
3394 	 * doors have been taken off the list (see door_exit).
3395 	 */
3396 	pp = &(dp->door_target->p_door_list);
3397 	while (*pp != dp)
3398 		pp = &((*pp)->door_list);
3399 
3400 	/* found it, take it off the list */
3401 	*pp = dp->door_list;
3402 }
3403 
3404 
3405 /*
3406  * External kernel interfaces for doors.  These functions are available
3407  * outside the doorfs module for use in creating and using doors from
3408  * within the kernel.
3409  */
3410 
3411 /*
3412  * door_ki_upcall invokes a user-level door server from the kernel, with
3413  * the credentials associated with curthread.
3414  */
3415 int
3416 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3417 {
3418 	return (door_ki_upcall_limited(dh, param, NULL, SIZE_MAX, UINT_MAX));
3419 }
3420 
3421 /*
3422  * door_ki_upcall_limited invokes a user-level door server from the
3423  * kernel with the given credentials and reply limits.  If the "cred"
3424  * argument is NULL, it uses the credentials associated with the current
3425  * thread.  max_data limits the maximum length of the returned data (the
3426  * client will get E2BIG if they go over), and max_desc limits the
3427  * number of returned descriptors (the client will get EMFILE if they
3428  * go over).
3429  */
3430 int
3431 door_ki_upcall_limited(door_handle_t dh, door_arg_t *param, struct cred *cred,
3432     size_t max_data, uint_t max_desc)
3433 {
3434 	file_t *fp = DHTOF(dh);
3435 	vnode_t *realvp;
3436 
3437 	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
3438 		realvp = fp->f_vnode;
3439 	return (door_upcall(realvp, param, cred, max_data, max_desc));
3440 }
3441 
3442 /*
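/*
 * Illustrative sketch, not part of the original file: one way a kernel
 * subsystem might use the interfaces above to call a user-level door
 * server.  The door path, buffer layout and error handling here are
 * assumptions made purely for the example; the reply buffer is assumed
 * to be suitably (64-bit) aligned in case the server returns descriptors.
 */
static int
example_door_upcall(char *req, size_t reqlen, char *reply, size_t replylen)
{
	door_handle_t dh;
	door_arg_t da;
	int error;

	/* Assumed path of a door created by a user-level daemon */
	error = door_ki_open("/var/run/example_door", &dh);
	if (error != 0)
		return (error);

	da.data_ptr = req;		/* request data */
	da.data_size = reqlen;
	da.desc_ptr = NULL;		/* no descriptors passed */
	da.desc_num = 0;
	da.rbuf = reply;		/* where results should land */
	da.rsize = replylen;

	error = door_ki_upcall(dh, &da);

	/*
	 * If the reply did not fit in 'reply', door_upcall allocated a
	 * larger rbuf on our behalf, which the caller must free.
	 */
	if (error == 0 && da.rbuf != reply)
		kmem_free(da.rbuf, da.rsize);

	door_ki_rele(dh);		/* drop the hold from door_ki_open */
	return (error);
}
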
3443  * Function call to create a "kernel" door server.  A kernel door
3444  * server provides a way for a user-level process to invoke a function
3445  * in the kernel through a door_call.  From the caller's point of
3446  * view, a kernel door server looks the same as a user-level one
3447  * (except the server pid is 0).  Unlike normal door calls, the
3448  * kernel door function is invoked via a normal function call in the
3449  * same thread and context as the caller.
3450  */
3451 int
3452 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3453     door_handle_t *dhp)
3454 {
3455 	int err;
3456 	file_t *fp;
3457 
3458 	/* no DOOR_PRIVATE */
3459 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3460 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3461 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3462 		return (EINVAL);
3463 
3464 	err = door_create_common(pc_cookie, data_cookie, attributes,
3465 	    1, NULL, &fp);
3466 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3467 	    p0.p_unref_thread == 0) {
3468 		/* need to create unref thread for process 0 */
3469 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3470 		    TS_RUN, minclsyspri);
3471 	}
3472 	if (err == 0) {
3473 		*dhp = FTODH(fp);
3474 	}
3475 	return (err);
3476 }
3477 
3478 void
3479 door_ki_hold(door_handle_t dh)
3480 {
3481 	file_t *fp = DHTOF(dh);
3482 
3483 	mutex_enter(&fp->f_tlock);
3484 	fp->f_count++;
3485 	mutex_exit(&fp->f_tlock);
3486 }
3487 
3488 void
3489 door_ki_rele(door_handle_t dh)
3490 {
3491 	file_t *fp = DHTOF(dh);
3492 
3493 	(void) closef(fp);
3494 }
3495 
3496 int
3497 door_ki_open(char *pathname, door_handle_t *dhp)
3498 {
3499 	file_t *fp;
3500 	vnode_t *vp;
3501 	int err;
3502 
3503 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3504 		return (err);
3505 	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
3506 		VN_RELE(vp);
3507 		return (err);
3508 	}
3509 	if (vp->v_type != VDOOR) {
3510 		VN_RELE(vp);
3511 		return (EINVAL);
3512 	}
3513 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3514 		VN_RELE(vp);
3515 		return (err);
3516 	}
3517 	/* falloc returns with f_tlock held on success */
3518 	mutex_exit(&fp->f_tlock);
3519 	*dhp = FTODH(fp);
3520 	return (0);
3521 }
3522 
3523 int
3524 door_ki_info(door_handle_t dh, struct door_info *dip)
3525 {
3526 	file_t *fp = DHTOF(dh);
3527 	vnode_t *vp;
3528 
3529 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3530 		vp = fp->f_vnode;
3531 	if (vp->v_type != VDOOR)
3532 		return (EINVAL);
3533 	door_info_common(VTOD(vp), dip, fp);
3534 	return (0);
3535 }
3536 
3537 door_handle_t
3538 door_ki_lookup(int did)
3539 {
3540 	file_t *fp;
3541 	door_handle_t dh;
3542 
3543 	/* is the descriptor really a door? */
3544 	if (door_lookup(did, &fp) == NULL)
3545 		return (NULL);
3546 	/* got the door, put a hold on it and release the fd */
3547 	dh = FTODH(fp);
3548 	door_ki_hold(dh);
3549 	releasef(did);
3550 	return (dh);
3551 }
3552 
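/*
 * Illustrative sketch, not part of the original file: capturing a door
 * descriptor handed in by a user process (for instance through an ioctl)
 * as a door handle the kernel can keep after the system call returns.
 * The EBADF mapping for a non-door descriptor is an assumption made for
 * the example.
 */
static int
example_capture_door(int fd, door_handle_t *dhp)
{
	door_handle_t dh;
	struct door_info di;
	int error;

	/* door_ki_lookup returns NULL if fd is not a door descriptor */
	if ((dh = door_ki_lookup(fd)) == NULL)
		return (EBADF);

	/* Optional sanity check on the door before keeping the handle */
	if ((error = door_ki_info(dh, &di)) != 0) {
		door_ki_rele(dh);
		return (error);
	}

	*dhp = dh;	/* caller must door_ki_rele() the handle when done */
	return (0);
}
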
3553 int
3554 door_ki_setparam(door_handle_t dh, int type, size_t val)
3555 {
3556 	file_t *fp = DHTOF(dh);
3557 	vnode_t *vp;
3558 
3559 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3560 		vp = fp->f_vnode;
3561 	if (vp->v_type != VDOOR)
3562 		return (EINVAL);
3563 	return (door_setparam_common(VTOD(vp), 1, type, val));
3564 }
3565 
3566 int
3567 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3568 {
3569 	file_t *fp = DHTOF(dh);
3570 	vnode_t *vp;
3571 
3572 	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
3573 		vp = fp->f_vnode;
3574 	if (vp->v_type != VDOOR)
3575 		return (EINVAL);
3576 	return (door_getparam_common(VTOD(vp), type, out));
3577 }
3578