1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include "lint.h"
28 #include "thr_uberdata.h"
29 #include "libc.h"
30
31 #include <alloca.h>
32 #include <unistd.h>
33 #include <thread.h>
34 #include <pthread.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <door.h>
38 #include <signal.h>
39 #include <ucred.h>
40 #include <strings.h>
41 #include <ucontext.h>
42 #include <sys/ucred.h>
43 #include <atomic.h>
44
static door_server_func_t door_create_server;

/*
 * Global state -- the non-statics are accessed from the __door_return()
 * syscall wrapper.
 */
static mutex_t door_state_lock = DEFAULTMUTEX;
/* Function invoked to grow the pool of door server threads. */
door_server_func_t *door_server_func = door_create_server;
/* pid of the process that most recently created a door. */
pid_t door_create_pid = 0;
/* pid in which the first non-private door was created. */
static pid_t door_create_first_pid = 0;
/* pid in which the unref service thread was started. */
static pid_t door_create_unref_pid = 0;

/*
 * The raw system call interfaces
 */
extern int __door_create(void (*)(void *, char *, size_t, door_desc_t *,
    uint_t), void *, uint_t);
extern int __door_return(caddr_t, size_t, door_return_desc_t *, caddr_t,
    size_t);
extern int __door_ucred(ucred_t *);
extern int __door_unref(void);
extern int __door_unbind(void);
67
68 /*
69 * Key for per-door data for doors created with door_xcreate.
70 */
/*
 * Key for per-door data for doors created with door_xcreate.
 */
static pthread_key_t privdoor_key = PTHREAD_ONCE_KEY_NP;

/*
 * Each door_xcreate'd door has a struct privdoor_data allocated for it,
 * and each of the initial pool of service threads for the door
 * has TSD for the privdoor_key set to point to this structure.
 * When a thread in door_return decides it is time to perform a
 * thread depletion callback we can retrieve this door information
 * via a TSD lookup on the privdoor key.
 */
struct privdoor_data {
	int pd_dfd;			/* door file descriptor */
	door_id_t pd_uniqid;		/* door identity (di_uniquifier) */
	volatile uint32_t pd_refcnt;	/* server threads + transient holds */
	door_xcreate_server_func_t *pd_crf;	/* thread creation function */
	void *pd_crcookie;		/* cookie for pd_crf and pd_setupf */
	door_xcreate_thrsetup_func_t *pd_setupf; /* thread setup, may be NULL */
};
89
90 static int door_xcreate_n(door_info_t *, struct privdoor_data *, int);
91
92 /*
93 * door_create_cmn holds the privdoor data before kicking off server
94 * thread creation, all of which must succeed; if they don't then
95 * they return leaving the refcnt unchanged overall, and door_create_cmn
96 * releases its hold after revoking the door and we're done. Otherwise
97 * all n threads created add one each to the refcnt, and door_create_cmn
98 * drops its hold. If and when a server thread exits the key destructor
99 * function will be called, and we use that to decrement the reference
100 * count. We also decrement the reference count on door_unbind().
101 * If ever we get the reference count to 0 then we will free that data.
102 */
/*
 * Take an additional reference on the per-door private data.
 */
static void
privdoor_data_hold(struct privdoor_data *pdd)
{
	atomic_inc_32(&pdd->pd_refcnt);
}
108
/*
 * Release a reference on the per-door private data, freeing the
 * structure when the last reference is dropped.
 */
static void
privdoor_data_rele(struct privdoor_data *pdd)
{
	if (atomic_dec_32_nv(&pdd->pd_refcnt) == 0)
		free(pdd);
}
115
/*
 * TSD destructor for privdoor_key: runs when a server thread carrying
 * the key exits, dropping that thread's hold on the door data.
 */
void
privdoor_destructor(void *data)
{
	privdoor_data_rele((struct privdoor_data *)data);
}
121
122 /*
 * We park ourselves in the kernel to serve as the "caller" for
124 * unreferenced upcalls for this process. If the call returns with
125 * EINTR (e.g., someone did a forkall), we repeat as long as we're still
126 * in the parent. If the child creates an unref door it will create
127 * a new thread.
128 */
129 static void *
door_unref_func(void * arg)130 door_unref_func(void *arg)
131 {
132 pid_t mypid = (pid_t)(uintptr_t)arg;
133
134 sigset_t fillset;
135
136 /* mask signals before diving into the kernel */
137 (void) sigfillset(&fillset);
138 (void) thr_sigsetmask(SIG_SETMASK, &fillset, NULL);
139
140 while (getpid() == mypid && __door_unref() && errno == EINTR)
141 continue;
142
143 return (NULL);
144 }
145
/*
 * Common implementation underlying door_create() and door_xcreate().
 * Creates the door in the kernel, then arranges the supporting service
 * threads: a process-wide unref thread the first time an unref door is
 * created in this process, a default server thread pool for the first
 * non-private door, and for private doors either the installed
 * door_server_func or (for door_xcreate) nthread bound server threads.
 * Returns the door descriptor, or -1 with errno set.
 */
static int
door_create_cmn(door_server_procedure_t *f, void *cookie, uint_t flags,
    door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
    void *crcookie, int nthread)
{
	int d;

	int is_private = (flags & DOOR_PRIVATE);
	int is_unref = (flags & (DOOR_UNREF | DOOR_UNREF_MULTI));
	int do_create_first = 0;
	int do_create_unref = 0;

	ulwp_t *self = curthread;

	pid_t mypid;

	/* Door creation is not supported in a vfork() child. */
	if (self->ul_vfork) {
		errno = ENOTSUP;
		return (-1);
	}

	/* The presence of a create function marks a door_xcreate'd door. */
	if (crf)
		flags |= DOOR_PRIVCREATE;

	/*
	 * Doors are associated with the processes which created them. In
	 * the face of forkall(), this gets quite complicated. To simplify
	 * it somewhat, we include the call to __door_create() in a critical
	 * section, and figure out what additional actions to take while
	 * still in the critical section.
	 */
	enter_critical(self);
	if ((d = __door_create(f, cookie, flags)) < 0) {
		exit_critical(self);
		return (-1);	/* errno is set */
	}
	mypid = getpid();
	/*
	 * Cheap unlocked test first; take the lock only if some per-pid
	 * bookkeeping may need updating (e.g. after fork).
	 */
	if (mypid != door_create_pid ||
	    (!is_private && mypid != door_create_first_pid) ||
	    (is_unref && mypid != door_create_unref_pid)) {

		lmutex_lock(&door_state_lock);
		door_create_pid = mypid;

		if (!is_private && mypid != door_create_first_pid) {
			do_create_first = 1;
			door_create_first_pid = mypid;
		}
		if (is_unref && mypid != door_create_unref_pid) {
			do_create_unref = 1;
			door_create_unref_pid = mypid;
		}
		lmutex_unlock(&door_state_lock);
	}
	exit_critical(self);

	if (do_create_unref) {
		/*
		 * Create an unref thread the first time we create an
		 * unref door for this process. Create it as a daemon
		 * thread, so that it doesn't interfere with normal exit
		 * processing.
		 */
		(void) thr_create(NULL, 0, door_unref_func,
		    (void *)(uintptr_t)mypid, THR_DAEMON, NULL);
	}

	if (is_private) {
		door_info_t di;

		/*
		 * Create the first thread(s) for this private door.
		 */
		if (__door_info(d, &di) < 0)
			return (-1);	/* errno is set */

		/*
		 * This key must be available for lookup for all private
		 * door threads, whether associated with a door created via
		 * door_create or door_xcreate.
		 */
		(void) pthread_key_create_once_np(&privdoor_key,
		    privdoor_destructor);

		if (crf == NULL) {
			(*door_server_func)(&di);
		} else {
			struct privdoor_data *pdd = malloc(sizeof (*pdd));

			if (pdd == NULL) {
				(void) door_revoke(d);
				errno = ENOMEM;
				return (-1);
			}

			pdd->pd_dfd = d;
			pdd->pd_uniqid = di.di_uniquifier;
			pdd->pd_refcnt = 1; /* prevent free during xcreate_n */
			pdd->pd_crf = crf;
			pdd->pd_crcookie = crcookie;
			pdd->pd_setupf = setupf;

			if (!door_xcreate_n(&di, pdd, nthread)) {
				int errnocp = errno;

				/* Thread startup failed: undo the door. */
				(void) door_revoke(d);
				privdoor_data_rele(pdd);
				errno = errnocp;
				return (-1);
			} else {
				/* Threads now hold their own references. */
				privdoor_data_rele(pdd);
			}
		}
	} else if (do_create_first) {
		/* First non-private door created in the process */
		(*door_server_func)(NULL);
	}

	return (d);
}
266
267 int
door_create(door_server_procedure_t * f,void * cookie,uint_t flags)268 door_create(door_server_procedure_t *f, void *cookie, uint_t flags)
269 {
270 if (flags & (DOOR_NO_DEPLETION_CB | DOOR_PRIVCREATE)) {
271 errno = EINVAL;
272 return (-1);
273 }
274
275 return (door_create_cmn(f, cookie, flags, NULL, NULL, NULL, 1));
276 }
277
278 int
door_xcreate(door_server_procedure_t * f,void * cookie,uint_t flags,door_xcreate_server_func_t * crf,door_xcreate_thrsetup_func_t * setupf,void * crcookie,int nthread)279 door_xcreate(door_server_procedure_t *f, void *cookie, uint_t flags,
280 door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
281 void *crcookie, int nthread)
282 {
283 if (flags & DOOR_PRIVCREATE || nthread < 1 || crf == NULL) {
284 errno = EINVAL;
285 return (-1);
286 }
287
288 return (door_create_cmn(f, cookie, flags | DOOR_PRIVATE,
289 crf, setupf, crcookie, nthread));
290 }
291
292 int
door_ucred(ucred_t ** uc)293 door_ucred(ucred_t **uc)
294 {
295 ucred_t *ucp = *uc;
296
297 if (ucp == NULL) {
298 ucp = _ucred_alloc();
299 if (ucp == NULL)
300 return (-1);
301 }
302
303 if (__door_ucred(ucp) != 0) {
304 if (*uc == NULL)
305 ucred_free(ucp);
306 return (-1);
307 }
308
309 *uc = ucp;
310
311 return (0);
312 }
313
314 int
door_cred(door_cred_t * dc)315 door_cred(door_cred_t *dc)
316 {
317 /*
318 * Ucred size is small and alloca is fast
319 * and cannot fail.
320 */
321 ucred_t *ucp = alloca(ucred_size());
322 int ret;
323
324 if ((ret = __door_ucred(ucp)) == 0) {
325 dc->dc_euid = ucred_geteuid(ucp);
326 dc->dc_ruid = ucred_getruid(ucp);
327 dc->dc_egid = ucred_getegid(ucp);
328 dc->dc_rgid = ucred_getrgid(ucp);
329 dc->dc_pid = ucred_getpid(ucp);
330 }
331 return (ret);
332 }
333
/*
 * Unbind the calling thread from the private door it is bound to.
 * Returns the result of __door_unbind().  For door_xcreate'd doors
 * we also tear down this thread's per-door TSD.
 */
int
door_unbind(void)
{
	struct privdoor_data *pdd;
	int rv = __door_unbind();

	/*
	 * If we were indeed bound to the door then check to see whether
	 * we are part of a door_xcreate'd door by checking for our TSD.
	 * If so, then clear the TSD for this key to avoid destructor
	 * callback on future thread exit, and release the private door data.
	 */
	if (rv == 0 && (pdd = pthread_getspecific(privdoor_key)) != NULL) {
		(void) pthread_setspecific(privdoor_key, NULL);
		privdoor_data_rele(pdd);
	}

	return (rv);
}
353
/*
 * Return results to a door client (and await the next invocation),
 * or enter door service when invoked as door_return(NULL, 0, NULL, 0)
 * as the server threads in this file do.  We determine the thread's
 * stack base and size via thr_stksegment(), reserve headroom for the
 * next invocation, and trap into the kernel via __door_return().
 * Returns -1 with errno set on failure.
 */
int
door_return(char *data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t num_desc)
{
	caddr_t sp;
	size_t ssize;
	size_t reserve;
	ulwp_t *self = curthread;

	{
		stack_t s;
		if (thr_stksegment(&s) != 0) {
			errno = EINVAL;
			return (-1);
		}
		sp = s.ss_sp;
		ssize = s.ss_size;
	}

	if (!self->ul_door_noreserve) {
		/*
		 * When we return from the kernel, we must have enough stack
		 * available to handle the request. Since the creator of
		 * the thread has control over its stack size, and larger
		 * stacks generally indicate bigger request queues, we
		 * use the heuristic of reserving 1/32nd of the stack size
		 * (up to the default stack size), with a minimum of 1/8th
		 * of MINSTACK. Currently, this translates to:
		 *
		 *			_ILP32		_LP64
		 *	min resv	512 bytes	1024 bytes
		 *	max resv	32k bytes	64k bytes
		 *
		 * This reservation can be disabled by setting
		 *	_THREAD_DOOR_NORESERVE=1
		 * in the environment, but shouldn't be.
		 */

#define	STACK_FRACTION		32
#define	MINSTACK_FRACTION	8

		if (ssize < (MINSTACK * (STACK_FRACTION/MINSTACK_FRACTION)))
			reserve = MINSTACK / MINSTACK_FRACTION;
		else if (ssize < DEFAULTSTACK)
			reserve = ssize / STACK_FRACTION;
		else
			reserve = DEFAULTSTACK / STACK_FRACTION;

#undef STACK_FRACTION
#undef MINSTACK_FRACTION

		if (ssize > reserve)
			ssize -= reserve;
		else
			ssize = 0;
	}

	/*
	 * Historically, the __door_return() syscall wrapper subtracted
	 * some "slop" from the stack pointer before trapping into the
	 * kernel.  We now do this here, so that ssize can be adjusted
	 * correctly.  Eventually, this should be removed, since it is
	 * unnecessary.  (note that TNF on x86 currently relies upon this
	 * idiocy)
	 */
#if defined(__sparc)
	reserve = SA(MINFRAME);
#elif defined(__x86)
	reserve = SA(512);
#else
#error need to define stack base reserve
#endif

#ifdef _STACK_GROWS_DOWNWARD
	sp -= reserve;
#else
#error stack does not grow downwards, routine needs update
#endif

	if (ssize > reserve)
		ssize -= reserve;
	else
		ssize = 0;

	/*
	 * Normally, the above will leave plenty of space in sp for a
	 * request.  Just in case some bozo overrides thr_stksegment() to
	 * return an uncommonly small stack size, we turn off stack size
	 * checking if there is less than 1k remaining.
	 */
#define	MIN_DOOR_STACK	1024
	if (ssize < MIN_DOOR_STACK)
		ssize = 0;

#undef MIN_DOOR_STACK

	/*
	 * We have to wrap the desc_* arguments for the syscall.  If there are
	 * no descriptors being returned, we can skip the wrapping.
	 */
	if (num_desc != 0) {
		door_return_desc_t d;

		d.desc_ptr = desc_ptr;
		d.desc_num = num_desc;
		return (__door_return(data_ptr, data_size, &d, sp, ssize));
	}
	return (__door_return(data_ptr, data_size, NULL, sp, ssize));
}
463
464 /*
465 * To start and synchronize a number of door service threads at once
466 * we use a struct door_xsync_shared shared by all threads, and
467 * a struct door_xsync for each thread. While each thread
468 * has its own startup state, all such state are protected by the same
469 * shared lock. This could cause a little contention but it is a one-off
470 * cost at door creation.
471 */
enum door_xsync_state {
	/* 0x1c8c8c80 is a distinctive base value, presumably to aid */
	/* debugging of stray mailbox contents -- NOTE(review): confirm */
	DOOR_XSYNC_CREATEWAIT = 0x1c8c8c80, /* awaits creation handshake */
	DOOR_XSYNC_ABORT,		/* aborting door_xcreate */
	DOOR_XSYNC_ABORTED,		/* thread heeded abort request */
	DOOR_XSYNC_MAXCONCUR,		/* create func decided no more */
	DOOR_XSYNC_CREATEFAIL,		/* thr_create/pthread_create failure */
	DOOR_XSYNC_SETSPEC_FAIL,	/* setspecific failed */
	DOOR_XSYNC_BINDFAIL,		/* door_bind failed */
	DOOR_XSYNC_BOUND,		/* door_bind succeeded */
	DOOR_XSYNC_ENTER_SERVICE	/* Go on to door_return */
};

/*
 * These stats are incremented non-atomically - indicative only.
 * Indexed by (state - DOOR_XSYNC_CREATEWAIT); see door_xcreate_n().
 */
uint64_t door_xcreate_n_stats[DOOR_XSYNC_ENTER_SERVICE -
    DOOR_XSYNC_CREATEWAIT + 1];

/* State shared by door_xcreate_n() and all the threads it starts. */
struct door_xsync_shared {
	pthread_mutex_t lock;		/* protects all xsync state */
	pthread_cond_t cv_m2s;		/* master-to-server wakeups */
	pthread_cond_t cv_s2m;		/* server-to-master wakeups */
	struct privdoor_data *pdd;
	volatile uint32_t waiting;	/* threads yet to check in */
};

/* Per-thread startup mailbox; protected by sharedp->lock. */
struct door_xsync {
	volatile enum door_xsync_state state;
	struct door_xsync_shared *sharedp;
};
500
501 /*
502 * Thread start function that xcreated private doors must use in
503 * thr_create or pthread_create. They must also use the argument we
504 * provide. We:
505 *
506 * o call a thread setup function if supplied, or apply sensible defaults
507 * o bind the newly-created thread to the door it will service
508 * o synchronize with door_xcreate to indicate that we have successfully
509 * bound to the door; door_xcreate will not return until all
510 * requested threads have at least bound
511 * o enter service with door_return quoting magic sentinel args
512 */
void *
door_xcreate_startf(void *arg)
{
	struct door_xsync *xsp = (struct door_xsync *)arg;
	struct door_xsync_shared *xssp = xsp->sharedp;
	struct privdoor_data *pdd = xssp->pdd;
	enum door_xsync_state next_state;

	/*
	 * Hold the door data for this thread's lifetime; the hold is
	 * released by the privdoor_key TSD destructor at thread exit,
	 * or immediately below if we cannot establish the TSD.
	 */
	privdoor_data_hold(pdd);
	if (pthread_setspecific(privdoor_key, (const void *)pdd) != 0) {
		next_state = DOOR_XSYNC_SETSPEC_FAIL;
		privdoor_data_rele(pdd);
		goto handshake;
	}

	/* Apply the caller's thread setup function, or sensible defaults. */
	if (pdd->pd_setupf != NULL) {
		(pdd->pd_setupf)(pdd->pd_crcookie);
	} else {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	if (door_bind(pdd->pd_dfd) == 0)
		next_state = DOOR_XSYNC_BOUND;
	else
		next_state = DOOR_XSYNC_BINDFAIL;

handshake:
	(void) pthread_mutex_lock(&xssp->lock);

	ASSERT(xsp->state == DOOR_XSYNC_CREATEWAIT ||
	    xsp->state == DOOR_XSYNC_ABORT);

	/* An abort request from the master overrides our own outcome. */
	if (xsp->state == DOOR_XSYNC_ABORT)
		next_state = DOOR_XSYNC_ABORTED;

	xsp->state = next_state;

	/* Last thread to check in wakes door_xcreate_n(). */
	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	if (next_state != DOOR_XSYNC_BOUND) {
		(void) pthread_mutex_unlock(&xssp->lock);
		return (NULL);	/* thread exits, key destructor called */
	}

	/* Wait for the master's verdict: enter service or abort. */
	while (xsp->state == DOOR_XSYNC_BOUND)
		(void) pthread_cond_wait(&xssp->cv_m2s, &xssp->lock);

	next_state = xsp->state;
	ASSERT(next_state == DOOR_XSYNC_ENTER_SERVICE ||
	    next_state == DOOR_XSYNC_ABORT);

	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	(void) pthread_mutex_unlock(&xssp->lock); /* xssp/xsp can be freed */

	if (next_state == DOOR_XSYNC_ABORT)
		return (NULL);	/* thread exits, key destructor called */

	(void) door_return(NULL, 0, NULL, 0);
	return (NULL);
}
577
/*
 * Create and synchronize n server threads for a private door via the
 * application-supplied server creation function.  Called with the
 * privdoor_data at door_xcreate() time (when all n must succeed), or
 * with pdd == NULL from a depletion callback (in which case the data
 * comes from the calling thread's TSD and partial success is OK).
 * Returns 1 on success, or 0 with errno set.
 */
static int
door_xcreate_n(door_info_t *dip, struct privdoor_data *pdd, int n)
{
	struct door_xsync_shared *xssp;
	struct door_xsync *xsp;
	int i, failidx = -1;
	int isdepcb = 0;
	int failerrno;
	int bound = 0;
#ifdef _STACK_GROWS_DOWNWARD
	int stkdir = -1;
#else
	int stkdir = 1;
#endif
	int rv = 0;

	/*
	 * If we're called during door creation then we have the
	 * privdoor_data. If we're called as part of a depletion callback
	 * then the current thread has the privdoor_data as TSD.
	 */
	if (pdd == NULL) {
		isdepcb = 1;
		if ((pdd = pthread_getspecific(privdoor_key)) == NULL)
			thr_panic("door_xcreate_n - no privdoor_data "
			    "on existing server thread");
	}

	/*
	 * Allocate on our stack.  We'll pass pointers to this to the
	 * newly-created threads, therefore this function must not return until
	 * we have synced with server threads that are created.
	 * We do not limit the number of threads so begin by checking
	 * that we have space on the stack for this.
	 */
	{
		size_t sz = sizeof (*xssp) + n * sizeof (*xsp) + 32;
		char dummy;

		if (!stack_inbounds(&dummy + stkdir * sz)) {
			errno = E2BIG;
			return (0);
		}
	}

	/*
	 * NOTE(review): alloca() cannot return NULL, so these checks are
	 * dead; the stack_inbounds() test above is the real guard.
	 */
	if ((xssp = alloca(sizeof (*xssp))) == NULL ||
	    (xsp = alloca(n * sizeof (*xsp))) == NULL) {
		errno = E2BIG;
		return (0);
	}

	(void) pthread_mutex_init(&xssp->lock, NULL);
	(void) pthread_cond_init(&xssp->cv_m2s, NULL);
	(void) pthread_cond_init(&xssp->cv_s2m, NULL);
	xssp->pdd = pdd;
	xssp->waiting = 0;

	/* Hold the lock so no new thread can handshake before we're ready. */
	(void) pthread_mutex_lock(&xssp->lock);

	for (i = 0; failidx == -1 && i < n; i++) {
		xsp[i].sharedp = xssp;
		membar_producer();	/* xssp and xsp[i] for new thread */

		switch ((pdd->pd_crf)(dip, door_xcreate_startf,
		    (void *)&xsp[i], pdd->pd_crcookie)) {
		case 1:
			/*
			 * Thread successfully created.  Set mailbox
			 * state and increment the number we have to
			 * sync with.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEWAIT;
			xssp->waiting++;
			break;
		case 0:
			/*
			 * Elected to create no further threads.  OK for
			 * a depletion callback, but not during door_xcreate.
			 */
			xsp[i].state = DOOR_XSYNC_MAXCONCUR;
			if (!isdepcb) {
				failidx = i;
				failerrno = EINVAL;
			}
			break;
		case -1:
			/*
			 * Thread creation was attempted but failed.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEFAIL;
			failidx = i;
			failerrno = EPIPE;
			break;
		default:
			/*
			 * The application-supplied function did not return
			 * -1/0/1 - best we can do is panic because anything
			 * else is harder to debug.
			 */
			thr_panic("door server create function illegal return");
			/*NOTREACHED*/
		}
	}

	/*
	 * On initial creation all must succeed; if not then abort
	 */
	if (!isdepcb && failidx != -1) {
		for (i = 0; i < failidx; i++)
			if (xsp[i].state == DOOR_XSYNC_CREATEWAIT)
				xsp[i].state = DOOR_XSYNC_ABORT;
	}

	/*
	 * Wait for thread startup handshake to complete for all threads
	 */
	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

	/*
	 * If we are aborting for a failed thread create in door_xcreate
	 * then we're done.
	 */
	if (!isdepcb && failidx != -1) {
		rv = 0;
		goto out;	/* lock held, failerrno is set */
	}

	/*
	 * Did we all succeed in binding?
	 */
	for (i = 0; i < n; i++) {
		int statidx = xsp[i].state - DOOR_XSYNC_CREATEWAIT;

		door_xcreate_n_stats[statidx]++;
		if (xsp[i].state == DOOR_XSYNC_BOUND)
			bound++;
	}

	if (bound == n) {
		rv = 1;
	} else {
		failerrno = EBADF;
		rv = 0;
	}

	/*
	 * During door_xcreate all must succeed in binding - if not then
	 * we command even those that did bind to abort.  Threads that
	 * did not get as far as binding have already exited.
	 */
	for (i = 0; i < n; i++) {
		if (xsp[i].state == DOOR_XSYNC_BOUND) {
			xsp[i].state = (rv == 1 || isdepcb) ?
			    DOOR_XSYNC_ENTER_SERVICE : DOOR_XSYNC_ABORT;
			xssp->waiting++;
		}
	}

	/* Release all bound threads and wait for them to acknowledge. */
	(void) pthread_cond_broadcast(&xssp->cv_m2s);

	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

out:
	(void) pthread_mutex_unlock(&xssp->lock);
	(void) pthread_mutex_destroy(&xssp->lock);
	(void) pthread_cond_destroy(&xssp->cv_m2s);
	(void) pthread_cond_destroy(&xssp->cv_s2m);

	if (rv == 0)
		errno = failerrno;

	return (rv);
}
753
754 /*
755 * Call the server creation function to give it the opportunity to
756 * create more threads. Called during a door invocation when we
757 * return from door_return(NULL,0, NULL, 0) and notice that we're
758 * running on the last available thread.
759 */
760 void
door_depletion_cb(door_info_t * dip)761 door_depletion_cb(door_info_t *dip)
762 {
763 if (dip == NULL) {
764 /*
765 * Non-private doors always use door_server_func.
766 */
767 (*door_server_func)(NULL);
768 return;
769 }
770
771 if (dip->di_attributes & DOOR_NO_DEPLETION_CB) {
772 /*
773 * Private, door_xcreate'd door specified no callbacks.
774 */
775 return;
776 } else if (!(dip->di_attributes & DOOR_PRIVCREATE)) {
777 /*
778 * Private door with standard/legacy creation semantics.
779 */
780 dip->di_attributes |= DOOR_DEPLETION_CB;
781 (*door_server_func)(dip);
782 return;
783 } else {
784 /*
785 * Private, door_xcreate'd door.
786 */
787 dip->di_attributes |= DOOR_DEPLETION_CB;
788 (void) door_xcreate_n(dip, NULL, 1);
789 }
790 }
791
792 /*
793 * Install a new server creation function. The appointed function
 * will receive depletion callbacks for non-private doors and private
795 * doors created with door_create(..., DOOR_PRIVATE).
796 */
797 door_server_func_t *
door_server_create(door_server_func_t * create_func)798 door_server_create(door_server_func_t *create_func)
799 {
800 door_server_func_t *prev;
801
802 lmutex_lock(&door_state_lock);
803 prev = door_server_func;
804 door_server_func = create_func;
805 lmutex_unlock(&door_state_lock);
806
807 return (prev);
808 }
809
810 /*
811 * Thread start function for door_create_server() below.
812 * Create door server threads with cancellation(7) disabled.
813 */
static void *
door_create_func(void *arg)
{
	/* Door server threads must not be cancelable; see cancellation(7). */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
	/* Enter door service; on success this call does not return here. */
	(void) door_return(NULL, 0, NULL, 0);

	return (arg);
}
822
823 /*
824 * The default door_server_func_t.
825 */
/*
 * The default door_server_func_t: adds one detached server thread
 * per invocation.
 */
static void
door_create_server(door_info_t *dip __unused)
{
	(void) thr_create(NULL, 0, door_create_func, NULL, THR_DETACHED, NULL);
	yield();	/* Gives server thread a chance to run */
}
832