/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include "nsc_thread.h"

#ifdef DS_DDICT
#include "../contract.h"
#endif

#include "../nsctl.h"
#include "nskernd.h"
#include <sys/nsctl/nsctl.h>

#include <sys/sdt.h>		/* dtrace is S10 or later */


/*
 * Global data
 */
static nstset_t *nst_sets;
static nsthread_t *nst_pending;
static kmutex_t nst_global_lock;	/* nst_sets, nst_pending */
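
/*
 * nst_sets is the chain of all thread sets created by nst_init() and
 * nst_pending is the chain of threads whose creation handshake with
 * nst_thread_run() has not yet completed (see nst_thread_create()).
 * Both chains are protected by nst_global_lock.
 */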


/*
 * nst_kmem_xalloc
 *
 * Poll for memory.
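 *
 * Calls (*alloc)() with KM_NOSLEEP and, on failure, delays for
 * NST_MEMORY_TIMEOUT microseconds before retrying, for roughly "sec"
 * seconds in total before giving up and returning NULL.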
 */
static void *
nst_kmem_xalloc(size_t size, int sec, void *(*alloc)(size_t, int))
{
	clock_t usec = sec * 1000000;
	void *p = NULL;

	while (usec > 0) {
		if ((p = (*alloc)(size, KM_NOSLEEP)) != NULL)
			return (p);

		delay(drv_usectohz((clock_t)NST_MEMORY_TIMEOUT));
		usec -= NST_MEMORY_TIMEOUT;
	}

	cmn_err(CE_WARN, "!nst_kmem_xalloc: failed to alloc %ld bytes", size);
	return (NULL);
}


#if 0
/* currently unused */
static void *
nst_kmem_alloc(size_t size, int sec)
{
	return (nst_kmem_xalloc(size, sec, kmem_alloc));
}
#endif


static void *
nst_kmem_zalloc(size_t size, int sec)
{
	return (nst_kmem_xalloc(size, sec, kmem_zalloc));
}


/*
 * Queue stuff that should be in the DDI.
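 *
 * The queues are circular, doubly linked lists in which the nst_q_t
 * list head acts as a sentinel: an empty queue has q_forw and q_back
 * pointing back at the head itself (see the set_free and set_reuse
 * initialisation in nst_init()).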
 */

/*
 * nst_insque
 *
 * Insert entryp after predp in a doubly linked list.
 */
static void
nst_insque(nst_q_t *entryp, nst_q_t *predp)
{
	entryp->q_back = predp;
	entryp->q_forw = predp->q_forw;
	predp->q_forw = entryp;
	entryp->q_forw->q_back = entryp;
}
#ifndef DS_DDICT
#pragma inline(nst_insque)	/* compiler hint to inline this function */
#endif


/*
 * nst_remque
 *
 * Remove entryp from a doubly linked list.
 */
static void
nst_remque(nst_q_t *entryp)
{
	entryp->q_back->q_forw = entryp->q_forw;
	entryp->q_forw->q_back = entryp->q_back;
	entryp->q_forw = entryp->q_back = NULL;
}
#ifndef DS_DDICT
#pragma inline(nst_remque)	/* compiler hint to inline this function */
#endif


/*
 * nst_thread_init
 *
 * Initialise the dynamic part of a thread
 */
static void
nst_thread_init(nsthread_t *tp)
{
	ASSERT(MUTEX_HELD(&((tp->tp_set)->set_lock)));
	ASSERT(!(tp->tp_flag & NST_TF_INUSE));
	tp->tp_flag = NST_TF_INUSE;
	tp->tp_func = NULL;
	tp->tp_arg = NULL;
}
#ifndef DS_DDICT
#pragma inline(nst_thread_init)	/* compiler hint to inline this function */
#endif


/*
 * nst_thread_alloc
 *
 * Return an nsthread from the free pool, NULL if none
 */
static nsthread_t *
nst_thread_alloc(nstset_t *set, const int sleep)
{
	nsthread_t *tp = NULL;

	mutex_enter(&set->set_lock);

	if (set->set_flag & NST_SF_KILL) {
		mutex_exit(&set->set_lock);
		DTRACE_PROBE1(nst_thread_alloc_err_kill, nstset_t *, set);
		return (NULL);
	}

	do {
		tp = (nsthread_t *)set->set_free.q_forw;
		if (tp != (nsthread_t *)&set->set_free)
			nst_remque(&tp->tp_link);
		else {
			tp = NULL;

			if (!sleep)
				break;

			set->set_res_cnt++;

			DTRACE_PROBE2(nst_thread_alloc_sleep, nstset_t *, set,
			    int, set->set_res_cnt);

			cv_wait(&set->set_res_cv, &set->set_lock);

			DTRACE_PROBE1(nst_thread_alloc_wake, nstset_t *, set);

			set->set_res_cnt--;

			if (set->set_flag & NST_SF_KILL)
				break;
		}
	} while (tp == NULL);

	/* initialise the thread */

	if (tp != NULL) {
		nst_thread_init(tp);
		set->set_nlive++;
	}

	mutex_exit(&set->set_lock);

	return (tp);
}


/*
 * nst_thread_free
 *
 * Requeue a thread on the free or reuse pools. Threads are always
 * queued to the tail of the list to prevent rapid recycling.
 *
 * Must be called with set->set_lock held.
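 *
 * Threads flagged NST_TF_DESTROY (created by nst_create(NST_CREATE))
 * are parked on the reuse pool, from which only nst_thread_create()
 * reallocates them; all other threads return to the free pool and any
 * callers sleeping in nst_thread_alloc() are woken.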
 */
static void
nst_thread_free(nsthread_t *tp)
{
	nstset_t *set = tp->tp_set;

	if (!set)
		return;

	ASSERT(MUTEX_HELD(&set->set_lock));

	tp->tp_flag &= ~NST_TF_INUSE;
	if (tp->tp_flag & NST_TF_DESTROY) {
		/* add self to reuse pool */
		nst_insque(&tp->tp_link, set->set_reuse.q_back);
	} else {
		/* add self to free pool */
		nst_insque(&tp->tp_link, set->set_free.q_back);
		if (set->set_res_cnt > 0)
			cv_broadcast(&set->set_res_cv);
	}
}


/*
 * nst_thread_run
 *
 * The first function that a new thread runs on entry from user land.
 * This is the main thread function that handles thread work and death.
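 *
 * On entry the thread checks that it is still on the nst_pending
 * chain, clears NST_TF_PENDING and broadcasts tp_cv to release the
 * creator waiting in nst_thread_create(), then loops on tp_cv waiting
 * for work (tp_func/tp_arg) until it or its set is killed.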
 */
static void
nst_thread_run(void *arg)
{
	nsthread_t *tp;
	nstset_t *set;
	int first = 1;

	mutex_enter(&nst_global_lock);

	/* check if this thread is still on the pending list */

	for (tp = nst_pending; tp; tp = tp->tp_chain) {
		if (tp == (nsthread_t *)arg) {
			break;
		}
	}

	if (!tp) {
		mutex_exit(&nst_global_lock);
		return;
	}

	if (!tp->tp_set) {
		mutex_exit(&nst_global_lock);
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_thread_run(%p): already dead?",
		    (void *)tp);
#endif
		return;
	}

	/* check that the set is still on the list of sets */

	for (set = nst_sets; set; set = set->set_next) {
		if (set == tp->tp_set) {
			break;
		}
	}

	if (!set) {
		mutex_exit(&nst_global_lock);
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_thread_run(%p): no set?", (void *)tp);
#endif
		return;
	}

	mutex_enter(&set->set_lock);

	mutex_exit(&nst_global_lock);

	/*
	 * Mark the parent.
	 * The parent won't actually run until set->set_lock is dropped.
	 */

	tp->tp_flag &= ~NST_TF_PENDING;
	cv_broadcast(&tp->tp_cv);

	/*
	 * Main loop.
	 */

	while (!(set->set_flag & NST_SF_KILL) &&
	    !(tp->tp_flag & NST_TF_KILL)) {
		/*
		 * On initial entry the caller will add this thread to
		 * the free pool if required; thereafter the thread
		 * must do it for itself.
		 */

		if (first) {
			first = 0;
		} else {
			nst_thread_free(tp);
			set->set_nlive--;
		}

		DTRACE_PROBE1(nst_thread_run_sleep, nsthread_t *, tp);

		cv_wait(&tp->tp_cv, &set->set_lock);

		DTRACE_PROBE1(nst_thread_run_wake, nsthread_t *, tp);

		if ((set->set_flag & NST_SF_KILL) ||
		    (tp->tp_flag & NST_TF_KILL)) {
			break;
		}

		mutex_exit(&set->set_lock);

		if (tp->tp_func) {
			(*tp->tp_func)(tp->tp_arg);
			tp->tp_func = 0;
			tp->tp_arg = 0;
		}
#ifdef DEBUG
		else {
			cmn_err(CE_WARN,
			    "!nst_thread_run(%p): NULL function pointer",
			    (void *)tp);
		}
#endif

		mutex_enter(&set->set_lock);
	}

	/* remove self from the free and/or reuse pools */
	if (tp->tp_link.q_forw != NULL || tp->tp_link.q_back != NULL) {
		ASSERT(tp->tp_link.q_forw != NULL &&
		    tp->tp_link.q_back != NULL);
		nst_remque(&tp->tp_link);
	}

	set->set_nthread--;
	tp->tp_flag &= ~NST_TF_KILL;

	/* wake the context that is running nst_destroy() or nst_del_thread() */
	cv_broadcast(&set->set_kill_cv);

	mutex_exit(&set->set_lock);

	/* suicide */
}


/*
 * nst_thread_destroy
 *
 * Free up the kernel level resources. The thread must already be
 * un-chained from the set, and the caller must not be the thread
 * itself.
 */
static void
nst_thread_destroy(nsthread_t *tp)
{
	if (!tp)
		return;

	ASSERT(tp->tp_chain == NULL);

	tp->tp_set = NULL;

	if (tp->tp_flag & NST_TF_INUSE) {
		cmn_err(CE_WARN, "!nst_thread_destroy(%p): still in use!",
		    (void *)tp);
		/* leak the thread */
		return;
	}

	cv_destroy(&tp->tp_cv);
	kmem_free(tp, sizeof (*tp));
}


/*
 * nst_thread_create
 *
 * Create and return a new thread from a threadset.
 */
static nsthread_t *
nst_thread_create(nstset_t *set)
{
	nsthread_t *tp, **tpp;
	int rc;

	/* try to reuse a thread first */

	if (set->set_reuse.q_forw != &set->set_reuse) {
		mutex_enter(&set->set_lock);

		tp = (nsthread_t *)set->set_reuse.q_forw;
		if (tp != (nsthread_t *)&set->set_reuse)
			nst_remque(&tp->tp_link);
		else
			tp = NULL;

		mutex_exit(&set->set_lock);

		if (tp) {
			DTRACE_PROBE2(nst_thread_create_end, nstset_t *, set,
			    nsthread_t *, tp);
			return (tp);
		}
	}

	/* create a thread using nskernd */

	tp = nst_kmem_zalloc(sizeof (*tp), 2);
	if (!tp) {
		DTRACE_PROBE1(nst_thread_create_err_mem, nstset_t *, set);
		return (NULL);
	}

	cv_init(&tp->tp_cv, NULL, CV_DRIVER, NULL);
	tp->tp_flag = NST_TF_PENDING;
	tp->tp_set = set;

	mutex_enter(&set->set_lock);

	if (set->set_flag & NST_SF_KILL) {
		mutex_exit(&set->set_lock);
		nst_thread_destroy(tp);
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_thread_create: called during destroy");
#endif
		DTRACE_PROBE2(nst_thread_create_err_kill, nstset_t *, set,
		    nsthread_t *, tp);
		return (NULL);
	}

	set->set_pending++;

	mutex_exit(&set->set_lock);

	mutex_enter(&nst_global_lock);

	tp->tp_chain = nst_pending;
	nst_pending = tp;

	mutex_exit(&nst_global_lock);

	DTRACE_PROBE2(nst_dbg_thr_create_proc_start, nstset_t *, set,
	    nsthread_t *, tp);

	rc = nsc_create_process(nst_thread_run, tp, 0);

	DTRACE_PROBE2(nst_dbg_thr_create_proc_end, nstset_t *, set,
	    nsthread_t *, tp);

	if (!rc) {
		/*
		 * wait for child to start and check in.
		 */

		mutex_enter(&set->set_lock);

		while (tp->tp_flag & NST_TF_PENDING)
			cv_wait(&tp->tp_cv, &set->set_lock);

		mutex_exit(&set->set_lock);
	}

	/*
	 * remove from pending chain.
	 */

	mutex_enter(&nst_global_lock);

	for (tpp = &nst_pending; (*tpp); tpp = &((*tpp)->tp_chain)) {
		if (*tpp == tp) {
			*tpp = tp->tp_chain;
			tp->tp_chain = NULL;
			break;
		}
	}

	mutex_exit(&nst_global_lock);

	/*
	 * Check for errors and return if required.
	 */

	mutex_enter(&set->set_lock);

	set->set_pending--;

	if (rc ||
	    (set->set_flag & NST_SF_KILL) ||
	    (set->set_nthread + 1) > USHRT_MAX) {
		if (rc == 0) {
			/*
			 * Thread is alive, and needs to be woken and killed.
			 */
			tp->tp_flag |= NST_TF_KILL;
			cv_broadcast(&tp->tp_cv);

			while (tp->tp_flag & NST_TF_KILL)
				cv_wait(&set->set_kill_cv, &set->set_lock);
		}
		mutex_exit(&set->set_lock);

		nst_thread_destroy(tp);
#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!nst_thread_create: error (rc %d, set_flag %x, "
		    "set_nthread %d)", rc, set->set_flag, set->set_nthread);
#endif
		DTRACE_PROBE2(nst_thread_create_err_proc, nstset_t *, set,
		    nsthread_t *, tp);

		return (NULL);
	}

	/*
	 * Move into set proper.
	 */

	tp->tp_chain = set->set_chain;
	set->set_chain = tp;
	set->set_nthread++;

	mutex_exit(&set->set_lock);

	return (tp);
}


/*
 * nst_create
 *
 * Start a new thread from a thread set, returning the
 * address of the thread, or NULL on failure.
 *
 * All threads are created detached.
 *
 * Valid flag values:
 *
 *	NST_CREATE	- create a new thread rather than using one
 *			  from the threadset. Once the thread
 *			  completes it will not be added to the active
 *			  portion of the threadset, but will be cached
 *			  on the reuse chain, and so is available for
 *			  subsequent NST_CREATE or nst_add_thread()
 *			  operations.
 *
 *	NST_SLEEP	- wait for a thread to be available instead of
 *			  returning NULL. Has no meaning with NST_CREATE.
 *
 * Returns a pointer to the new thread, or NULL.
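 *
 * Example usage (an illustrative sketch only; the names xxworker,
 * xxarg and "xxset" below are hypothetical and not part of this
 * module):
 *
 *	extern void xxworker(void *);
 *
 *	nstset_t *set = nst_init("xxset", 4);
 *
 *	if (set == NULL ||
 *	    nst_create(set, xxworker, (blind_t)xxarg, NST_SLEEP) == NULL)
 *		cmn_err(CE_WARN, "!xx: unable to start worker thread");
 *	...
 *	nst_destroy(set);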
 */
nsthread_t *
nst_create(nstset_t *set, void (*func)(), blind_t arg, int flags)
{
	nsthread_t *tp = NULL;

	if (!set)
		return (NULL);

	if (set->set_flag & NST_SF_KILL) {
		DTRACE_PROBE1(nst_create_err_kill, nstset_t *, set);
		return (NULL);
	}

	if (flags & NST_CREATE) {
		/* get new thread */

		if ((tp = nst_thread_create(set)) == NULL)
			return (NULL);

		/* initialise the thread */

		mutex_enter(&set->set_lock);
		nst_thread_init(tp);
		tp->tp_flag |= NST_TF_DESTROY;
		set->set_nlive++;
		mutex_exit(&set->set_lock);
	} else {
		if (!(tp = nst_thread_alloc(set, (flags & NST_SLEEP))))
			return (NULL);
	}

	/* set thread running */

	tp->tp_func = func;
	tp->tp_arg = arg;

	mutex_enter(&set->set_lock);
	cv_broadcast(&tp->tp_cv);
	mutex_exit(&set->set_lock);

	return (tp);
}


/*
 * nst_destroy
 *
 * Destroy a thread set created by nst_init(). It is the
 * caller's responsibility to ensure that all prior thread
 * calls have completed prior to this call and that the
 * caller is not executing from within thread context.
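 *
 * Teardown order: the set is flagged NST_SF_KILL, sleeping allocators
 * and all chained threads are woken, and this routine then waits on
 * set_kill_cv for every thread to exit before unchaining the set and
 * freeing it (the set is leaked if any threads remain).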
 */
void
nst_destroy(nstset_t *set)
{
	nsthread_t *tp, *ntp;
	nstset_t *sp, **spp;

	if (!set)
		return;

	mutex_enter(&nst_global_lock);

	for (sp = nst_sets; sp; sp = sp->set_next) {
		if (sp == set) {
			break;
		}
	}

	if (!sp) {
		mutex_exit(&nst_global_lock);
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_destroy(%p): no set?", (void *)set);
#endif
		DTRACE_PROBE1(nst_destroy_err_noset, nstset_t *, set);
		return;
	}

	mutex_enter(&set->set_lock);

	mutex_exit(&nst_global_lock);

	if (set->set_flag & NST_SF_KILL) {
		/*
		 * Wait for a pending destroy to complete
		 */

#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!nst_destroy(%p): duplicate destroy of set", (void *)set);
#endif

		set->set_destroy_cnt++;
		(void) cv_wait_sig(&set->set_destroy_cv, &set->set_lock);
		set->set_destroy_cnt--;

		mutex_exit(&set->set_lock);

		DTRACE_PROBE1(nst_destroy_end, nstset_t *, set);

		return;
	}

	set->set_flag |= NST_SF_KILL;

	/* Wake all threads in nst_create(NST_SLEEP) */
	cv_broadcast(&set->set_res_cv);

	/*
	 * Wake all the threads chained in the set.
	 */

	for (tp = set->set_chain; tp; tp = tp->tp_chain)
		cv_broadcast(&tp->tp_cv);

	/* Wait for the threads to exit */

	while ((set->set_free.q_forw != &set->set_free) ||
	    (set->set_reuse.q_forw != &set->set_reuse))
		cv_wait(&set->set_kill_cv, &set->set_lock);

	/* Unchain and destroy all the threads in the set */

	tp = set->set_chain;
	set->set_chain = 0;

	while (tp) {
		ntp = tp->tp_chain;
		tp->tp_chain = 0;

		nst_thread_destroy(tp);

		tp = ntp;
	}

	mutex_exit(&set->set_lock);

	mutex_enter(&nst_global_lock);

	/* remove the set from the chain */

	for (spp = &nst_sets; *spp; spp = &((*spp)->set_next)) {
		if (*spp == set) {
			*spp = set->set_next;
			set->set_next = NULL;
			break;
		}
	}

	mutex_exit(&nst_global_lock);

	mutex_enter(&set->set_lock);

#ifdef DEBUG
	if (set->set_nthread != 0) {
		cmn_err(CE_WARN, "!nst_destroy(%p): nthread != 0 (%d)",
		    (void *)set, set->set_nthread);
	}
#endif

	/* Allow any waiters (above) to continue */

	cv_broadcast(&set->set_destroy_cv);

	while (set->set_destroy_cnt > 0 || set->set_pending > 0 ||
	    set->set_res_cnt > 0) {
		mutex_exit(&set->set_lock);
		delay(drv_usectohz((clock_t)NST_KILL_TIMEOUT));
		mutex_enter(&set->set_lock);
	}

	mutex_exit(&set->set_lock);

	if (set->set_nthread != 0) {
		/* leak the set control structure */

		DTRACE_PROBE1(nst_destroy_end, nstset_t *, set);

		return;
	}

	cv_destroy(&set->set_res_cv);
	cv_destroy(&set->set_kill_cv);
	cv_destroy(&set->set_destroy_cv);
	mutex_destroy(&set->set_lock);
	kmem_free(set, sizeof (*set));
}


/*
 * nst_add_thread
 *
 * Add more threads into an existing thread set.
 * Returns the number successfully added.
 */
int
nst_add_thread(nstset_t *set, int nthread)
{
	nsthread_t *tp;
	int i;

	if (!set || nthread < 1) {
#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!nst_add_thread(%p, %d) - bad args", (void *)set, nthread);
#endif
		return (0);
	}

	for (i = 0; i < nthread; i++) {
		/* get new thread */

		if ((tp = nst_thread_create(set)) == NULL)
			break;

		/* add to free list */

		mutex_enter(&set->set_lock);
		nst_thread_free(tp);
		mutex_exit(&set->set_lock);
	}

	return (i);
}


/*
 * nst_del_thread
 *
 * Removes threads from an existing thread set.
 * Returns the number successfully removed.
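 *
 * Only idle threads are removed: each victim is taken from the free
 * pool without sleeping, so the loop stops early if fewer than
 * nthread threads are currently idle.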
 */
int
nst_del_thread(nstset_t *set, int nthread)
{
	nsthread_t **tpp, *tp;
	int i;

	if (!set || nthread < 1) {
#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!nst_del_thread(%p, %d) - bad args", (void *)set, nthread);
#endif
		return (0);
	}

	for (i = 0; i < nthread; i++) {
		/* get thread */

		if (!(tp = nst_thread_alloc(set, FALSE)))
			break;

		mutex_enter(&set->set_lock);

		/* unlink from the set */

		for (tpp = &set->set_chain; *tpp; tpp = &(*tpp)->tp_chain) {
			if (*tpp == tp) {
				*tpp = tp->tp_chain;
				tp->tp_chain = NULL;
				break;
			}
		}

		/* kill the thread */

		tp->tp_flag |= NST_TF_KILL;
		tp->tp_flag &= ~NST_TF_INUSE;
		cv_broadcast(&tp->tp_cv);

		/* wait for thread to exit */

		while (tp->tp_flag & NST_TF_KILL)
			cv_wait(&set->set_kill_cv, &set->set_lock);

		set->set_nlive--;
		mutex_exit(&set->set_lock);

		/* free kernel resources */

		nst_thread_destroy(tp);
	}

	return (i);
}


/*
 * nst_init
 *
 * Initialise a new nsthread set, returning its address or
 * NULL in the event of failure. The set should be destroyed
 * by calling nst_destroy().
 */
nstset_t *
nst_init(char *name, int nthread)
{
	nstset_t *set, *sp;
	int len, i;

	if (nthread < 1) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_init: invalid arg");
#endif
		return (NULL);
	}

	if (nthread > USHRT_MAX) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!nst_init: arg limit exceeded");
#endif
		return (NULL);
	}

	if (!(set = nst_kmem_zalloc(sizeof (*set), 2)))
		return (NULL);

	len = strlen(name);
	if (len >= sizeof (set->set_name))
		len = sizeof (set->set_name) - 1;

	bcopy(name, set->set_name, len);

	mutex_init(&set->set_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&set->set_destroy_cv, NULL, CV_DRIVER, NULL);
	cv_init(&set->set_kill_cv, NULL, CV_DRIVER, NULL);
	cv_init(&set->set_res_cv, NULL, CV_DRIVER, NULL);

	set->set_reuse.q_forw = set->set_reuse.q_back = &set->set_reuse;
	set->set_free.q_forw = set->set_free.q_back = &set->set_free;

	mutex_enter(&nst_global_lock);

	/* check for duplicates */

	for (sp = nst_sets; sp; sp = sp->set_next) {
		if (strcmp(sp->set_name, set->set_name) == 0) {
			/* duplicate */
			mutex_exit(&nst_global_lock);
			cv_destroy(&set->set_res_cv);
			cv_destroy(&set->set_kill_cv);
			cv_destroy(&set->set_destroy_cv);
			mutex_destroy(&set->set_lock);
			kmem_free(set, sizeof (*set));
#ifdef DEBUG
			cmn_err(CE_WARN,
			    "!nst_init: duplicate set \"%s\"", name);
#endif
			/* add threads if necessary */

			if (nthread > sp->set_nthread) {
				i = nst_add_thread(sp,
				    nthread - sp->set_nthread);
#ifdef DEBUG
				if (i != (nthread - sp->set_nthread))
					cmn_err(CE_WARN,
					    "!nst_init: failed to allocate %d "
					    "threads (got %d)",
					    (nthread - sp->set_nthread), i);
#endif
			}

			/* return pointer to existing set */

			return (sp);
		}
	}

	/* add new set to chain */
	set->set_next = nst_sets;
	nst_sets = set;

	mutex_exit(&nst_global_lock);

	i = nst_add_thread(set, nthread);

	if (i != nthread) {
#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!nst_init: failed to allocate %d threads (got %d)",
		    nthread, i);
#endif
		nst_destroy(set);
		return (NULL);
	}

	return (set);
}


/*
 * nst_nlive
 *
 * Return the number of live threads in a set.
 */
int
nst_nlive(nstset_t *set)
{
	return (set ? set->set_nlive : 0);
}


/*
 * nst_nthread
 *
 * Return the number of threads in the set.
 */
int
nst_nthread(nstset_t *set)
{
	return (set ? set->set_nthread : 0);
}

/*
 * nst_shutdown
 *
 * Called by nskern to shut down the nsthread software.
 */
void
nst_shutdown(void)
{
	nstset_t *set;

	mutex_enter(&nst_global_lock);

	while ((set = nst_sets) != NULL) {
		mutex_exit(&nst_global_lock);
		nst_destroy(set);
		mutex_enter(&nst_global_lock);
	}

	mutex_exit(&nst_global_lock);
	mutex_destroy(&nst_global_lock);
}


/*
 * nst_startup
 *
 * Called by nskern to initialise the nsthread software.
 */
int
nst_startup(void)
{
	mutex_init(&nst_global_lock, NULL, MUTEX_DRIVER, NULL);
	return (0);
}