kern_thread.c: diff between revisions 6f8132a867d53fbc48dd8222a4fd1408ff1d9226 (old) and 5215b1872feaad7ecf7cb1234749ecf04071deef (new)
1/*
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 49 unchanged lines hidden (view full) ---

58#include <machine/frame.h>
59
60/*
61 * KSEGRP related storage.
62 */
63static uma_zone_t ksegrp_zone;
64static uma_zone_t kse_zone;
65static uma_zone_t thread_zone;
66static uma_zone_t upcall_zone;
67
68/* DEBUG ONLY */
69SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
70static int thread_debug = 0;
71SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
72 &thread_debug, 0, "thread debug");
73
74static int max_threads_per_proc = 30;
75SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
76 &max_threads_per_proc, 0, "Limit on threads per proc");
77
78static int max_groups_per_proc = 5;
79SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
80 &max_groups_per_proc, 0, "Limit on thread groups per proc");
81
82static int virtual_cpu;
83
84#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
85
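/*
 * Example (illustrative sketch, hypothetical struct): RANGEOF supports
 * the start/end marker idiom used on struct thread/kse/ksegrp below.
 * bzero() clears every member from the start marker up to, but not
 * including, the end marker, leaving the rest of the struct intact.
 */
#if 0
struct demo {
	int	d_keep;		/* survives reuse */
	int	d_startzero;	/* first member cleared */
	int	d_counter;
	int	d_endzero;	/* first member NOT cleared */
};
/* bzero(&d->d_startzero, RANGEOF(struct demo, d_startzero, d_endzero)); */
#endif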
83struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
86TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
87TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
88TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
86struct mtx zombie_thread_lock;
87MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
88 "zombie_thread_lock", MTX_SPIN);
89TAILQ_HEAD(, kse_upcall) zombie_upcalls =
90 TAILQ_HEAD_INITIALIZER(zombie_upcalls);
91struct mtx kse_zombie_lock;
92MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
93
94static void kse_purge(struct proc *p, struct thread *td);
95static void kse_purge_group(struct thread *td);
96static int thread_update_usr_ticks(struct thread *td);
97static int thread_update_sys_ticks(struct thread *td);
98static void thread_alloc_spare(struct thread *td, struct thread *spare);
99
100static int
101sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
102{
103 int error, new_val;
104 int def_val;
105
106#ifdef SMP
107 def_val = mp_ncpus;
108#else
109 def_val = 1;
110#endif
111 if (virtual_cpu == 0)
112 new_val = def_val;
113 else
114 new_val = virtual_cpu;
115 error = sysctl_handle_int(oidp, &new_val, 0, req);
116 if (error != 0 || req->newptr == NULL)
117 return (error);
118 if (new_val < 0)
119 return (EINVAL);
120 virtual_cpu = new_val;
121 return (0);
122}
123
124/* DEBUG ONLY */
125SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
126 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
127 "debug virtual cpus");
128
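/*
 * Example (userland, minimal sketch): reading and writing the
 * kern.threads.virtual_cpu knob above through sysctlbyname(3).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int oldval, newval = 4;
	size_t oldlen = sizeof(oldval);

	if (sysctlbyname("kern.threads.virtual_cpu", &oldval, &oldlen,
	    &newval, sizeof(newval)) == -1)
		perror("sysctlbyname");
	else
		printf("virtual_cpu: %d -> %d\n", oldval, newval);
	return (0);
}
#endif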
129/*
130 * Prepare a thread for use.
131 */
132static void
133thread_ctor(void *mem, int size, void *arg)
134{
135 struct thread *td;
136
137 td = (struct thread *)mem;
138 td->td_state = TDS_INACTIVE;
102 td->td_flags |= TDF_UNBOUND;
139}
140
141/*
142 * Reclaim a thread after use.
143 */
144static void
145thread_dtor(void *mem, int size, void *arg)
146{

--- 45 unchanged lines hidden (view full) ---

192static void
193thread_fini(void *mem, int size)
194{
195 struct thread *td;
196
197 td = (struct thread *)mem;
198 pmap_dispose_thread(td);
199}
200
201/*
202 * Initialize type-stable parts of a kse (when newly created).
203 */
204static void
205kse_init(void *mem, int size)
206{
207 struct kse *ke;
208
209 ke = (struct kse *)mem;
210 ke->ke_sched = (struct ke_sched *)&ke[1];
211}
212
213/*
214 * Initialize type-stable parts of a ksegrp (when newly created).
215 */
216static void
217ksegrp_init(void *mem, int size)
218{
219 struct ksegrp *kg;
220
221 kg = (struct ksegrp *)mem;
222 kg->kg_sched = (struct kg_sched *)&kg[1];
223}
224
225/*
188 * KSE is linked onto the idle queue.
226 * KSE is linked into kse group.
227 */
228void
229kse_link(struct kse *ke, struct ksegrp *kg)
230{
231 struct proc *p = kg->kg_proc;
232
233 TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
234 kg->kg_kses++;
235 ke->ke_state = KES_UNQUEUED;
236 ke->ke_proc = p;
237 ke->ke_ksegrp = kg;
200 ke->ke_owner = NULL;
238 ke->ke_thread = NULL;
239 ke->ke_oncpu = NOCPU;
240 ke->ke_flags = 0;
241}
242
243void
244kse_unlink(struct kse *ke)
245{
246 struct ksegrp *kg;
247
248 mtx_assert(&sched_lock, MA_OWNED);
249 kg = ke->ke_ksegrp;
250 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
214 if (--kg->kg_kses == 0) {
215 ksegrp_unlink(kg);
251 if (ke->ke_state == KES_IDLE) {
252 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
253 kg->kg_idle_kses--;
216 }
254 }
255 if (--kg->kg_kses == 0)
256 ksegrp_unlink(kg);
257 /*
258 * Aggregate stats from the KSE
259 */
260 kse_stash(ke);
261}
262
263void
264ksegrp_link(struct ksegrp *kg, struct proc *p)
265{
266
267 TAILQ_INIT(&kg->kg_threads);
268 TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
269 TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
270 TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
231 TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
232 kg->kg_proc = p;
233/* the following counters are in the -zero- section and may not need clearing */
271 TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
272 TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
273 kg->kg_proc = p;
274 /*
275 * the following counters are in the -zero- section
276 * and may not need clearing
277 */
278 kg->kg_numthreads = 0;
237 kg->kg_loan_kses = 0;
279 kg->kg_runnable = 0;
280 kg->kg_kses = 0;
281 kg->kg_runq_kses = 0; /* XXXKSE change name */
282 kg->kg_idle_kses = 0;
283 kg->kg_numupcalls = 0;
284 /* link it in now that it's consistent */
285 p->p_numksegrps++;
286 TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
287}
288
289void
290ksegrp_unlink(struct ksegrp *kg)
291{
292 struct proc *p;
293
294 mtx_assert(&sched_lock, MA_OWNED);
295 KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
296 KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
297 KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
298
299 p = kg->kg_proc;
251 KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
252 ("kseg_unlink: residual threads or KSEs"));
300 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
301 p->p_numksegrps--;
302 /*
303 * Aggregate stats from the KSE
304 */
305 ksegrp_stash(kg);
306}
307
308struct kse_upcall *
309upcall_alloc(void)
310{
311 struct kse_upcall *ku;
312
313 ku = uma_zalloc(upcall_zone, 0);
314 bzero(ku, sizeof(*ku));
315 return (ku);
316}
317
318void
319upcall_free(struct kse_upcall *ku)
320{
321
322 uma_zfree(upcall_zone, ku);
323}
324
325void
326upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
327{
328
329 mtx_assert(&sched_lock, MA_OWNED);
330 TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
331 ku->ku_ksegrp = kg;
332 kg->kg_numupcalls++;
333}
334
335void
336upcall_unlink(struct kse_upcall *ku)
337{
338 struct ksegrp *kg = ku->ku_ksegrp;
339
340 mtx_assert(&sched_lock, MA_OWNED);
341 KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
342 TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
343 kg->kg_numupcalls--;
344 upcall_stash(ku);
345}
346
347void
348upcall_remove(struct thread *td)
349{
350
351 if (td->td_upcall) {
352 td->td_upcall->ku_owner = NULL;
353 upcall_unlink(td->td_upcall);
354 td->td_upcall = 0;
355 }
356}
357
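/*
 * Example (minimal sketch): the kse_upcall life cycle implemented by
 * the helpers above, as kse_create()/kse_exit() below use them.
 */
#if 0
	struct kse_upcall *ku;

	ku = upcall_alloc();		/* zeroed kse_upcall */
	mtx_lock_spin(&sched_lock);
	upcall_link(ku, kg);		/* kg->kg_numupcalls++ */
	ku->ku_owner = td;		/* give it an owner thread */
	td->td_upcall = ku;
	/* ... later, when the owner is done with it ... */
	upcall_remove(td);		/* clears owner, unlinks, stashes */
	mtx_unlock_spin(&sched_lock);
#endif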
358/*
262 * for a newly created process,
263 * link up a the structure and its initial threads etc.
359 * For a newly created process,
360 * link up all the structures and its initial threads etc.
361 */
362void
363proc_linkup(struct proc *p, struct ksegrp *kg,
364 struct kse *ke, struct thread *td)
365{
366
367 TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
368 TAILQ_INIT(&p->p_threads); /* all threads in proc */
369 TAILQ_INIT(&p->p_suspended); /* Threads suspended */
370 p->p_numksegrps = 0;
371 p->p_numthreads = 0;
372
373 ksegrp_link(kg, p);
374 kse_link(ke, kg);
375 thread_link(td, kg);
376}
377
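/*
 * Sketch of the containment hierarchy wired up by proc_linkup() above:
 *
 *	proc
 *	 `-- ksegrp(s)		p_ksegrps / p_numksegrps
 *	      |-- kse(s)	kg_kseq / kg_kses (+ kg_iq idle queue)
 *	      |-- upcall(s)	kg_upcalls / kg_numupcalls
 *	      `-- thread(s)	kg_threads / kg_numthreads
 */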
378/*
379struct kse_thr_interrupt_args {
380 struct kse_thr_mailbox * tmbx;
381};
382*/
383int
384kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
385{
386 struct proc *p;
387 struct thread *td2;
388
389 p = td->td_proc;
288 /* KSE-enabled processes only, please. */
289 if (!(p->p_flag & P_KSES))
390 if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
391 return (EINVAL);
291 if (uap->tmbx == NULL)
292 return (EINVAL);
392 mtx_lock_spin(&sched_lock);
393 FOREACH_THREAD_IN_PROC(p, td2) {
394 if (td2->td_mailbox == uap->tmbx) {
395 td2->td_flags |= TDF_INTERRUPT;
396 if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
397 if (td2->td_flags & TDF_CVWAITQ)
398 cv_abort(td2);
399 else
400 abortsleep(td2);
401 }
402 mtx_unlock_spin(&sched_lock);
403 return (0);
404 }
405 }
406 mtx_unlock_spin(&sched_lock);
407 return (ESRCH);
408}
409
410/*
411struct kse_exit_args {
412 register_t dummy;
413};
414*/
415int
416kse_exit(struct thread *td, struct kse_exit_args *uap)
417{
418 struct proc *p;
419 struct ksegrp *kg;
420 struct kse *ke;
421
422 p = td->td_proc;
319 /* Only UTS can do the syscall */
320 if (!(p->p_flag & P_KSES) || (td->td_mailbox != NULL))
423 /*
424 * Only UTS can call the syscall and current group
425 * should be a threaded group.
426 */
427 if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
428 return (EINVAL);
429 KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
430
431 kg = td->td_ksegrp;
323 /* serialize killing kse */
432 /* Serialize removing upcall */
433 PROC_LOCK(p);
434 mtx_lock_spin(&sched_lock);
326 if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
435 if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
436 mtx_unlock_spin(&sched_lock);
437 PROC_UNLOCK(p);
438 return (EDEADLK);
439 }
440 ke = td->td_kse;
441 upcall_remove(td);
442 if (p->p_numthreads == 1) {
333 ke->ke_flags &= ~KEF_DOUPCALL;
334 ke->ke_mailbox = NULL;
443 kse_purge(p, td);
444 p->p_flag &= ~P_KSES;
445 mtx_unlock_spin(&sched_lock);
446 PROC_UNLOCK(p);
447 } else {
339 ke->ke_flags |= KEF_EXIT;
448 if (kg->kg_numthreads == 1) { /* Shutdown a group */
449 kse_purge_group(td);
450 ke->ke_flags |= KEF_EXIT;
451 }
452 thread_exit();
453 /* NOTREACHED */
454 }
455 return (0);
456}
457
458/*
459 * Either becomes an upcall or waits for an awakening event and
348 * THEN becomes an upcall. Only error cases return.
460 * then becomes an upcall. Only error cases return.
461 */
462/*
463struct kse_release_args {
464 register_t dummy;
465};
466*/
467int
351kse_release(struct thread * td, struct kse_release_args * uap)
468kse_release(struct thread *td, struct kse_release_args *uap)
469{
470 struct proc *p;
471 struct ksegrp *kg;
472
473 p = td->td_proc;
474 kg = td->td_ksegrp;
475 /*
359 * kse must have a mailbox ready for upcall, and only UTS can
360 * do the syscall.
361 */
362 if (!(p->p_flag & P_KSES) ||
363 (td->td_mailbox != NULL) ||
364 (td->td_kse->ke_mailbox == NULL))
476 * Only UTS can call the syscall and current group
477 * should be a threaded group.
478 */
479 if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
480 return (EINVAL);
481 KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
482
483 PROC_LOCK(p);
484 mtx_lock_spin(&sched_lock);
485 /* Change OURSELF to become an upcall. */
370 td->td_flags = TDF_UPCALLING; /* BOUND */
371 if (!(td->td_kse->ke_flags & (KEF_DOUPCALL|KEF_ASTPENDING)) &&
486 td->td_flags = TDF_UPCALLING;
487 if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
488 (kg->kg_completed == NULL)) {
373 /*
374 * The KSE will however be lendable.
375 */
376 TD_SET_IDLE(td);
377 PROC_UNLOCK(p);
378 p->p_stats->p_ru.ru_nvcsw++;
379 mi_switch();
489 kg->kg_upsleeps++;
490 mtx_unlock_spin(&sched_lock);
491 msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause",
492 NULL);
493 kg->kg_upsleeps--;
494 PROC_UNLOCK(p);
495 } else {
496 mtx_unlock_spin(&sched_lock);
497 PROC_UNLOCK(p);
498 }
499 return (0);
500}
501
502/* struct kse_wakeup_args {
503 struct kse_mailbox *mbx;
504}; */
505int
506kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
507{
508 struct proc *p;
395 struct kse *ke;
396 struct ksegrp *kg;
509 struct ksegrp *kg;
510 struct kse_upcall *ku;
511 struct thread *td2;
512
513 p = td->td_proc;
514 td2 = NULL;
515 ku = NULL;
516 /* KSE-enabled processes only, please. */
517 if (!(p->p_flag & P_KSES))
403 return EINVAL;
518 return (EINVAL);
404
519
520 PROC_LOCK(p);
521 mtx_lock_spin(&sched_lock);
522 if (uap->mbx) {
523 FOREACH_KSEGRP_IN_PROC(p, kg) {
408 FOREACH_KSE_IN_GROUP(kg, ke) {
409 if (ke->ke_mailbox != uap->mbx)
410 continue;
411 td2 = ke->ke_owner;
412 KASSERT((td2 != NULL),("KSE with no owner"));
413 break;
524 FOREACH_UPCALL_IN_GROUP(kg, ku) {
525 if (ku->ku_mailbox == uap->mbx)
526 break;
527 }
415 if (td2) {
528 if (ku)
529 break;
417 }
530 }
531 } else {
420 /*
421 * look for any idle KSE to resurrect.
422 */
532 kg = td->td_ksegrp;
424 FOREACH_KSE_IN_GROUP(kg, ke) {
425 td2 = ke->ke_owner;
426 KASSERT((td2 != NULL),("KSE with no owner2"));
427 if (TD_IS_IDLE(td2))
428 break;
533 if (kg->kg_upsleeps) {
534 wakeup_one(&kg->kg_completed);
535 mtx_unlock_spin(&sched_lock);
536 PROC_UNLOCK(p);
537 return (0);
429 }
538 }
430 KASSERT((td2 != NULL), ("no thread(s)"));
539 ku = TAILQ_FIRST(&kg->kg_upcalls);
431 }
540 }
432 if (td2) {
433 if (TD_IS_IDLE(td2)) {
434 TD_CLR_IDLE(td2);
435 setrunnable(td2);
436 } else if (td != td2) {
437 /* guarantee do an upcall ASAP */
438 td2->td_kse->ke_flags |= KEF_DOUPCALL;
541 if (ku) {
542 if ((td2 = ku->ku_owner) == NULL) {
543 panic("%s: no owner", __func__);
544 } else if (TD_ON_SLEEPQ(td2) &&
545 (td2->td_wchan == &kg->kg_completed)) {
546 abortsleep(td2);
547 } else {
548 ku->ku_flags |= KUF_DOUPCALL;
549 }
550 mtx_unlock_spin(&sched_lock);
551 PROC_UNLOCK(p);
552 return (0);
553 }
554 mtx_unlock_spin(&sched_lock);
555 PROC_UNLOCK(p);
556 return (ESRCH);
557}
558
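/*
 * Example (userland, minimal sketch): a worker publishes a completed
 * context and kicks the UTS sleeping in kse_release() above; km and
 * enqueue_completed are hypothetical UTS-side names.
 */
#if 0
	enqueue_completed(tmbx);	/* hypothetical: make work visible */
	kse_wakeup(&km);		/* wake the upcall owning mailbox km */
#endif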
559/*
560 * No new KSEG: first call: use current KSE, don't schedule an upcall
449 * All other situations, do allocate a new KSE and schedule an upcall on it.
561 * All other situations, do allocate max new KSEs and schedule an upcall.
562 */
563/* struct kse_create_args {
564 struct kse_mailbox *mbx;
565 int newgroup;
566}; */
567int
568kse_create(struct thread *td, struct kse_create_args *uap)
569{
570 struct kse *newke;
459 struct kse *ke;
571 struct ksegrp *newkg;
572 struct ksegrp *kg;
573 struct proc *p;
574 struct kse_mailbox mbx;
464 int err;
575 struct kse_upcall *newku;
576 int err, ncpus;
577
578 p = td->td_proc;
579 if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
580 return (err);
581
470 p->p_flag |= P_KSES; /* easier to just set it than to test and set */
582 /* Too bad the kernel doesn't always have a cpu counter. */
583#ifdef SMP
584 ncpus = mp_ncpus;
585#else
586 ncpus = 1;
587#endif
588 if (thread_debug && virtual_cpu != 0)
589 ncpus = virtual_cpu;
590
591 /* Easier to just set it than to test and set */
592 p->p_flag |= P_KSES;
593 kg = td->td_ksegrp;
594 if (uap->newgroup) {
595 /* There is a race condition here, but it is cheap */
596 if (p->p_numksegrps >= max_groups_per_proc)
597 return (EPROCLIM);
598 /*
599 * If we want a new KSEGRP it doesn't matter whether
600 * we have already fired up KSE mode before or not.
478 * We put the process in KSE mode and create a new KSEGRP
479 * and KSE. If our KSE has not got a mailbox yet then
480 * that doesn't matter, just leave it that way. It will
481 * ensure that this thread stays BOUND. It's possible
482 * that the call came from a threaded library and the main
483 * program knows nothing of threads.
601 * We put the process in KSE mode and create a new KSEGRP.
602 */
603 newkg = ksegrp_alloc();
604 bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
605 kg_startzero, kg_endzero));
606 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
607 RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
490 newke = kse_alloc();
608 mtx_lock_spin(&sched_lock);
609 ksegrp_link(newkg, p);
610 if (p->p_numksegrps >= max_groups_per_proc) {
611 ksegrp_unlink(newkg);
612 mtx_unlock_spin(&sched_lock);
613 return (EPROCLIM);
614 }
615 mtx_unlock_spin(&sched_lock);
616 } else {
492 /*
493 * Otherwise, if we have already set this KSE
494 * to have a mailbox, we want to make another KSE here,
495 * but only if there are not already the limit, which
496 * is 1 per CPU max.
497 *
498 * If the current KSE doesn't have a mailbox we just use it
499 * and give it one.
500 *
501 * Because we don't like to access
502 * the KSE outside of schedlock if we are UNBOUND,
503 * (because it can change if we are preempted by an interrupt)
504 * we can deduce it as having a mailbox if we are UNBOUND,
505 * and only need to actually look at it if we are BOUND,
506 * which is safe.
617 newkg = kg;
618 }
619
620 /*
621 * Creating more upcalls than the number of physical cpus does
622 * not help performance.
623 */
624 if (newkg->kg_numupcalls >= ncpus)
625 return (EPROCLIM);
626
627 if (newkg->kg_numupcalls == 0) {
628 /*
629 * Initialize KSE group, optimized for MP.
630 * Create as many KSEs as there are physical cpus; this increases
631 * concurrency even if userland is not MP safe and can only run
632 * on a single CPU (true for early versions of libpthread).
633 * In an ideal world, every physical cpu should execute a thread.
634 * If there are enough KSEs, threads in the kernel can be
635 * executed in parallel on different cpus at full speed;
636 * concurrency in the kernel shouldn't be restricted by the number
637 * of upcalls userland provides.
638 * Adding more upcall structures only increases concurrency
639 * in userland.
640 * The highest performance configuration is:
641 * N kses = N upcalls = N physical cpus
642 */
508 if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
509 if (thread_debug == 0) { /* if debugging, allow more */
510#ifdef SMP
511 if (kg->kg_kses > mp_ncpus)
512#endif
513 return (EPROCLIM);
514 }
643 while (newkg->kg_kses < ncpus) {
644 newke = kse_alloc();
516 } else {
517 newke = NULL;
518 }
519 newkg = NULL;
520 }
521 if (newke) {
645 bzero(&newke->ke_startzero, RANGEOF(struct kse,
646 ke_startzero, ke_endzero));
647#if 0
648 mtx_lock_spin(&sched_lock);
649 bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
650 RANGEOF(struct kse, ke_startcopy, ke_endcopy));
651 mtx_unlock_spin(&sched_lock);
652#endif
528 /* For the first call this may not have been set */
529 if (td->td_standin == NULL) {
530 td->td_standin = thread_alloc();
653 mtx_lock_spin(&sched_lock);
654 kse_link(newke, newkg);
655 if (p->p_sflag & PS_NEEDSIGCHK)
656 newke->ke_flags |= KEF_ASTPENDING;
657 /* Add engine */
658 kse_reassign(newke);
659 mtx_unlock_spin(&sched_lock);
531 }
660 }
532 mtx_lock_spin(&sched_lock);
533 if (newkg) {
534 if (p->p_numksegrps >= max_groups_per_proc) {
535 mtx_unlock_spin(&sched_lock);
536 ksegrp_free(newkg);
537 kse_free(newke);
538 return (EPROCLIM);
539 }
540 ksegrp_link(newkg, p);
541 }
542 else
543 newkg = kg;
544 kse_link(newke, newkg);
545 if (p->p_sflag & PS_NEEDSIGCHK)
546 newke->ke_flags |= KEF_ASTPENDING;
547 newke->ke_mailbox = uap->mbx;
548 newke->ke_upcall = mbx.km_func;
549 bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
550 thread_schedule_upcall(td, newke);
661 }
662 newku = upcall_alloc();
663 newku->ku_mailbox = uap->mbx;
664 newku->ku_func = mbx.km_func;
665 bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
666
667 /* For the first call this may not have been set */
668 if (td->td_standin == NULL)
669 thread_alloc_spare(td, NULL);
670
671 mtx_lock_spin(&sched_lock);
672 if (newkg->kg_numupcalls >= ncpus) {
673 upcall_free(newku);
674 mtx_unlock_spin(&sched_lock);
675 return (EPROCLIM);
676 }
677 upcall_link(newku, newkg);
678
679 /*
680 * Each upcall structure has an owner thread, find which
681 * one owns it.
682 */
683 if (uap->newgroup) {
684 /*
685 * Because the new ksegrp has no thread yet,
686 * create an initial upcall thread to own it.
687 */
688 thread_schedule_upcall(td, newku);
689 } else {
690 /*
554 * If we didn't allocate a new KSE then we are using
555 * the existing (BOUND) kse.
691 * If the current thread doesn't have an upcall structure,
692 * just assign the upcall to it.
693 */
557 ke = td->td_kse;
558 ke->ke_mailbox = uap->mbx;
559 ke->ke_upcall = mbx.km_func;
560 bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
694 if (td->td_upcall == NULL) {
695 newku->ku_owner = td;
696 td->td_upcall = newku;
697 } else {
698 /*
699 * Create a new upcall thread to own it.
700 */
701 thread_schedule_upcall(td, newku);
702 }
703 }
562 /*
563 * Fill out the KSE-mode specific fields of the new kse.
564 */
704 mtx_unlock_spin(&sched_lock);
705 return (0);
706}
707
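/*
 * Example (userland, minimal sketch): how a UTS might enter KSE mode
 * with the mailbox consumed by kse_create() above; uts_entry, uts_stack
 * and UTS_STACK_SIZE are hypothetical.
 */
#if 0
	struct kse_mailbox km;

	bzero(&km, sizeof(km));
	km.km_func = uts_entry;			/* upcall entry point */
	km.km_stack.ss_sp = uts_stack;
	km.km_stack.ss_size = UTS_STACK_SIZE;
	kse_create(&km, 0);			/* 0: stay in this ksegrp */
#endif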
708/*
709 * Fill a ucontext_t with a thread's context information.
710 *
711 * This is an analogue to getcontext(3).
712 */

--- 64 unchanged lines hidden (view full) ---

777 uma_prealloc(thread_zone, 512); /* XXX arbitrary */
778#endif
779 ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
780 NULL, NULL, ksegrp_init, NULL,
781 UMA_ALIGN_CACHE, 0);
782 kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
783 NULL, NULL, kse_init, NULL,
784 UMA_ALIGN_CACHE, 0);
785 upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
786 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
787}
788
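/*
 * Example (minimal sketch): the init/ctor split in the zones above.
 * The *_init functions run once when backing memory first enters the
 * zone (type-stable state such as the ke_sched/kg_sched pointers);
 * thread_ctor runs on every allocation.
 */
#if 0
	struct kse *ke;

	ke = uma_zalloc(kse_zone, 0);	/* kse_init() has already run */
	/* ... use ke ... */
	uma_zfree(kse_zone, ke);	/* memory stays initialized */
#endif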
789/*
790 * Stash an embarrassingly extra thread into the zombie thread queue.
791 */
792void
793thread_stash(struct thread *td)
794{
653 mtx_lock_spin(&zombie_thread_lock);
795 mtx_lock_spin(&kse_zombie_lock);
796 TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
655 mtx_unlock_spin(&zombie_thread_lock);
797 mtx_unlock_spin(&kse_zombie_lock);
798}
799
800/*
801 * Stash an embarrassingly extra kse into the zombie kse queue.
802 */
803void
804kse_stash(struct kse *ke)
805{
664 mtx_lock_spin(&zombie_thread_lock);
806 mtx_lock_spin(&kse_zombie_lock);
807 TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
666 mtx_unlock_spin(&zombie_thread_lock);
808 mtx_unlock_spin(&kse_zombie_lock);
809}
810
811/*
812 * Stash an embarrassingly extra upcall into the zombie upcall queue.
813 */
814
815void
816upcall_stash(struct kse_upcall *ku)
817{
818 mtx_lock_spin(&kse_zombie_lock);
819 TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
820 mtx_unlock_spin(&kse_zombie_lock);
821}
822
823/*
824 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
825 */
826void
827ksegrp_stash(struct ksegrp *kg)
828{
675 mtx_lock_spin(&zombie_thread_lock);
829 mtx_lock_spin(&kse_zombie_lock);
830 TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
677 mtx_unlock_spin(&zombie_thread_lock);
831 mtx_unlock_spin(&kse_zombie_lock);
832}
833
834/*
681 * Reap zombie threads.
835 * Reap zombie kse resource.
836 */
837void
838thread_reap(void)
839{
840 struct thread *td_first, *td_next;
841 struct kse *ke_first, *ke_next;
842 struct ksegrp *kg_first, * kg_next;
843 struct kse_upcall *ku_first, *ku_next;
844
845 /*
691 * don't even bother to lock if none at this instant
692 * We really don't care about the next instant..
846 * Don't even bother to lock if none at this instant,
847 * we really don't care about the next instant..
848 */
849 if ((!TAILQ_EMPTY(&zombie_threads))
850 || (!TAILQ_EMPTY(&zombie_kses))
696 || (!TAILQ_EMPTY(&zombie_ksegrps))) {
697 mtx_lock_spin(&zombie_thread_lock);
851 || (!TAILQ_EMPTY(&zombie_ksegrps))
852 || (!TAILQ_EMPTY(&zombie_upcalls))) {
853 mtx_lock_spin(&kse_zombie_lock);
854 td_first = TAILQ_FIRST(&zombie_threads);
855 ke_first = TAILQ_FIRST(&zombie_kses);
856 kg_first = TAILQ_FIRST(&zombie_ksegrps);
857 ku_first = TAILQ_FIRST(&zombie_upcalls);
858 if (td_first)
859 TAILQ_INIT(&zombie_threads);
860 if (ke_first)
861 TAILQ_INIT(&zombie_kses);
862 if (kg_first)
863 TAILQ_INIT(&zombie_ksegrps);
707 mtx_unlock_spin(&zombie_thread_lock);
864 if (ku_first)
865 TAILQ_INIT(&zombie_upcalls);
866 mtx_unlock_spin(&kse_zombie_lock);
867 while (td_first) {
868 td_next = TAILQ_NEXT(td_first, td_runq);
869 if (td_first->td_ucred)
870 crfree(td_first->td_ucred);
871 thread_free(td_first);
872 td_first = td_next;
873 }
874 while (ke_first) {
875 ke_next = TAILQ_NEXT(ke_first, ke_procq);
876 kse_free(ke_first);
877 ke_first = ke_next;
878 }
879 while (kg_first) {
880 kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
881 ksegrp_free(kg_first);
882 kg_first = kg_next;
883 }
884 while (ku_first) {
885 ku_next = TAILQ_NEXT(ku_first, ku_link);
886 upcall_free(ku_first);
887 ku_first = ku_next;
888 }
889 }
890}
891
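/*
 * Example (minimal sketch): the stash/reap pattern above defers frees
 * that would be unsafe under the scheduler spin lock.  A context that
 * may not sleep stashes the object; a later, safe context reaps it.
 */
#if 0
	/* with sched_lock held: may not call uma_zfree() */
	upcall_stash(ku);	/* onto zombie_upcalls under kse_zombie_lock */

	/* later, in a safe context (e.g. when allocating a new thread) */
	thread_reap();		/* frees all stashed threads/kses/upcalls */
#endif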
892/*
893 * Allocate a ksegrp.
894 */
895struct ksegrp *
896ksegrp_alloc(void)

--- 56 unchanged lines hidden (view full) ---

953 */
954int
955thread_export_context(struct thread *td)
956{
957 struct proc *p;
958 struct ksegrp *kg;
959 uintptr_t mbx;
960 void *addr;
795 int error;
961 int error, temp;
962 ucontext_t uc;
797 uint temp;
963
964 p = td->td_proc;
965 kg = td->td_ksegrp;
966
967 /* Export the user/machine context. */
803#if 0
804 addr = (caddr_t)td->td_mailbox +
805 offsetof(struct kse_thr_mailbox, tm_context);
806#else /* if user pointer arithmetic is valid in the kernel */
807 addr = (void *)(&td->td_mailbox->tm_context);
808#endif
968 addr = (void *)(&td->td_mailbox->tm_context);
969 error = copyin(addr, &uc, sizeof(ucontext_t));
970 if (error)
971 goto bad;
972
973 thread_getcontext(td, &uc);
974 error = copyout(&uc, addr, sizeof(ucontext_t));
975 if (error)
976 goto bad;
977
818 /* get address in latest mbox of list pointer */
819#if 0
820 addr = (caddr_t)td->td_mailbox
821 + offsetof(struct kse_thr_mailbox , tm_next);
822#else /* if user pointer arithmetic is valid in the kernel */
978 /* Exports clock ticks in kernel mode */
979 addr = (caddr_t)(&td->td_mailbox->tm_sticks);
980 temp = fuword(addr) + td->td_usticks;
981 if (suword(addr, temp))
982 goto bad;
983
984 /* Get address in latest mbox of list pointer */
823 addr = (void *)(&td->td_mailbox->tm_next);
985 addr = (void *)(&td->td_mailbox->tm_next);
824#endif
986 /*
987 * Put the saved address of the previous first
988 * entry into this one
989 */
990 for (;;) {
991 mbx = (uintptr_t)kg->kg_completed;
992 if (suword(addr, mbx)) {
993 error = EFAULT;
994 goto bad;
995 }
996 PROC_LOCK(p);
997 if (mbx == (uintptr_t)kg->kg_completed) {
998 kg->kg_completed = td->td_mailbox;
999 /*
1000 * The thread context may be taken away by
1001 * other upcall threads when we unlock the
1002 * process lock, so it's no longer valid to
1003 * use it anywhere else.
1004 */
1005 td->td_mailbox = NULL;
1006 PROC_UNLOCK(p);
1007 break;
1008 }
1009 PROC_UNLOCK(p);
1010 }
843 addr = (caddr_t)td->td_mailbox
844 + offsetof(struct kse_thr_mailbox, tm_sticks);
845 temp = fuword(addr) + td->td_usticks;
846 if (suword(addr, temp))
847 goto bad;
1011 td->td_usticks = 0;
1012 return (0);
1013
1014bad:
1015 PROC_LOCK(p);
1016 psignal(p, SIGSEGV);
1017 PROC_UNLOCK(p);
1018 /* The mailbox is bad, don't use it */
1019 td->td_mailbox = NULL;
1020 td->td_usticks = 0;
1021 return (error);
1022}
1023
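/*
 * Example (userland, minimal sketch): kg_completed built above is a
 * list of struct kse_thr_mailbox in USER memory, linked through
 * tm_next.  After an upcall the UTS might drain it like this;
 * uts_make_runnable is hypothetical.
 */
#if 0
	struct kse_thr_mailbox *tm, *next;

	for (tm = km.km_completed; tm != NULL; tm = next) {
		next = tm->tm_next;
		uts_make_runnable(tm);	/* resume via tm_context later */
	}
	km.km_completed = NULL;
#endif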
1024/*
1025 * Take the list of completed mailboxes for this KSEGRP and put them on this
859 * KSE's mailbox as it's the next one going up.
1026 * upcall's mailbox as it's the next one going up.
1027 */
1028static int
862thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
1029thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1030{
1031 struct proc *p = kg->kg_proc;
1032 void *addr;
1033 uintptr_t mbx;
1034
868#if 0
869 addr = (caddr_t)ke->ke_mailbox
870 + offsetof(struct kse_mailbox, km_completed);
871#else /* if user pointer arithmetic is valid in the kernel */
872 addr = (void *)(&ke->ke_mailbox->km_completed);
873#endif
1035 addr = (void *)(&ku->ku_mailbox->km_completed);
1036 for (;;) {
1037 mbx = (uintptr_t)kg->kg_completed;
1038 if (suword(addr, mbx)) {
1039 PROC_LOCK(p);
1040 psignal(p, SIGSEGV);
1041 PROC_UNLOCK(p);
1042 return (EFAULT);
1043 }

--- 8 unchanged lines hidden (view full) ---

1052 }
1053 return (0);
1054}
1055
1056/*
1057 * This function should be called at statclock interrupt time
1058 */
1059int
898thread_add_ticks_intr(int user, uint ticks)
1060thread_statclock(int user)
1061{
1062 struct thread *td = curthread;
901 struct kse *ke = td->td_kse;
902
1063
903 if (ke->ke_mailbox == NULL)
904 return -1;
1064 if (td->td_ksegrp->kg_numupcalls == 0)
1065 return (-1);
1066 if (user) {
1067 /* Current always do via ast() */
907 ke->ke_flags |= KEF_ASTPENDING;
908 ke->ke_uuticks += ticks;
1068 td->td_kse->ke_flags |= KEF_ASTPENDING; /* XXX TDF_ASTPENDING */
1069 td->td_flags |= TDF_USTATCLOCK;
1070 td->td_uuticks++;
1071 } else {
1072 if (td->td_mailbox != NULL)
911 td->td_usticks += ticks;
912 else
913 ke->ke_usticks += ticks;
1073 td->td_usticks++;
1074 else {
1075 /* XXXKSE
1076 * We will call thread_user_enter() for every
1077 * kernel entry in the future, so if the thread mailbox
1078 * is NULL it must be the UTS; don't account
1079 * clock ticks for it.
1080 */
1081 }
914 }
1082 }
915 return 0;
1083 return (0);
1084}
1085
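/*
 * Sketch of the tick accounting flow: thread_statclock() above counts
 * ticks on the thread (presumably from statclock()), and the
 * thread_update_*_ticks() helpers below export them to user-visible
 * mailboxes:
 *
 *	statclock(user)
 *	  -> thread_statclock(user)       td_uuticks++ / td_usticks++
 *	upcall return path
 *	  -> thread_update_usr_ticks(td)  folds td_uuticks into tm_uticks
 *	syscall return path
 *	  -> thread_update_sys_ticks(td)  folds td_usticks into tm_sticks
 */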
1086/*
1087 * Export user mode state clock ticks
1088 */
1089static int
919thread_update_uticks(void)
1090thread_update_usr_ticks(struct thread *td)
1091{
921 struct thread *td = curthread;
1092 struct proc *p = td->td_proc;
923 struct kse *ke = td->td_kse;
1093 struct kse_thr_mailbox *tmbx;
1094 struct kse_upcall *ku;
1095 caddr_t addr;
926 uint uticks, sticks;
1096 uint uticks;
927
1097
928 if (ke->ke_mailbox == NULL)
929 return 0;
930
931 uticks = ke->ke_uuticks;
932 ke->ke_uuticks = 0;
933 sticks = ke->ke_usticks;
934 ke->ke_usticks = 0;
935#if 0
936 tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
937 + offsetof(struct kse_mailbox, km_curthread));
938#else /* if user pointer arithmetic is ok in the kernel */
939 tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
940#endif
1098 if ((ku = td->td_upcall) == NULL)
1099 return (-1);
1100
1101 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1102 if ((tmbx == NULL) || (tmbx == (void *)-1))
942 return 0;
1103 return (-1);
1104 uticks = td->td_uuticks;
1105 td->td_uuticks = 0;
1106 if (uticks) {
944 addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks);
1107 addr = (caddr_t)&tmbx->tm_uticks;
1108 uticks += fuword(addr);
946 if (suword(addr, uticks))
947 goto bad;
1109 if (suword(addr, uticks)) {
1110 PROC_LOCK(p);
1111 psignal(p, SIGSEGV);
1112 PROC_UNLOCK(p);
1113 return (-2);
1114 }
948 }
1115 }
949 if (sticks) {
950 addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks);
951 sticks += fuword(addr);
952 if (suword(addr, sticks))
953 goto bad;
1116 return (0);
1117}
1118
1119/*
1120 * Export kernel mode state clock ticks
1121 */
1122
1123static int
1124thread_update_sys_ticks(struct thread *td)
1125{
1126 struct proc *p = td->td_proc;
1127 caddr_t addr;
1128 int sticks;
1129
1130 if (td->td_mailbox == NULL)
1131 return (-1);
1132 if (td->td_usticks == 0)
1133 return (0);
1134 addr = (caddr_t)&td->td_mailbox->tm_sticks;
1135 sticks = fuword(addr);
1136 /* XXXKSE use XCHG instead */
1137 sticks += td->td_usticks;
1138 td->td_usticks = 0;
1139 if (suword(addr, sticks)) {
1140 PROC_LOCK(p);
1141 psignal(p, SIGSEGV);
1142 PROC_UNLOCK(p);
1143 return (-2);
954 }
1144 }
955 return 0;
956bad:
957 PROC_LOCK(p);
958 psignal(p, SIGSEGV);
959 PROC_UNLOCK(p);
960 return -1;
1145 return (0);
1146}
1147
1148/*
1149 * Discard the current thread and exit from its context.
1150 *
1151 * Because we can't free a thread while we're operating under its context,
1152 * push the current thread into our CPU's deadthread holder. This means
1153 * we needn't worry about someone else grabbing our context before we

--- 39 unchanged lines hidden (view full) ---

1193 * have a thread_unlink() that does some of this but it
1194 * would only be called from here (I think) so it would
1195 * be a waste. (might be useful for proc_fini() as well.)
1196 */
1197 TAILQ_REMOVE(&p->p_threads, td, td_plist);
1198 p->p_numthreads--;
1199 TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1200 kg->kg_numthreads--;
1201
1202 /*
1203 * The test below is NOT true if we are the
1204 * sole exiting thread. P_STOPPED_SNGL is unset
1205 * in exit1() after it is the only survivor.
1206 */
1207 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1208 if (p->p_numthreads == p->p_suspcount) {
1209 thread_unsuspend_one(p->p_singlethread);
1210 }
1211 }
1212
1027 /* Reassign this thread's KSE. */
1213 /*
1214 * Because each upcall structure has an owner thread, and the
1215 * owner thread exits only when the process is exiting,
1216 * the upcall to userland is no longer needed and
1217 * deleting the upcall structure is safe here.
1218 * So when all threads in a group have exited, all upcalls
1219 * in the group are automatically freed.
1220 */
1221 if (td->td_upcall)
1222 upcall_remove(td);
1223
1224 ke->ke_state = KES_UNQUEUED;
1029
1225 ke->ke_thread = NULL;
1226 /*
1227 * Decide what to do with the KSE attached to this thread.
1032 * XXX Possibly kse_reassign should do both cases as it already
1033 * does some of this.
1034 */
1228 */
1035 if (ke->ke_flags & KEF_EXIT) {
1036 KASSERT((ke->ke_owner == td),
1037 ("thread_exit: KSE exiting with non-owner thread"));
1038 ke->ke_thread = NULL;
1039 td->td_kse = NULL;
1229 if (ke->ke_flags & KEF_EXIT)
1230 kse_unlink(ke);
1041 } else {
1042 TD_SET_EXITING(td); /* definitely not runnable */
1231 else
1232 kse_reassign(ke);
1044 }
1233 PROC_UNLOCK(p);
1234 td->td_kse = NULL;
1235 td->td_state = TDS_INACTIVE;
1236 td->td_proc = NULL;
1237 td->td_ksegrp = NULL;
1238 td->td_last_kse = NULL;
1239 PCPU_SET(deadthread, td);
1240 } else {
1241 PROC_UNLOCK(p);
1242 }

--- 31 unchanged lines hidden (view full) ---

1274 * The thread is linked as if running but no KSE assigned.
1275 */
1276void
1277thread_link(struct thread *td, struct ksegrp *kg)
1278{
1279 struct proc *p;
1280
1281 p = kg->kg_proc;
1282 td->td_state = TDS_INACTIVE;
1283 td->td_proc = p;
1284 td->td_ksegrp = kg;
1285 td->td_last_kse = NULL;
1286 td->td_flags = 0;
1287 td->td_kse = NULL;
1288
1289 LIST_INIT(&td->td_contested);
1290 callout_init(&td->td_slpcallout, 1);
1291 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1292 TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1293 p->p_numthreads++;
1294 kg->kg_numthreads++;
1104 td->td_kse = NULL;
1295}
1296
1297/*
1298 * Purge a ksegrp resource. When a ksegrp is preparing to
1299 * exit, it calls this function.
1300 */
1301void
1302kse_purge_group(struct thread *td)
1303{
1304 struct ksegrp *kg;
1305 struct kse *ke;
1306
1307 kg = td->td_ksegrp;
1308 KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1309 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1310 KASSERT(ke->ke_state == KES_IDLE,
1311 ("%s: wrong idle KSE state", __func__));
1312 kse_unlink(ke);
1313 }
1314 KASSERT((kg->kg_kses == 1),
1315 ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1316 KASSERT((kg->kg_numupcalls == 0),
1317 ("%s: ksegrp still has %d upcall datas",
1318 __func__, kg->kg_numupcalls));
1319}
1320
1321/*
1322 * Purge a process's KSE resource. When a process is preparing to
1323 * exit, it calls kse_purge to release any extra KSE resources in
1324 * the process.
1325 */
1326void
1327kse_purge(struct proc *p, struct thread *td)
1328{
1110 /* XXXKSE think about this..
1111 may need to wake up threads on loan queue. */
1329 struct ksegrp *kg;
1330 struct kse *ke;
1331
1332 KASSERT(p->p_numthreads == 1, ("bad thread number"));
1333 mtx_lock_spin(&sched_lock);
1334 while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1335 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1336 p->p_numksegrps--;
1337 /*
1338 * KSEs have no owners; after all threads
1339 * in the group have exited, some KSEs may have been
1340 * left on the idle queue, so gc them now.
1341 */
1342 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1343 KASSERT(ke->ke_state == KES_IDLE,
1344 ("%s: wrong idle KSE state", __func__));
1345 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1346 kg->kg_idle_kses--;
1347 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1348 kg->kg_kses--;
1349 kse_stash(ke);
1350 }
1351 KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1120 ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1121 ("wrong kg_kses"));
1122 if (kg != td->td_ksegrp) {
1352 ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1353 ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1354 KASSERT((kg->kg_numupcalls == 0),
1355 ("%s: ksegrp still has %d upcall datas",
1356 __func__, kg->kg_numupcalls));
1357
1358 if (kg != td->td_ksegrp)
1359 ksegrp_stash(kg);
1124 }
1360 }
1361 TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1362 p->p_numksegrps++;
1363 mtx_unlock_spin(&sched_lock);
1364}
1365
1366/*
1367 * This function is intended to be used to initialize a spare thread
1368 * for an upcall. Initialize the thread's large data area outside sched_lock
1369 * for thread_schedule_upcall().
1370 */
1371void
1372thread_alloc_spare(struct thread *td, struct thread *spare)
1373{
1374 if (td->td_standin)
1375 return;
1376 if (spare == NULL)
1377 spare = thread_alloc();
1378 td->td_standin = spare;
1379 bzero(&spare->td_startzero,
1380 (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1381 spare->td_proc = td->td_proc;
1382 /* Setup PCB and fork address */
1383 cpu_set_upcall(spare, td->td_pcb);
1384 /*
1385 * XXXKSE do we really need this? (default values for the
1386 * frame).
1387 */
1388 bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
1389 spare->td_ucred = crhold(td->td_ucred);
1390}
1391
1392/*
1393 * Create a thread and schedule it for upcall on the KSE given.
1394 * Use our thread's standin so that we don't have to allocate one.
1395 */
1396struct thread *
1137thread_schedule_upcall(struct thread *td, struct kse *ke)
1397thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1398{
1399 struct thread *td2;
1140 int newkse;
1400
1401 mtx_assert(&sched_lock, MA_OWNED);
1143 newkse = (ke != td->td_kse);
1402
1403 /*
1146 * If the owner and kse are BOUND then that thread is planning to
1147 * go to userland and upcalls are not expected. So don't make one.
1148 * If it is not bound then make it so with the spare thread
1149 * and then borrow back the KSE to allow us to complete some in-kernel
1150 * work. When we complete, the Bound thread will have the chance to
1151 * complete. This thread will sleep as planned. Hopefully there will
1152 * eventually be an unbound thread that can be converted to an
1153 * upcall to report the completion of this thread.
1404 * Schedule an upcall thread on specified kse_upcall,
1405 * the kse_upcall must be free.
1406 * td must have a spare thread.
1407 */
1155
1408 KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1409 if ((td2 = td->td_standin) != NULL) {
1410 td->td_standin = NULL;
1411 } else {
1159 if (newkse)
1160 panic("no reserve thread when called with a new kse");
1161 /*
1162 * If called from (e.g.) sleep and we do not have
1163 * a reserve thread, then we've used it, so do not
1164 * create an upcall.
1165 */
1412 panic("no reserve thread when scheduling an upcall");
1413 return (NULL);
1414 }
1415 CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1416 td2, td->td_proc->p_pid, td->td_proc->p_comm);
1170 bzero(&td2->td_startzero,
1171 (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1417 bcopy(&td->td_startcopy, &td2->td_startcopy,
1418 (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1174 thread_link(td2, ke->ke_ksegrp);
1175 cpu_set_upcall(td2, td->td_pcb);
1176
1177 /*
1178 * XXXKSE do we really need this? (default values for the
1179 * frame).
1180 */
1181 bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
1182
1183 /*
1184 * Bind the new thread to the KSE,
1185 * and if it's our KSE, lend it back to ourself
1186 * so we can continue running.
1187 */
1188 td2->td_ucred = crhold(td->td_ucred);
1189 td2->td_flags = TDF_UPCALLING; /* note: BOUND */
1190 td2->td_kse = ke;
1191 td2->td_state = TDS_CAN_RUN;
1419 thread_link(td2, ku->ku_ksegrp);
1420 /* Let the new thread become owner of the upcall */
1421 ku->ku_owner = td2;
1422 td2->td_upcall = ku;
1423 td2->td_flags = TDF_UPCALLING;
1424 td2->td_kse = NULL;
1425 td2->td_state = TDS_CAN_RUN;
1426 td2->td_inhibitors = 0;
1193 ke->ke_owner = td2;
1194 /*
1195 * If called from kse_reassign(), we are working on the current
1196 * KSE so fake that we borrowed it. If called from
1197 * kse_create(), don't, as we have a new kse too.
1198 */
1199 if (!newkse) {
1200 /*
1201 * This thread will be scheduled when the current thread
1202 * blocks, exits or tries to enter userspace, (which ever
1203 * happens first). When that happens the KSe will "revert"
1204 * to this thread in a BOUND manner. Since we are called
1205 * from msleep() this is going to be "very soon" in nearly
1206 * all cases.
1207 */
1208 TD_SET_LOAN(td2);
1209 } else {
1210 ke->ke_thread = td2;
1211 ke->ke_state = KES_THREAD;
1212 setrunqueue(td2);
1213 }
1427 setrunqueue(td2);
1428 return (td2); /* bogus.. should be a void function */
1429}
1430
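/*
 * Example (minimal sketch): the caller contract for
 * thread_schedule_upcall() above -- a free upcall, a pre-allocated
 * spare thread, and sched_lock held, as in kse_create().
 */
#if 0
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);	/* may sleep; no sched_lock */
	mtx_lock_spin(&sched_lock);
	td2 = thread_schedule_upcall(td, ku);	/* consumes td->td_standin */
	mtx_unlock_spin(&sched_lock);
#endif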
1431/*
1432 * Schedule an upcall to notify a KSE process of received signals.
1433 *
1434 * XXX - Modifying a sigset_t like this is totally bogus.
1435 */
1436struct thread *
1437signal_upcall(struct proc *p, int sig)
1438{
1439#if 0
1440 struct thread *td, *td2;
1441 struct kse *ke;
1442 sigset_t ss;
1443 int error;
1444
1445#endif
1446 PROC_LOCK_ASSERT(p, MA_OWNED);
1447return (NULL);
1232
1448#if 0
1449 td = FIRST_THREAD_IN_PROC(p);
1450 ke = td->td_kse;
1451 PROC_UNLOCK(p);
1452 error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1453 PROC_LOCK(p);
1454 if (error)
1455 return (NULL);
1456 SIGADDSET(ss, sig);
1457 PROC_UNLOCK(p);
1458 error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
1459 PROC_LOCK(p);
1460 if (error)
1461 return (NULL);
1462 if (td->td_standin == NULL)
1247 td->td_standin = thread_alloc();
1463 thread_alloc_spare(td, NULL);
1248 mtx_lock_spin(&sched_lock);
1249 td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
1250 mtx_unlock_spin(&sched_lock);
1251 return (td2);
1464 mtx_lock_spin(&sched_lock);
1465 td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
1466 mtx_unlock_spin(&sched_lock);
1467 return (td2);
1468#endif
1252}
1253
1254/*
1469}
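/*
 * [Editor's sketch] The #if 0'd code above is a read-modify-write of a
 * signal set that lives in user memory: copy it in, OR in the new
 * signal, copy it back, dropping the process lock around each step
 * because copyin()/copyout() may sleep.  As the XXX above notes, the
 * set can change underneath us between the two copies.  A simplified
 * userland model, with memcpy() standing in for the user-space copies:
 */
#include <signal.h>
#include <string.h>

static int
sketch_post_signal(sigset_t *user_sigscaught, int sig)
{
	sigset_t ss;

	/* copyin() analogue: may fail or sleep in the real kernel. */
	memcpy(&ss, user_sigscaught, sizeof(ss));
	if (sigaddset(&ss, sig) != 0)	/* SIGADDSET() analogue */
		return (-1);
	/* copyout() analogue: the race window closes here. */
	memcpy(user_sigscaught, &ss, sizeof(ss));
	return (0);
}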
1470
1471/*
1255 * setup done on the thread when it enters the kernel.
1472 * Setup done on the thread when it enters the kernel.
1256 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1257 */
1258void
1259thread_user_enter(struct proc *p, struct thread *td)
1260{
1473 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1474 */
1475void
1476thread_user_enter(struct proc *p, struct thread *td)
1477{
1261 struct kse *ke;
1478 struct ksegrp *kg;
1479 struct kse_upcall *ku;
1262
1480
1481 kg = td->td_ksegrp;
1263 /*
1264 * First check that we shouldn't just abort.
1265 * But check if we are the single thread first!
1266 * XXX p_singlethread not locked, but should be safe.
1267 */
1482 /*
1483 * First check that we shouldn't just abort.
1484 * But check if we are the single thread first!
1485 * XXX p_singlethread not locked, but should be safe.
1486 */
1268 if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
1487 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1269 PROC_LOCK(p);
1270 mtx_lock_spin(&sched_lock);
1271 thread_exit();
1272 /* NOTREACHED */
1273 }
1274
1275 /*
1276 * If we are doing a syscall in a KSE environment,
1277 * note where our mailbox is. There is always the
1278 * possibility that we could do this lazily (in kse_reassign()),
1279 * but for now do it every time.
1280 */
1488 PROC_LOCK(p);
1489 mtx_lock_spin(&sched_lock);
1490 thread_exit();
1491 /* NOTREACHED */
1492 }
1493
1494 /*
1495 * If we are doing a syscall in a KSE environment,
1496 * note where our mailbox is. There is always the
1497 * possibility that we could do this lazily (in kse_reassign()),
1498 * but for now do it every time.
1499 */
1281 ke = td->td_kse;
1282 td->td_flags &= ~TDF_UNBOUND;
1283 if (ke->ke_mailbox != NULL) {
1284#if 0
1285 td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
1286 + offsetof(struct kse_mailbox, km_curthread));
1287#else /* if user pointer arithmetic is ok in the kernel */
1500 kg = td->td_ksegrp;
1501 if (kg->kg_numupcalls) {
1502 ku = td->td_upcall;
1503 KASSERT(ku, ("%s: no upcall owned", __func__));
1504 KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1288 td->td_mailbox =
1505 td->td_mailbox =
1289 (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
1290#endif
1506 (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1291 if ((td->td_mailbox == NULL) ||
1292 (td->td_mailbox == (void *)-1)) {
1507 if ((td->td_mailbox == NULL) ||
1508 (td->td_mailbox == (void *)-1)) {
1293 td->td_mailbox = NULL; /* single thread it.. */
1509 /* Don't schedule upcall when blocked */
1510 td->td_mailbox = NULL;
1294 mtx_lock_spin(&sched_lock);
1511 mtx_lock_spin(&sched_lock);
1295 td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
1512 td->td_flags &= ~TDF_CAN_UNBIND;
1296 mtx_unlock_spin(&sched_lock);
1297 } else {
1513 mtx_unlock_spin(&sched_lock);
1514 } else {
1298 /*
1299 * When the thread limit is reached, act as if the thread
1300 * has already done an upcall.
1301 */
1302 if (p->p_numthreads > max_threads_per_proc) {
1515 if (p->p_numthreads > max_threads_per_proc) {
1303 if (td->td_standin != NULL) {
1304 thread_stash(td->td_standin);
1305 td->td_standin = NULL;
1306 }
1516 /*
1517 * Since the kernel thread limit has been reached,
1518 * don't schedule any more upcalls.
1519 * XXXKSE This code may in fact be unneeded.
1520 */
1521 mtx_lock_spin(&sched_lock);
1522 td->td_flags &= ~TDF_CAN_UNBIND;
1523 mtx_unlock_spin(&sched_lock);
1307 } else {
1308 if (td->td_standin == NULL)
1524 } else {
1525 if (td->td_standin == NULL)
1309 td->td_standin = thread_alloc();
1526 thread_alloc_spare(td, NULL);
1527 mtx_lock_spin(&sched_lock);
1528 td->td_flags |= TDF_CAN_UNBIND;
1529 mtx_unlock_spin(&sched_lock);
1310 }
1530 }
1311 mtx_lock_spin(&sched_lock);
1312 td->td_flags |= TDF_CAN_UNBIND;
1313 mtx_unlock_spin(&sched_lock);
1314 KASSERT((ke->ke_owner == td),
1315 ("thread_user_enter: No starting owner "));
1316 ke->ke_owner = td;
1317 td->td_usticks = 0;
1318 }
1319 }
1320}
1321
1322/*
1323 * The extra work we go through if we are a threaded process when we
1324 * return to userland.
1325 *

--- 4 unchanged lines hidden (view full) ---

1330 * which case the mailbox's context's busy indicator will be set).
1331 * The only traps we support will have set the mailbox.
1332 * We will clear it here.
1333 */
1334int
1335thread_userret(struct thread *td, struct trapframe *frame)
1336{
1337 int error;
1531 }
1532 }
1533}
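/*
 * [Editor's sketch] Both revisions above decide whether the thread may
 * unbind at userret time by probing the mailbox: a fuword() of
 * km_curthread that yields NULL (no mailbox registered) or -1 (the
 * fetch faulted) forces the thread to stay bound, and exceeding the
 * per-process thread limit is treated as if the upcall had already
 * been done.  A simplified decision function; the parameters are
 * invented for illustration:
 */
#include <stddef.h>

static int
sketch_can_unbind(void *km_curthread, int nthreads, int maxthreads)
{
	/* fuword() returns -1 on fault; NULL means no mailbox set. */
	if (km_curthread == NULL || km_curthread == (void *)-1)
		return (0);	/* stay bound: run single-threaded */
	/* Past the limit, act as if the upcall has already run. */
	if (nthreads > maxthreads)
		return (0);
	return (1);		/* safe to set TDF_CAN_UNBIND */
}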
1534
1535/*
1536 * The extra work we go through if we are a threaded process when we
1537 * return to userland.
1538 *

--- 4 unchanged lines hidden (view full) ---

1543 * which case the mailbox's context's busy indicator will be set).
1544 * The only traps we support will have set the mailbox.
1545 * We will clear it here.
1546 */
1547int
1548thread_userret(struct thread *td, struct trapframe *frame)
1549{
1550 int error;
1338 int unbound;
1339 struct kse *ke;
1551 struct kse_upcall *ku;
1340 struct ksegrp *kg;
1552 struct ksegrp *kg;
1341 struct thread *worktodo;
1342 struct proc *p;
1343 struct timespec ts;
1344
1553 struct proc *p;
1554 struct timespec ts;
1555
1345 KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
1346 ("thread_userret: bad thread/kse pointers"));
1347 KASSERT((td == curthread),
1348 ("thread_userret: bad thread argument"));
1349
1350
1351 kg = td->td_ksegrp;
1352 p = td->td_proc;
1556 p = td->td_proc;
1353 error = 0;
1354 unbound = TD_IS_UNBOUND(td);
1557 kg = td->td_ksegrp;
1355
1558
1356 mtx_lock_spin(&sched_lock);
1357 if ((worktodo = kg->kg_last_assigned))
1358 worktodo = TAILQ_NEXT(worktodo, td_runq);
1359 else
1360 worktodo = TAILQ_FIRST(&kg->kg_runq);
1559 /* Nothing to do for a non-threaded group/process */
1560 if (td->td_ksegrp->kg_numupcalls == 0)
1561 return (0);
1361
1362 /*
1562
1563 /*
1363 * Permanently bound threads never upcall but they may
1364 * loan out their KSE at this point.
1365 * Upcalls imply bound. They also may want to do some philanthropy.
1366 * Temporarily bound threads on the other hand either yield
1367 * to other work and transform into an upcall, or proceed back to
1368 * userland.
1564 * A stat clock interrupt hit in userland and we are
1565 * returning from that interrupt; charge the thread's
1566 * userland time for the UTS.
1369 */
1567 */
1568 if (td->td_flags & TDF_USTATCLOCK) {
1569 thread_update_usr_ticks(td);
1570 mtx_lock_spin(&sched_lock);
1571 td->td_flags &= ~TDF_USTATCLOCK;
1572 mtx_unlock_spin(&sched_lock);
1573 }
1370
1574
1575 /*
1576 * Optimisation:
1577 * This thread has not started any upcall.
1578 * If there is no work to report other than ourselves,
1579 * then it can return directly to userland.
1580 */
1371 if (TD_CAN_UNBIND(td)) {
1581 if (TD_CAN_UNBIND(td)) {
1372 td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
1373 if (!worktodo && (kg->kg_completed == NULL) &&
1374 !(td->td_kse->ke_flags & KEF_DOUPCALL)) {
1375 /*
1376 * This thread has not started any upcall.
1377 * If there is no work to report other than
1378 * ourselves, then it can return directly to userland.
1379 */
1380justreturn:
1381 mtx_unlock_spin(&sched_lock);
1382 thread_update_uticks();
1582 mtx_lock_spin(&sched_lock);
1583 td->td_flags &= ~TDF_CAN_UNBIND;
1584 mtx_unlock_spin(&sched_lock);
1585 if ((kg->kg_completed == NULL) &&
1586 (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
1587 thread_update_sys_ticks(td);
1383 td->td_mailbox = NULL;
1384 return (0);
1385 }
1588 td->td_mailbox = NULL;
1589 return (0);
1590 }
1386 mtx_unlock_spin(&sched_lock);
1387 error = thread_export_context(td);
1591 error = thread_export_context(td);
1388 td->td_usticks = 0;
1389 if (error) {
1390 /*
1592 if (error) {
1593 /*
1391 * As we are not running on a borrowed KSE,
1392 * failing to do the KSE operation just defaults
1594 * Failing to do the KSE operation just defaults
1393 * back to synchronous operation, so just return from
1394 * the syscall.
1395 */
1595 * back to synchronous operation, so just return from
1596 * the syscall.
1597 */
1396 goto justreturn;
1598 return (0);
1397 }
1599 }
1398 mtx_lock_spin(&sched_lock);
1399 /*
1600 /*
1400 * Turn ourselves into a bound upcall.
1401 * We will rely on kse_reassign()
1402 * to make us run at a later time.
1601 * There is something to report, and we own an upcall
1602 * structure, so we can go to userland.
1603 * Turn ourselves into an upcall thread.
1403 */
1604 */
1605 mtx_lock_spin(&sched_lock);
1404 td->td_flags |= TDF_UPCALLING;
1606 td->td_flags |= TDF_UPCALLING;
1405
1406 /* there may be more work since we re-locked schedlock */
1407 if ((worktodo = kg->kg_last_assigned))
1408 worktodo = TAILQ_NEXT(worktodo, td_runq);
1409 else
1410 worktodo = TAILQ_FIRST(&kg->kg_runq);
1411 } else if (unbound) {
1412 /*
1413 * We are an unbound thread, looking to
1414 * return to user space. There must be another owner
1415 * of this KSE.
1416 * We are using a borrowed KSE. save state and exit.
1417 * kse_reassign() will recycle the kse as needed,
1418 */
1419 mtx_unlock_spin(&sched_lock);
1607 mtx_unlock_spin(&sched_lock);
1608 } else if (td->td_mailbox) {
1420 error = thread_export_context(td);
1609 error = thread_export_context(td);
1421 td->td_usticks = 0;
1422 if (error) {
1610 if (error) {
1423 /*
1424 * There is nothing we can do.
1425 * We just lose that context. We
1426 * probably should note this somewhere and send
1427 * the process a signal.
1428 */
1429 PROC_LOCK(td->td_proc);
1611 PROC_LOCK(td->td_proc);
1430 psignal(td->td_proc, SIGSEGV);
1431 mtx_lock_spin(&sched_lock);
1612 mtx_lock_spin(&sched_lock);
1432 ke = td->td_kse;
1433 /* possibly upcall with error? */
1434 } else {
1613 /* possibly upcall with error? */
1614 } else {
1615 PROC_LOCK(td->td_proc);
1616 mtx_lock_spin(&sched_lock);
1435 /*
1617 /*
1436 * Don't make an upcall, just exit so that the owner
1437 * can get its KSE if it wants it.
1438 * Our context is already safely stored for later
1439 * use by the UTS.
1618 * There may be upcall threads sleeping for
1619 * work to do; wake one of them up.
1620 * XXXKSE Maybe wake all of them up.
1440 */
1621 */
1441 PROC_LOCK(p);
1442 mtx_lock_spin(&sched_lock);
1443 ke = td->td_kse;
1622 if (kg->kg_upsleeps)
1623 wakeup_one(&kg->kg_completed);
1444 }
1624 }
1445 /*
1446 * If the owner is idling, we now have something for it
1447 * to report, so make it runnable.
1448 * If the owner is not an upcall, make an attempt to
1449 * ensure that at least one of any IDLED upcalls can
1450 * wake up.
1451 */
1452 if (ke->ke_owner->td_flags & TDF_UPCALLING) {
1453 TD_CLR_IDLE(ke->ke_owner);
1454 } else {
1455 FOREACH_KSE_IN_GROUP(kg, ke) {
1456 if (TD_IS_IDLE(ke->ke_owner)) {
1457 TD_CLR_IDLE(ke->ke_owner);
1458 setrunnable(ke->ke_owner);
1459 break;
1460 }
1461 }
1462 }
1463 thread_exit();
1625 thread_exit();
1626 /* NOTREACHED */
1464 }
1627 }
1465 /*
1466 * We ARE going back to userland with this KSE.
1467 * We are permanently bound. We may be an upcall.
1468 * If an upcall, check for threads that need to borrow the KSE.
1469 * Any other thread that comes ready after this missed the boat.
1470 */
1471 ke = td->td_kse;
1472
1628
1473 /*
1474 * If not upcalling, go back to userspace.
1475 * If we are, get the upcall set up.
1476 */
1477 if (td->td_flags & TDF_UPCALLING) {
1629 if (td->td_flags & TDF_UPCALLING) {
1478 if (worktodo) {
1479 /*
1480 * force a switch to more urgent 'in kernel'
1481 * work. Control will return to this thread
1482 * when there is no more work to do.
1483 * kse_reassign() will do that for us.
1484 */
1485 TD_SET_LOAN(td);
1486 p->p_stats->p_ru.ru_nvcsw++;
1487 mi_switch(); /* kse_reassign() will (re)find worktodo */
1488 }
1489 td->td_flags &= ~TDF_UPCALLING;
1490 if (ke->ke_flags & KEF_DOUPCALL)
1491 ke->ke_flags &= ~KEF_DOUPCALL;
1492 mtx_unlock_spin(&sched_lock);
1493
1630 KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind"));
1631 ku = td->td_upcall;
1494 /*
1495 * There is no more work to do and we are going to ride
1632 /*
1633 * There is no more work to do and we are going to ride
1496 * this thread/KSE up to userland as an upcall.
1634 * this thread up to userland as an upcall.
1497 * Do the last parts of the setup needed for the upcall.
1498 */
1499 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1500 td, td->td_proc->p_pid, td->td_proc->p_comm);
1501
1502 /*
1503 * Set user context to the UTS.
1504 * Will use Giant in cpu_thread_clean() because it uses
1505 * kmem_free(kernel_map, ...)
1506 */
1635 * Do the last parts of the setup needed for the upcall.
1636 */
1637 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1638 td, td->td_proc->p_pid, td->td_proc->p_comm);
1639
1640 /*
1641 * Set user context to the UTS.
1642 * Will use Giant in cpu_thread_clean() because it uses
1643 * kmem_free(kernel_map, ...)
1644 */
1507 cpu_set_upcall_kse(td, ke);
1645 cpu_set_upcall_kse(td, ku);
1508
1646
1509 /*
1647 /*
1648 * Clear TDF_UPCALLING after setting the upcall context;
1649 * the profiling code looks at TDF_UPCALLING to avoid
1650 * accounting a wrong user %EIP.
1651 */
1652 mtx_lock_spin(&sched_lock);
1653 td->td_flags &= ~TDF_UPCALLING;
1654 if (ku->ku_flags & KUF_DOUPCALL)
1655 ku->ku_flags &= ~KUF_DOUPCALL;
1656 mtx_unlock_spin(&sched_lock);
1657
1658 /*
1510 * Unhook the list of completed threads.
1511 * Anything that completes after this gets to
1512 * come in next time.
1513 * Put the list of completed thread mailboxes on
1514 * this KSE's mailbox.
1515 */
1659 * Unhook the list of completed threads.
1660 * Anything that completes after this gets to
1661 * come in next time.
1662 * Put the list of completed thread mailboxes on
1663 * this KSE's mailbox.
1664 */
1516 error = thread_link_mboxes(kg, ke);
1665 error = thread_link_mboxes(kg, ku);
1517 if (error)
1518 goto bad;
1519
1520 /*
1521 * Set state and clear the thread mailbox pointer.
1522 * From now on we are just a bound outgoing process.
1523 * **Problem** userret is often called several times.
1524 * it would be nice if this all happenned only on the first
1525 * time through. (the scan for extra work etc.)
1526 */
1666 if (error)
1667 goto bad;
1668
1669 /*
1670 * Set state and clear the thread mailbox pointer.
1671 * From now on we are just a bound outgoing process.
1672 * **Problem** userret is often called several times;
1673 * it would be nice if this all happened only on the first
1674 * time through (the scan for extra work, etc.).
1675 */
1527#if 0
1528 error = suword((caddr_t)ke->ke_mailbox +
1529 offsetof(struct kse_mailbox, km_curthread), 0);
1530#else /* if user pointer arithmetic is ok in the kernel */
1531 error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
1532#endif
1533 ke->ke_uuticks = ke->ke_usticks = 0;
1676 error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
1534 if (error)
1535 goto bad;
1677 if (error)
1678 goto bad;
1679
1680 /* Export current system time */
1536 nanotime(&ts);
1537 if (copyout(&ts,
1681 nanotime(&ts);
1682 if (copyout(&ts,
1538 (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
1683 (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) {
1539 goto bad;
1540 }
1684 goto bad;
1685 }
1541 } else {
1542 mtx_unlock_spin(&sched_lock);
1543 }
1544 /*
1545 * Optimisation:
1546 * Ensure that we have a spare thread available,
1547 * for when we re-enter the kernel.
1548 */
1686 }
1687 /*
1688 * Optimisation:
1689 * Ensure that we have a spare thread available,
1690 * for when we re-enter the kernel.
1691 */
1549 if (td->td_standin == NULL) {
1550 td->td_standin = thread_alloc();
1551 }
1692 if (td->td_standin == NULL)
1693 thread_alloc_spare(td, NULL);
1552
1694
1553 thread_update_uticks();
1695 /*
1696 * Clear thread mailbox first, then clear system tick count.
1697 * The order is important because thread_statclock() uses
1698 * the mailbox pointer to see if it is a userland thread or
1699 * a UTS kernel thread.
1700 */
1554 td->td_mailbox = NULL;
1701 td->td_mailbox = NULL;
1702 td->td_usticks = 0;
1555 return (0);
1556
1557bad:
1558 /*
1559 * Things are going to be so screwed we should just kill the process.
1560 * How do we do that?
1561 */
1562 PROC_LOCK(td->td_proc);
1563 psignal(td->td_proc, SIGSEGV);
1564 PROC_UNLOCK(td->td_proc);
1565 td->td_mailbox = NULL;
1703 return (0);
1704
1705bad:
1706 /*
1707 * Things are going to be so screwed we should just kill the process.
1708 * How do we do that?
1709 */
1710 PROC_LOCK(td->td_proc);
1711 psignal(td->td_proc, SIGSEGV);
1712 PROC_UNLOCK(td->td_proc);
1713 td->td_mailbox = NULL;
1714 td->td_usticks = 0;
1566 return (error); /* go sync */
1567}
1568
1569/*
1570 * Enforce single-threading.
1571 *
1572 * Returns 1 if the caller must abort (another thread is waiting to
1573 * exit the process or similar). Process is locked!

--- 22 unchanged lines hidden (view full) ---

1596 return (0);
1597
1598 /* Is someone already single threading? */
1599 if (p->p_singlethread)
1600 return (1);
1601
1602 if (force_exit == SINGLE_EXIT) {
1603 p->p_flag |= P_SINGLE_EXIT;
1715 return (error); /* go sync */
1716}
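/*
 * [Editor's sketch] The tail of thread_userret() funnels every failure
 * to the "bad:" label and is careful about ordering on success:
 * td_mailbox is cleared before td_usticks because the stat clock
 * classifies a thread by whether its mailbox pointer is set.  A
 * compact, self-contained model of that shape; the two helpers are
 * invented stand-ins for the suword()/copyout() steps and return 0 on
 * success:
 */
#include <stddef.h>

struct sketch_userret {
	void	*td_mailbox;	/* user thread mailbox, if any */
	int	 td_usticks;	/* unreported user ticks */
};

static int
sketch_clear_km_curthread(struct sketch_userret *td)
{
	(void)td;		/* suword(&km_curthread, 0) stand-in */
	return (0);
}

static int
sketch_export_timeofday(struct sketch_userret *td)
{
	(void)td;		/* copyout() of km_timeofday stand-in */
	return (0);
}

static int
sketch_finish_upcall(struct sketch_userret *td)
{
	if (sketch_clear_km_curthread(td) != 0)
		goto bad;
	if (sketch_export_timeofday(td) != 0)
		goto bad;
	td->td_mailbox = NULL;	/* must happen before the tick reset */
	td->td_usticks = 0;
	return (0);
bad:
	/* The real code posts SIGSEGV; little else can be done. */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (-1);
}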
1717
1718/*
1719 * Enforce single-threading.
1720 *
1721 * Returns 1 if the caller must abort (another thread is waiting to
1722 * exit the process or similar). Process is locked!

--- 22 unchanged lines hidden (view full) ---

1745 return (0);
1746
1747 /* Is someone already single threading? */
1748 if (p->p_singlethread)
1749 return (1);
1750
1751 if (force_exit == SINGLE_EXIT) {
1752 p->p_flag |= P_SINGLE_EXIT;
1604 td->td_flags &= ~TDF_UNBOUND;
1605 } else
1606 p->p_flag &= ~P_SINGLE_EXIT;
1607 p->p_flag |= P_STOPPED_SINGLE;
1608 p->p_singlethread = td;
1609 /* XXXKSE Which lock protects the below values? */
1610 while ((p->p_numthreads - p->p_suspcount) != 1) {
1611 mtx_lock_spin(&sched_lock);
1612 FOREACH_THREAD_IN_PROC(p, td2) {

--- 6 unchanged lines hidden (view full) ---

1619 }
1620 if (TD_ON_SLEEPQ(td2) &&
1621 (td2->td_flags & TDF_SINTR)) {
1622 if (td2->td_flags & TDF_CVWAITQ)
1623 cv_abort(td2);
1624 else
1625 abortsleep(td2);
1626 }
1753 } else
1754 p->p_flag &= ~P_SINGLE_EXIT;
1755 p->p_flag |= P_STOPPED_SINGLE;
1756 p->p_singlethread = td;
1757 /* XXXKSE Which lock protects the below values? */
1758 while ((p->p_numthreads - p->p_suspcount) != 1) {
1759 mtx_lock_spin(&sched_lock);
1760 FOREACH_THREAD_IN_PROC(p, td2) {

--- 6 unchanged lines hidden (view full) ---

1767 }
1768 if (TD_ON_SLEEPQ(td2) &&
1769 (td2->td_flags & TDF_SINTR)) {
1770 if (td2->td_flags & TDF_CVWAITQ)
1771 cv_abort(td2);
1772 else
1773 abortsleep(td2);
1774 }
1627 if (TD_IS_IDLE(td2)) {
1628 TD_CLR_IDLE(td2);
1629 }
1630 } else {
1631 if (TD_IS_SUSPENDED(td2))
1632 continue;
1775 } else {
1776 if (TD_IS_SUSPENDED(td2))
1777 continue;
1633 /* maybe other inhibited states too? */
1778 /*
1779 * Maybe other inhibited states too?
1780 * XXXKSE Is it totally safe to
1781 * suspend a non-interruptible thread?
1782 */
1634 if (td2->td_inhibitors &
1783 if (td2->td_inhibitors &
1635 (TDI_SLEEPING | TDI_SWAPPED |
1636 TDI_LOAN | TDI_IDLE |
1637 TDI_EXITING))
1784 (TDI_SLEEPING | TDI_SWAPPED))
1638 thread_suspend_one(td2);
1639 }
1640 }
1641 }
1642 /*
1643 * Maybe we suspended some threads.. was it enough?
1644 */
1645 if ((p->p_numthreads - p->p_suspcount) == 1) {

--- 9 unchanged lines hidden (view full) ---

1655 mtx_unlock(&Giant);
1656 PROC_UNLOCK(p);
1657 p->p_stats->p_ru.ru_nvcsw++;
1658 mi_switch();
1659 mtx_unlock_spin(&sched_lock);
1660 mtx_lock(&Giant);
1661 PROC_LOCK(p);
1662 }
1785 thread_suspend_one(td2);
1786 }
1787 }
1788 }
1789 /*
1790 * Maybe we suspended some threads.. was it enough?
1791 */
1792 if ((p->p_numthreads - p->p_suspcount) == 1) {

--- 9 unchanged lines hidden (view full) ---

1802 mtx_unlock(&Giant);
1803 PROC_UNLOCK(p);
1804 p->p_stats->p_ru.ru_nvcsw++;
1805 mi_switch();
1806 mtx_unlock_spin(&sched_lock);
1807 mtx_lock(&Giant);
1808 PROC_LOCK(p);
1809 }
1663 if (force_exit == SINGLE_EXIT)
1810 if (force_exit == SINGLE_EXIT) {
1811 if (td->td_upcall) {
1812 mtx_lock_spin(&sched_lock);
1813 upcall_remove(td);
1814 mtx_unlock_spin(&sched_lock);
1815 }
1664 kse_purge(p, td);
1816 kse_purge(p, td);
1817 }
1665 return (0);
1666}
1667
1668/*
1669 * Called in from locations that can safely check to see
1670 * whether we have to suspend or at least throttle for a
1671 * single-thread event (e.g. fork).
1672 *

--- 25 unchanged lines hidden (view full) ---

1698 * thread_exit() would be safe as that may be the outcome unless
1699 * return_instead is set.
1700 */
1701int
1702thread_suspend_check(int return_instead)
1703{
1704 struct thread *td;
1705 struct proc *p;
1818 return (0);
1819}
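/*
 * [Editor's sketch] Stripped of locking, the single-threading loop
 * above has a simple shape: sweep the other threads in the process,
 * knocking interruptible sleepers out of their sleep and suspending
 * whatever can be suspended, then block until enough of them have
 * checked in, repeating until the caller is the only unsuspended
 * thread.  The callbacks below are invented to stand in for the
 * per-thread sweep and the mi_switch() wait:
 */
struct sketch_proc {
	int	p_numthreads;	/* threads in the process */
	int	p_suspcount;	/* how many of those are suspended */
};

static void
sketch_single(struct sketch_proc *p,
    void (*sweep_siblings)(struct sketch_proc *),
    void (*wait_for_suspends)(struct sketch_proc *))
{
	while (p->p_numthreads - p->p_suspcount != 1) {
		/* Suspend or abort-sleep every other thread we can. */
		sweep_siblings(p);
		/* Maybe we suspended some threads... was it enough? */
		if (p->p_numthreads - p->p_suspcount == 1)
			break;
		/* Otherwise block until the stragglers suspend/exit. */
		wait_for_suspends(p);
	}
}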
1820
1821/*
1822 * Called in from locations that can safely check to see
1823 * whether we have to suspend or at least throttle for a
1824 * single-thread event (e.g. fork).
1825 *

--- 25 unchanged lines hidden (view full) ---

1851 * thread_exit() would be safe as that may be the outcome unless
1852 * return_instead is set.
1853 */
1854int
1855thread_suspend_check(int return_instead)
1856{
1857 struct thread *td;
1858 struct proc *p;
1706 struct kse *ke;
1707 struct ksegrp *kg;
1708
1709 td = curthread;
1710 p = td->td_proc;
1711 kg = td->td_ksegrp;
1712 PROC_LOCK_ASSERT(p, MA_OWNED);
1713 while (P_SHOULDSTOP(p)) {
1714 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {

--- 15 unchanged lines hidden (view full) ---

1730 * If the process is waiting for us to exit,
1731 * this thread should just suicide.
1732 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1733 */
1734 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1735 mtx_lock_spin(&sched_lock);
1736 while (mtx_owned(&Giant))
1737 mtx_unlock(&Giant);
1859 struct ksegrp *kg;
1860
1861 td = curthread;
1862 p = td->td_proc;
1863 kg = td->td_ksegrp;
1864 PROC_LOCK_ASSERT(p, MA_OWNED);
1865 while (P_SHOULDSTOP(p)) {
1866 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {

--- 15 unchanged lines hidden (view full) ---

1882 * If the process is waiting for us to exit,
1883 * this thread should just suicide.
1884 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1885 */
1886 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1887 mtx_lock_spin(&sched_lock);
1888 while (mtx_owned(&Giant))
1889 mtx_unlock(&Giant);
1738 /*
1739 * All threads should be exiting
1740 * unless they are the active "singlethread".
1741 * Destroy un-needed KSEs as we go.
1742 * KSEGRPs may implode too as #kses -> 0.
1743 */
1744 ke = td->td_kse;
1745 if (ke->ke_owner == td &&
1746 (kg->kg_kses >= kg->kg_numthreads ))
1747 ke->ke_flags |= KEF_EXIT;
1748 thread_exit();
1749 }
1750
1751 /*
1752 * When a thread suspends, it just
1753 * moves to the processes's suspend queue
1754 * and stays there.
1890 thread_exit();
1891 }
1892
1893 /*
1894 * When a thread suspends, it just
1895 * moves to the process's suspend queue
1896 * and stays there.
1755 *
1756 * XXXKSE if TDF_BOUND is true
1757 * it will not release its KSE, which might
1758 * lead to deadlock if there are not enough KSEs
1759 * to complete all waiting threads.
1760 * Maybe it should be able to 'lend' the KSE out again
1761 * (lent KSEs cannot go back to userland?),
1762 * and it can only be lent in the STOPPED state.
1763 */
1764 mtx_lock_spin(&sched_lock);
1765 if ((p->p_flag & P_STOPPED_SIG) &&
1766 (p->p_suspcount+1 == p->p_numthreads)) {
1767 mtx_unlock_spin(&sched_lock);
1768 PROC_LOCK(p->p_pptr);
1769 if ((p->p_pptr->p_procsig->ps_flag &
1770 PS_NOCLDSTOP) == 0) {

--- 104 unchanged lines hidden ---
1897 */
1898 mtx_lock_spin(&sched_lock);
1899 if ((p->p_flag & P_STOPPED_SIG) &&
1900 (p->p_suspcount+1 == p->p_numthreads)) {
1901 mtx_unlock_spin(&sched_lock);
1902 PROC_LOCK(p->p_pptr);
1903 if ((p->p_pptr->p_procsig->ps_flag &
1904 PS_NOCLDSTOP) == 0) {

--- 104 unchanged lines hidden ---