kern_timeout.c: diff between revision 7834081c88009811574366394f970a8a7ede6cd7 and revision 98c926b20fcc0953abe69be05b43e9cfc95b2c47
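In outline, this revision adds per-callout mutex support to the callout subsystem: a new callout_init_mtx() initializer and the CALLOUT_RETURNUNLOCKED flag associate a callout with an arbitrary mutex (callout_init() now expresses the non-MPSAFE case as c_mtx = &Giant), softclock() acquires that mutex around the handler, a new curr_cancelled flag lets callout_reset() and _callout_stop_safe() cancel a callout that has been dequeued but not yet started, and a to_avg_mtxcalls statistic is added alongside the existing Giant and MP-safe counters.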
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 39 unchanged lines hidden ---

48#include <sys/sysctl.h>
49
50static int avg_depth;
51SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
52 "Average number of items examined per softclock call. Units = 1/1000");
53static int avg_gcalls;
54SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
55 "Average number of Giant callouts made per softclock call. Units = 1/1000");
56static int avg_mtxcalls;
57SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
58 "Average number of mtx callouts made per softclock call. Units = 1/1000");
56static int avg_mpcalls;
57SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
58 "Average number of MP callouts made per softclock call. Units = 1/1000");
59/*
60 * TODO:
61 * allocate more timeout table slots when table overflows.
62 */
63

--- 11 unchanged lines hidden ---

75static struct callout *nextsoftcheck; /* Next callout to be checked. */
76
77/**
78 * Locked by callout_lock:
79 * curr_callout - If a callout is in progress, it is curr_callout.
80 * If curr_callout is non-NULL, threads waiting on
81 * callout_wait will be woken up as soon as the
82 * relevant callout completes.
59static int avg_mpcalls;
60SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
61 "Average number of MP callouts made per softclock call. Units = 1/1000");
62/*
63 * TODO:
64 * allocate more timeout table slots when table overflows.
65 */
66

--- 11 unchanged lines hidden ---

78static struct callout *nextsoftcheck; /* Next callout to be checked. */
79
80/**
81 * Locked by callout_lock:
82 * curr_callout - If a callout is in progress, it is curr_callout.
83 * If curr_callout is non-NULL, threads waiting on
84 * callout_wait will be woken up as soon as the
85 * relevant callout completes.
86 * curr_cancelled - Changing to 1 with both callout_lock and c_mtx held
87 * guarantees that the current callout will not run.
88 * The softclock() function sets this to 0 before it
89 * drops callout_lock to acquire c_mtx, and it calls
90 * the handler only if curr_cancelled is still 0 when
91 * c_mtx is successfully acquired.
83 * wakeup_ctr - Incremented every time a thread wants to wait
84 * for a callout to complete. Modified only when
85 * curr_callout is non-NULL.
86 * wakeup_needed - If a thread is waiting on callout_wait, then
87 * wakeup_needed is nonzero. Set only when
88 * curr_callout is non-NULL.
89 */
90static struct callout *curr_callout;
92 * wakeup_ctr - Incremented every time a thread wants to wait
93 * for a callout to complete. Modified only when
94 * curr_callout is non-NULL.
95 * wakeup_needed - If a thread is waiting on callout_wait, then
96 * wakeup_needed is nonzero. Set only when
97 * curr_callout is non-NULL.
98 */
99static struct callout *curr_callout;
100static int curr_cancelled;
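Read together with the softclock(), callout_reset() and _callout_stop_safe() changes below, the cancellation handshake is: softclock() sets curr_cancelled to 0 while it still holds callout_lock, drops callout_lock to take the callout's c_mtx, and runs the handler only if curr_cancelled is still 0 (it then sets the flag to 1 itself, so the callout can no longer be stopped). callout_reset() and callout_stop() (via _callout_stop_safe()), when called with the callout's mutex held, can therefore cancel a callout that softclock() has already dequeued but whose handler has not yet started, by setting curr_cancelled to 1 under callout_lock; _callout_stop_safe() returns 1 in that case.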
91static int wakeup_ctr;
92static int wakeup_needed;
93
94/**
95 * Locked by callout_wait_lock:
96 * callout_wait - If wakeup_needed is set, callout_wait will be
97 * triggered after the current callout finishes.
98 * wakeup_done_ctr - Set to the current value of wakeup_ctr after

--- 77 unchanged lines hidden ---

176softclock(void *dummy)
177{
178 struct callout *c;
179 struct callout_tailq *bucket;
180 int curticks;
181 int steps; /* #steps since we last allowed interrupts */
182 int depth;
183 int mpcalls;
101static int wakeup_ctr;
102static int wakeup_needed;
103
104/**
105 * Locked by callout_wait_lock:
106 * callout_wait - If wakeup_needed is set, callout_wait will be
107 * triggered after the current callout finishes.
108 * wakeup_done_ctr - Set to the current value of wakeup_ctr after

--- 77 unchanged lines hidden ---

186softclock(void *dummy)
187{
188 struct callout *c;
189 struct callout_tailq *bucket;
190 int curticks;
191 int steps; /* #steps since we last allowed interrupts */
192 int depth;
193 int mpcalls;
194 int mtxcalls;
184 int gcalls;
185 int wakeup_cookie;
186#ifdef DIAGNOSTIC
187 struct bintime bt1, bt2;
188 struct timespec ts2;
189 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
190 static timeout_t *lastfunc;
191#endif
192
193#ifndef MAX_SOFTCLOCK_STEPS
194#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
195#endif /* MAX_SOFTCLOCK_STEPS */
196
197 mpcalls = 0;
195 int gcalls;
196 int wakeup_cookie;
197#ifdef DIAGNOSTIC
198 struct bintime bt1, bt2;
199 struct timespec ts2;
200 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
201 static timeout_t *lastfunc;
202#endif
203
204#ifndef MAX_SOFTCLOCK_STEPS
205#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
206#endif /* MAX_SOFTCLOCK_STEPS */
207
208 mpcalls = 0;
209 mtxcalls = 0;
198 gcalls = 0;
199 depth = 0;
200 steps = 0;
201 mtx_lock_spin(&callout_lock);
202 while (softticks != ticks) {
203 softticks++;
204 /*
205 * softticks may be modified by hard clock, so cache

--- 14 unchanged lines hidden ---

220 ; /* nothing */
221 mtx_lock_spin(&callout_lock);
222 c = nextsoftcheck;
223 steps = 0;
224 }
225 } else {
226 void (*c_func)(void *);
227 void *c_arg;
210 gcalls = 0;
211 depth = 0;
212 steps = 0;
213 mtx_lock_spin(&callout_lock);
214 while (softticks != ticks) {
215 softticks++;
216 /*
217 * softticks may be modified by hard clock, so cache

--- 14 unchanged lines hidden ---

232 ; /* nothing */
233 mtx_lock_spin(&callout_lock);
234 c = nextsoftcheck;
235 steps = 0;
236 }
237 } else {
238 void (*c_func)(void *);
239 void *c_arg;
240 struct mtx *c_mtx;
228 int c_flags;
229
230 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
231 TAILQ_REMOVE(bucket, c, c_links.tqe);
232 c_func = c->c_func;
233 c_arg = c->c_arg;
241 int c_flags;
242
243 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
244 TAILQ_REMOVE(bucket, c, c_links.tqe);
245 c_func = c->c_func;
246 c_arg = c->c_arg;
247 c_mtx = c->c_mtx;
234 c_flags = c->c_flags;
235 if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
236 c->c_func = NULL;
237 c->c_flags = CALLOUT_LOCAL_ALLOC;
238 SLIST_INSERT_HEAD(&callfree, c,
239 c_links.sle);
240 } else {
241 c->c_flags =
242 (c->c_flags & ~CALLOUT_PENDING);
243 }
244 curr_callout = c;
248 c_flags = c->c_flags;
249 if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
250 c->c_func = NULL;
251 c->c_flags = CALLOUT_LOCAL_ALLOC;
252 SLIST_INSERT_HEAD(&callfree, c,
253 c_links.sle);
254 } else {
255 c->c_flags =
256 (c->c_flags & ~CALLOUT_PENDING);
257 }
258 curr_callout = c;
259 curr_cancelled = 0;
245 mtx_unlock_spin(&callout_lock);
260 mtx_unlock_spin(&callout_lock);
246 if (!(c_flags & CALLOUT_MPSAFE)) {
247 mtx_lock(&Giant);
248 gcalls++;
249 CTR1(KTR_CALLOUT, "callout %p", c_func);
261 if (c_mtx != NULL) {
262 mtx_lock(c_mtx);
263 /*
264 * The callout may have been cancelled
265 * while we switched locks.
266 */
267 if (curr_cancelled) {
268 mtx_unlock(c_mtx);
269 mtx_lock_spin(&callout_lock);
270 goto done_locked;
271 }
272 /* The callout cannot be stopped now. */
273 curr_cancelled = 1;
274
275 if (c_mtx == &Giant) {
276 gcalls++;
277 CTR1(KTR_CALLOUT, "callout %p",
278 c_func);
279 } else {
280 mtxcalls++;
281 CTR1(KTR_CALLOUT,
282 "callout mtx %p",
283 c_func);
284 }
250 } else {
251 mpcalls++;
252 CTR1(KTR_CALLOUT, "callout mpsafe %p",
253 c_func);
254 }
255#ifdef DIAGNOSTIC
256 binuptime(&bt1);
257 mtx_lock(&dont_sleep_in_callout);

--- 12 unchanged lines hidden ---

270 c_func, c_arg,
271 (intmax_t)ts2.tv_sec,
272 ts2.tv_nsec);
273 }
274 maxdt = bt2.frac;
275 lastfunc = c_func;
276 }
277#endif
285 } else {
286 mpcalls++;
287 CTR1(KTR_CALLOUT, "callout mpsafe %p",
288 c_func);
289 }
290#ifdef DIAGNOSTIC
291 binuptime(&bt1);
292 mtx_lock(&dont_sleep_in_callout);

--- 12 unchanged lines hidden ---

305 c_func, c_arg,
306 (intmax_t)ts2.tv_sec,
307 ts2.tv_nsec);
308 }
309 maxdt = bt2.frac;
310 lastfunc = c_func;
311 }
312#endif
278 if (!(c_flags & CALLOUT_MPSAFE))
279 mtx_unlock(&Giant);
313 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
314 mtx_unlock(c_mtx);
280 mtx_lock_spin(&callout_lock);
315 mtx_lock_spin(&callout_lock);
316done_locked:
281 curr_callout = NULL;
282 if (wakeup_needed) {
283 /*
284 * There might be someone waiting
285 * for the callout to complete.
286 */
287 wakeup_cookie = wakeup_ctr;
288 mtx_unlock_spin(&callout_lock);

--- 6 unchanged lines hidden ---

295 }
296 steps = 0;
297 c = nextsoftcheck;
298 }
299 }
300 }
301 avg_depth += (depth * 1000 - avg_depth) >> 8;
302 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
317 curr_callout = NULL;
318 if (wakeup_needed) {
319 /*
320 * There might be someone waiting
321 * for the callout to complete.
322 */
323 wakeup_cookie = wakeup_ctr;
324 mtx_unlock_spin(&callout_lock);

--- 6 unchanged lines hidden ---

331 }
332 steps = 0;
333 c = nextsoftcheck;
334 }
335 }
336 }
337 avg_depth += (depth * 1000 - avg_depth) >> 8;
338 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
339 avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
303 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
304 nextsoftcheck = NULL;
305 mtx_unlock_spin(&callout_lock);
306}
307
308/*
309 * timeout --
310 * Execute a function after a specified length of time.

--- 81 unchanged lines hidden ---

392void
393callout_reset(c, to_ticks, ftn, arg)
394 struct callout *c;
395 int to_ticks;
396 void (*ftn)(void *);
397 void *arg;
398{
399
340 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
341 nextsoftcheck = NULL;
342 mtx_unlock_spin(&callout_lock);
343}
344
345/*
346 * timeout --
347 * Execute a function after a specified length of time.

--- 81 unchanged lines hidden ---

429void
430callout_reset(c, to_ticks, ftn, arg)
431 struct callout *c;
432 int to_ticks;
433 void (*ftn)(void *);
434 void *arg;
435{
436
437#ifdef notyet /* Some callers of timeout() do not hold Giant. */
438 if (c->c_mtx != NULL)
439 mtx_assert(c->c_mtx, MA_OWNED);
440#endif
441
400 mtx_lock_spin(&callout_lock);
442 mtx_lock_spin(&callout_lock);
401 if (c == curr_callout && wakeup_needed) {
443 if (c == curr_callout) {
402 /*
403 * We're being asked to reschedule a callout which is
444 /*
445 * We're being asked to reschedule a callout which is
404 * currently in progress, and someone has called
405 * callout_drain to kill that callout. Don't reschedule.
446 * currently in progress. If there is a mutex then we
447 * can cancel the callout if it has not really started.
406 */
448 */
407 mtx_unlock_spin(&callout_lock);
408 return;
449 if (c->c_mtx != NULL && !curr_cancelled)
450 curr_cancelled = 1;
451 if (wakeup_needed) {
452 /*
453 * Someone has called callout_drain to kill this
454 * callout. Don't reschedule.
455 */
456 mtx_unlock_spin(&callout_lock);
457 return;
458 }
409 }
410 if (c->c_flags & CALLOUT_PENDING) {
411 if (nextsoftcheck == c) {
412 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
413 }
414 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
415 c_links.tqe);
416

--- 24 unchanged lines hidden ---

441 mtx_unlock_spin(&callout_lock);
442}
443
444int
445_callout_stop_safe(c, safe)
446 struct callout *c;
447 int safe;
448{
459 }
460 if (c->c_flags & CALLOUT_PENDING) {
461 if (nextsoftcheck == c) {
462 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
463 }
464 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
465 c_links.tqe);
466

--- 24 unchanged lines hidden (view full) ---

491 mtx_unlock_spin(&callout_lock);
492}
493
494int
495_callout_stop_safe(c, safe)
496 struct callout *c;
497 int safe;
498{
449 int wakeup_cookie;
499 int use_mtx, wakeup_cookie;
450
500
501 if (!safe && c->c_mtx != NULL) {
502#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
503 mtx_assert(c->c_mtx, MA_OWNED);
504 use_mtx = 1;
505#else
506 use_mtx = mtx_owned(c->c_mtx);
507#endif
508 } else {
509 use_mtx = 0;
510 }
511
451 mtx_lock_spin(&callout_lock);
452 /*
453 * Don't attempt to delete a callout that's not on the queue.
454 */
455 if (!(c->c_flags & CALLOUT_PENDING)) {
456 c->c_flags &= ~CALLOUT_ACTIVE;
512 mtx_lock_spin(&callout_lock);
513 /*
514 * Don't attempt to delete a callout that's not on the queue.
515 */
516 if (!(c->c_flags & CALLOUT_PENDING)) {
517 c->c_flags &= ~CALLOUT_ACTIVE;
457 if (c == curr_callout && safe) {
518 if (c != curr_callout) {
519 mtx_unlock_spin(&callout_lock);
520 return (0);
521 }
522 if (safe) {
458 /* We need to wait until the callout is finished. */
459 wakeup_needed = 1;
460 wakeup_cookie = wakeup_ctr++;
461 mtx_unlock_spin(&callout_lock);
462 mtx_lock(&callout_wait_lock);
463
464 /*
465 * Check to make sure that softclock() didn't
466 * do the wakeup in between our dropping
467 * callout_lock and picking up callout_wait_lock
468 */
469 if (wakeup_cookie - wakeup_done_ctr > 0)
470 cv_wait(&callout_wait, &callout_wait_lock);
471
472 mtx_unlock(&callout_wait_lock);
523 /* We need to wait until the callout is finished. */
524 wakeup_needed = 1;
525 wakeup_cookie = wakeup_ctr++;
526 mtx_unlock_spin(&callout_lock);
527 mtx_lock(&callout_wait_lock);
528
529 /*
530 * Check to make sure that softclock() didn't
531 * do the wakeup in between our dropping
532 * callout_lock and picking up callout_wait_lock
533 */
534 if (wakeup_cookie - wakeup_done_ctr > 0)
535 cv_wait(&callout_wait, &callout_wait_lock);
536
537 mtx_unlock(&callout_wait_lock);
538 } else if (use_mtx && !curr_cancelled) {
539 /* We can stop the callout before it runs. */
540 curr_cancelled = 1;
541 mtx_unlock_spin(&callout_lock);
542 return (1);
473 } else
474 mtx_unlock_spin(&callout_lock);
475 return (0);
476 }
477 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
478
479 if (nextsoftcheck == c) {
480 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);

--- 9 unchanged lines hidden ---

490}
491
492void
493callout_init(c, mpsafe)
494 struct callout *c;
495 int mpsafe;
496{
497 bzero(c, sizeof *c);
543 } else
544 mtx_unlock_spin(&callout_lock);
545 return (0);
546 }
547 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
548
549 if (nextsoftcheck == c) {
550 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);

--- 9 unchanged lines hidden (view full) ---

560}
561
562void
563callout_init(c, mpsafe)
564 struct callout *c;
565 int mpsafe;
566{
567 bzero(c, sizeof *c);
498 if (mpsafe)
499 c->c_flags |= CALLOUT_MPSAFE;
568 if (mpsafe) {
569 c->c_mtx = NULL;
570 c->c_flags = CALLOUT_RETURNUNLOCKED;
571 } else {
572 c->c_mtx = &Giant;
573 c->c_flags = 0;
574 }
500}
501
575}
576
577void
578callout_init_mtx(c, mtx, flags)
579 struct callout *c;
580 struct mtx *mtx;
581 int flags;
582{
583 bzero(c, sizeof *c);
584 c->c_mtx = mtx;
585 KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
586 ("callout_init_mtx: bad flags %d", flags));
587 /* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
588 KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
589 ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
590 c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
591}
592
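To make the new interface concrete, here is a minimal, hypothetical sketch of a driver that ties a callout to its own mutex with callout_init_mtx(); the mydrv_* names and the softc layout are invented for illustration and are not part of this diff. Passing CALLOUT_RETURNUNLOCKED instead of 0 would tell softclock() not to release the mutex itself, in which case the handler must unlock it before returning.

/*
 * Hypothetical consumer of the new interface (not part of kern_timeout.c):
 * a driver that protects its timer state with its own mutex.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct mydrv_softc {
    struct mtx      sc_mtx;
    struct callout  sc_tick;
    int             sc_count;
};

static void
mydrv_tick(void *arg)
{
    struct mydrv_softc *sc = arg;

    /* softclock() acquired sc_mtx before calling us. */
    mtx_assert(&sc->sc_mtx, MA_OWNED);
    sc->sc_count++;

    /*
     * Rescheduling from inside the handler works: callout_reset()
     * sees c == curr_callout, but since no callout_drain() is
     * waiting it simply requeues the callout.
     */
    callout_reset(&sc->sc_tick, hz, mydrv_tick, sc);

    /* Flags were 0, so softclock() drops sc_mtx after we return. */
}

static void
mydrv_attach(struct mydrv_softc *sc)
{
    mtx_init(&sc->sc_mtx, "mydrv", NULL, MTX_DEF);
    callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

    mtx_lock(&sc->sc_mtx);
    callout_reset(&sc->sc_tick, hz, mydrv_tick, sc);
    mtx_unlock(&sc->sc_mtx);
}

static void
mydrv_detach(struct mydrv_softc *sc)
{
    /*
     * With sc_mtx held, callout_stop() can also cancel a callout that
     * softclock() has dequeued but not yet started (the use_mtx path
     * in _callout_stop_safe() above).
     */
    mtx_lock(&sc->sc_mtx);
    callout_stop(&sc->sc_tick);
    mtx_unlock(&sc->sc_mtx);

    /* Wait out a handler that may already be running, without sc_mtx. */
    callout_drain(&sc->sc_tick);
    mtx_destroy(&sc->sc_mtx);
}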
502#ifdef APM_FIXUP_CALLTODO
503/*
504 * Adjust the kernel calltodo timeout list. This routine is used after
505 * an APM resume to recalculate the calltodo timer list values with the
506 * number of hz's we have been sleeping. The next hardclock() will detect
507 * that there are fired timers and run softclock() to execute them.
508 *
509 * Please note, I have not done an exhaustive analysis of what code this

--- 57 unchanged lines hidden ---
593#ifdef APM_FIXUP_CALLTODO
594/*
595 * Adjust the kernel calltodo timeout list. This routine is used after
596 * an APM resume to recalculate the calltodo timer list values with the
597 * number of hz's we have been sleeping. The next hardclock() will detect
598 * that there are fired timers and run softclock() to execute them.
599 *
600 * Please note, I have not done an exhaustive analysis of what code this

--- 57 unchanged lines hidden ---