/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
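
/*
 * Illustrative sketch, not part of the original file: on a hypothetical
 * wheel of 256 buckets (callwheelmask == 255), a callout due 44 ticks
 * from now and one due 300 ticks from now hash to the same bucket,
 * because their expiry times differ by exactly one revolution of the
 * wheel: (ticks + 44) & 255 == (ticks + 300) & 255.  This is why
 * softclock() below compares c_time against the current tick and skips
 * entries that belong to a later lap.  The callout and handler names
 * below are hypothetical.
 */
#if 0
	/*
	 * Both of these land in callwheel[(ticks + 44) & 255]; softclock()
	 * fires c_soon on this revolution and skips c_later until the next.
	 */
	callout_reset(&c_soon, 44, example_handler, NULL);
	callout_reset(&c_later, 300, example_handler, NULL);
#endif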

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					splx(s);
					s = splhigh();
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_lock(&Giant);
				splx(s);
				c_func(c_arg);
				s = splhigh();
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	int s;
	struct callout *new;
	struct callout_handle handle;

	s = splhigh();
	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	splx(s);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	register int s;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	s = splhigh();
	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
	splx(s);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
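
/*
 * Illustrative sketch, not part of the original file: how a client of the
 * legacy timeout()/untimeout() interface above might arm and cancel a
 * one-second timer.  The foo_softc structure and foo_* functions are
 * hypothetical; note that untimeout() wants the original function and
 * argument back, not just the handle.
 */
#if 0
struct foo_softc {
	struct callout_handle	fs_timeout_handle;
};

static void
foo_expired(void *arg)
{
	struct foo_softc *sc = arg;

	/* Timer fired from softclock(); sc points at our softc. */
}

static void
foo_start(struct foo_softc *sc)
{
	/* Make the handle safe to pass to untimeout() before first use. */
	callout_handle_init(&sc->fs_timeout_handle);
	sc->fs_timeout_handle = timeout(foo_expired, sc, hz);
}

static void
foo_stop(struct foo_softc *sc)
{
	/* Must match the ftn and arg given to timeout() above. */
	untimeout(foo_expired, sc, sc->fs_timeout_handle);
}
#endif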

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn) __P((void *));
	void *arg;
{
	int s;

	s = splhigh();
	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
	 * but there's no point since doing this setup doesn't take much
	 * time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
	splx(s);
}

void
callout_stop(c)
	struct callout *c;
{
	int s;

	s = splhigh();
	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		splx(s);
		return;
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	splx(s);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
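
/*
 * Illustrative sketch, not part of the original file: the same job done
 * with the newer interface above, where the client owns the callout
 * structure.  The bar_* names are hypothetical.  Passing 0 to
 * callout_init() leaves CALLOUT_MPSAFE clear, so softclock() runs the
 * handler with Giant held.
 */
#if 0
static struct callout bar_callout;

static void
bar_tick(void *arg)
{
	/* CALLOUT_PENDING was cleared before we were called; rearm. */
	callout_reset(&bar_callout, hz / 10, bar_tick, arg);
}

static void
bar_attach(void *arg)
{
	callout_init(&bar_callout, 0);
	callout_reset(&bar_callout, hz / 10, bar_tick, arg);
}

static void
bar_detach(void)
{
	/* Harmless if the callout is not currently pending. */
	callout_stop(&bar_callout);
}
#endif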

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.  - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);
	splx(s);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
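
/*
 * Illustrative arithmetic, not part of the original file: with a
 * hypothetical hz of 100 (tick == 10000 us), a suspend of 2.5 seconds
 * converts to delta_ticks via the first branch above as follows.
 */
#if 0
	/* tv_sec = 2, tv_usec = 500000 */
	delta_ticks = (2 * 1000000 + 500000 + (10000 - 1)) / 10000 + 1;
	/* = 2509999 / 10000 + 1 = 250 + 1 = 251 ticks */
#endif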