1 /*- 2 * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 /* 31 * Common routines to manage event timers hardware. 32 */ 33 34 #include "opt_device_polling.h" 35 #include "opt_kdtrace.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/bus.h> 40 #include <sys/lock.h> 41 #include <sys/kdb.h> 42 #include <sys/ktr.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/kernel.h> 46 #include <sys/sched.h> 47 #include <sys/smp.h> 48 #include <sys/sysctl.h> 49 #include <sys/timeet.h> 50 #include <sys/timetc.h> 51 52 #include <machine/atomic.h> 53 #include <machine/clock.h> 54 #include <machine/cpu.h> 55 #include <machine/smp.h> 56 57 #ifdef KDTRACE_HOOKS 58 #include <sys/dtrace_bsd.h> 59 cyclic_clock_func_t cyclic_clock_func = NULL; 60 #endif 61 62 int cpu_can_deep_sleep = 0; /* C3 state is available. */ 63 int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */ 64 65 static void setuptimer(void); 66 static void loadtimer(struct bintime *now, int first); 67 static int doconfigtimer(void); 68 static void configtimer(int start); 69 static int round_freq(struct eventtimer *et, int freq); 70 71 static void getnextcpuevent(struct bintime *event, int idle); 72 static void getnextevent(struct bintime *event); 73 static int handleevents(struct bintime *now, int fake); 74 #ifdef SMP 75 static void cpu_new_callout(int cpu, int ticks); 76 #endif 77 78 static struct mtx et_hw_mtx; 79 80 #define ET_HW_LOCK(state) \ 81 { \ 82 if (timer->et_flags & ET_FLAGS_PERCPU) \ 83 mtx_lock_spin(&(state)->et_hw_mtx); \ 84 else \ 85 mtx_lock_spin(&et_hw_mtx); \ 86 } 87 88 #define ET_HW_UNLOCK(state) \ 89 { \ 90 if (timer->et_flags & ET_FLAGS_PERCPU) \ 91 mtx_unlock_spin(&(state)->et_hw_mtx); \ 92 else \ 93 mtx_unlock_spin(&et_hw_mtx); \ 94 } 95 96 static struct eventtimer *timer = NULL; 97 static struct bintime timerperiod; /* Timer period for periodic mode. */ 98 static struct bintime hardperiod; /* hardclock() events period. */ 99 static struct bintime statperiod; /* statclock() events period. */ 100 static struct bintime profperiod; /* profclock() events period. */ 101 static struct bintime nexttick; /* Next global timer tick time. */ 102 static struct bintime nexthard; /* Next global hardlock() event. */ 103 static u_int busy = 0; /* Reconfiguration is in progress. */ 104 static int profiling = 0; /* Profiling events enabled. */ 105 106 static char timername[32]; /* Wanted timer. */ 107 TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername)); 108 109 static int singlemul = 0; /* Multiplier for periodic mode. */ 110 TUNABLE_INT("kern.eventtimer.singlemul", &singlemul); 111 SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul, 112 0, "Multiplier for periodic mode"); 113 114 static u_int idletick = 0; /* Run periodic events when idle. */ 115 TUNABLE_INT("kern.eventtimer.idletick", &idletick); 116 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick, 117 0, "Run periodic events when idle"); 118 119 static u_int activetick = 1; /* Run all periodic events when active. */ 120 TUNABLE_INT("kern.eventtimer.activetick", &activetick); 121 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick, 122 0, "Run all periodic events when active"); 123 124 static int periodic = 0; /* Periodic or one-shot mode. */ 125 static int want_periodic = 0; /* What mode to prefer. */ 126 TUNABLE_INT("kern.eventtimer.periodic", &want_periodic); 127 128 struct pcpu_state { 129 struct mtx et_hw_mtx; /* Per-CPU timer mutex. */ 130 u_int action; /* Reconfiguration requests. */ 131 u_int handle; /* Immediate handle resuests. */ 132 struct bintime now; /* Last tick time. */ 133 struct bintime nextevent; /* Next scheduled event on this CPU. */ 134 struct bintime nexttick; /* Next timer tick time. */ 135 struct bintime nexthard; /* Next hardlock() event. */ 136 struct bintime nextstat; /* Next statclock() event. */ 137 struct bintime nextprof; /* Next profclock() event. */ 138 #ifdef KDTRACE_HOOKS 139 struct bintime nextcyc; /* Next OpenSolaris cyclics event. */ 140 #endif 141 int ipi; /* This CPU needs IPI. */ 142 int idle; /* This CPU is in idle mode. */ 143 }; 144 145 static DPCPU_DEFINE(struct pcpu_state, timerstate); 146 147 #define FREQ2BT(freq, bt) \ 148 { \ 149 (bt)->sec = 0; \ 150 (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \ 151 } 152 #define BT2FREQ(bt) \ 153 (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \ 154 ((bt)->frac >> 1)) 155 156 /* 157 * Timer broadcast IPI handler. 158 */ 159 int 160 hardclockintr(void) 161 { 162 struct bintime now; 163 struct pcpu_state *state; 164 int done; 165 166 if (doconfigtimer() || busy) 167 return (FILTER_HANDLED); 168 state = DPCPU_PTR(timerstate); 169 now = state->now; 170 CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x", 171 curcpu, now.sec, (u_int)(now.frac >> 32), 172 (u_int)(now.frac & 0xffffffff)); 173 done = handleevents(&now, 0); 174 return (done ? FILTER_HANDLED : FILTER_STRAY); 175 } 176 177 /* 178 * Handle all events for specified time on this CPU 179 */ 180 static int 181 handleevents(struct bintime *now, int fake) 182 { 183 struct bintime t; 184 struct trapframe *frame; 185 struct pcpu_state *state; 186 uintfptr_t pc; 187 int usermode; 188 int done, runs; 189 190 CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x", 191 curcpu, now->sec, (u_int)(now->frac >> 32), 192 (u_int)(now->frac & 0xffffffff)); 193 done = 0; 194 if (fake) { 195 frame = NULL; 196 usermode = 0; 197 pc = 0; 198 } else { 199 frame = curthread->td_intr_frame; 200 usermode = TRAPF_USERMODE(frame); 201 pc = TRAPF_PC(frame); 202 } 203 204 state = DPCPU_PTR(timerstate); 205 206 runs = 0; 207 while (bintime_cmp(now, &state->nexthard, >=)) { 208 bintime_addx(&state->nexthard, hardperiod.frac); 209 runs++; 210 } 211 if (runs) { 212 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && 213 bintime_cmp(&state->nexthard, &nexthard, >)) 214 nexthard = state->nexthard; 215 if (fake < 2) { 216 hardclock_cnt(runs, usermode); 217 done = 1; 218 } 219 } 220 runs = 0; 221 while (bintime_cmp(now, &state->nextstat, >=)) { 222 bintime_addx(&state->nextstat, statperiod.frac); 223 runs++; 224 } 225 if (runs && fake < 2) { 226 statclock_cnt(runs, usermode); 227 done = 1; 228 } 229 if (profiling) { 230 runs = 0; 231 while (bintime_cmp(now, &state->nextprof, >=)) { 232 bintime_addx(&state->nextprof, profperiod.frac); 233 runs++; 234 } 235 if (runs && !fake) { 236 profclock_cnt(runs, usermode, pc); 237 done = 1; 238 } 239 } else 240 state->nextprof = state->nextstat; 241 242 #ifdef KDTRACE_HOOKS 243 if (fake == 0 && cyclic_clock_func != NULL && 244 state->nextcyc.sec != -1 && 245 bintime_cmp(now, &state->nextcyc, >=)) { 246 state->nextcyc.sec = -1; 247 (*cyclic_clock_func)(frame); 248 } 249 #endif 250 251 getnextcpuevent(&t, 0); 252 if (fake == 2) { 253 state->nextevent = t; 254 return (done); 255 } 256 ET_HW_LOCK(state); 257 if (!busy) { 258 state->idle = 0; 259 state->nextevent = t; 260 loadtimer(now, 0); 261 } 262 ET_HW_UNLOCK(state); 263 return (done); 264 } 265 266 /* 267 * Schedule binuptime of the next event on current CPU. 268 */ 269 static void 270 getnextcpuevent(struct bintime *event, int idle) 271 { 272 struct bintime tmp; 273 struct pcpu_state *state; 274 int skip; 275 276 state = DPCPU_PTR(timerstate); 277 /* Handle hardclock() events. */ 278 *event = state->nexthard; 279 if (idle || (!activetick && !profiling && 280 (timer->et_flags & ET_FLAGS_PERCPU) == 0)) { 281 skip = idle ? 4 : (stathz / 2); 282 if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip) 283 skip = tc_min_ticktock_freq; 284 skip = callout_tickstofirst(hz / skip) - 1; 285 CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip); 286 tmp = hardperiod; 287 bintime_mul(&tmp, skip); 288 bintime_add(event, &tmp); 289 } 290 if (!idle) { /* If CPU is active - handle other types of events. */ 291 if (bintime_cmp(event, &state->nextstat, >)) 292 *event = state->nextstat; 293 if (profiling && bintime_cmp(event, &state->nextprof, >)) 294 *event = state->nextprof; 295 } 296 #ifdef KDTRACE_HOOKS 297 if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >)) 298 *event = state->nextcyc; 299 #endif 300 } 301 302 /* 303 * Schedule binuptime of the next event on all CPUs. 304 */ 305 static void 306 getnextevent(struct bintime *event) 307 { 308 struct pcpu_state *state; 309 #ifdef SMP 310 int cpu; 311 #endif 312 int c, nonidle; 313 314 state = DPCPU_PTR(timerstate); 315 *event = state->nextevent; 316 c = curcpu; 317 nonidle = !state->idle; 318 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) { 319 #ifdef SMP 320 CPU_FOREACH(cpu) { 321 if (curcpu == cpu) 322 continue; 323 state = DPCPU_ID_PTR(cpu, timerstate); 324 nonidle += !state->idle; 325 if (bintime_cmp(event, &state->nextevent, >)) { 326 *event = state->nextevent; 327 c = cpu; 328 } 329 } 330 #endif 331 if (nonidle != 0 && bintime_cmp(event, &nexthard, >)) 332 *event = nexthard; 333 } 334 CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d", 335 curcpu, event->sec, (u_int)(event->frac >> 32), 336 (u_int)(event->frac & 0xffffffff), c); 337 } 338 339 /* Hardware timer callback function. */ 340 static void 341 timercb(struct eventtimer *et, void *arg) 342 { 343 struct bintime now; 344 struct bintime *next; 345 struct pcpu_state *state; 346 #ifdef SMP 347 int cpu, bcast; 348 #endif 349 350 /* Do not touch anything if somebody reconfiguring timers. */ 351 if (busy) 352 return; 353 /* Update present and next tick times. */ 354 state = DPCPU_PTR(timerstate); 355 if (et->et_flags & ET_FLAGS_PERCPU) { 356 next = &state->nexttick; 357 } else 358 next = &nexttick; 359 binuptime(&now); 360 if (periodic) { 361 *next = now; 362 bintime_addx(next, timerperiod.frac); /* Next tick in 1 period. */ 363 } else 364 next->sec = -1; /* Next tick is not scheduled yet. */ 365 state->now = now; 366 CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x", 367 curcpu, (int)(now.sec), (u_int)(now.frac >> 32), 368 (u_int)(now.frac & 0xffffffff)); 369 370 #ifdef SMP 371 /* Prepare broadcasting to other CPUs for non-per-CPU timers. */ 372 bcast = 0; 373 if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) { 374 CPU_FOREACH(cpu) { 375 state = DPCPU_ID_PTR(cpu, timerstate); 376 ET_HW_LOCK(state); 377 state->now = now; 378 if (bintime_cmp(&now, &state->nextevent, >=)) { 379 state->nextevent.sec++; 380 if (curcpu != cpu) { 381 state->ipi = 1; 382 bcast = 1; 383 } 384 } 385 ET_HW_UNLOCK(state); 386 } 387 } 388 #endif 389 390 /* Handle events for this time on this CPU. */ 391 handleevents(&now, 0); 392 393 #ifdef SMP 394 /* Broadcast interrupt to other CPUs for non-per-CPU timers. */ 395 if (bcast) { 396 CPU_FOREACH(cpu) { 397 if (curcpu == cpu) 398 continue; 399 state = DPCPU_ID_PTR(cpu, timerstate); 400 if (state->ipi) { 401 state->ipi = 0; 402 ipi_cpu(cpu, IPI_HARDCLOCK); 403 } 404 } 405 } 406 #endif 407 } 408 409 /* 410 * Load new value into hardware timer. 411 */ 412 static void 413 loadtimer(struct bintime *now, int start) 414 { 415 struct pcpu_state *state; 416 struct bintime new; 417 struct bintime *next; 418 uint64_t tmp; 419 int eq; 420 421 if (timer->et_flags & ET_FLAGS_PERCPU) { 422 state = DPCPU_PTR(timerstate); 423 next = &state->nexttick; 424 } else 425 next = &nexttick; 426 if (periodic) { 427 if (start) { 428 /* 429 * Try to start all periodic timers aligned 430 * to period to make events synchronous. 431 */ 432 tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28); 433 tmp = (tmp % (timerperiod.frac >> 28)) << 28; 434 new.sec = 0; 435 new.frac = timerperiod.frac - tmp; 436 if (new.frac < tmp) /* Left less then passed. */ 437 bintime_addx(&new, timerperiod.frac); 438 CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x", 439 curcpu, now->sec, (u_int)(now->frac >> 32), 440 new.sec, (u_int)(new.frac >> 32)); 441 *next = new; 442 bintime_add(next, now); 443 et_start(timer, &new, &timerperiod); 444 } 445 } else { 446 getnextevent(&new); 447 eq = bintime_cmp(&new, next, ==); 448 CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d", 449 curcpu, new.sec, (u_int)(new.frac >> 32), 450 (u_int)(new.frac & 0xffffffff), 451 eq); 452 if (!eq) { 453 *next = new; 454 bintime_sub(&new, now); 455 et_start(timer, &new, NULL); 456 } 457 } 458 } 459 460 /* 461 * Prepare event timer parameters after configuration changes. 462 */ 463 static void 464 setuptimer(void) 465 { 466 int freq; 467 468 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0) 469 periodic = 0; 470 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0) 471 periodic = 1; 472 singlemul = MIN(MAX(singlemul, 1), 20); 473 freq = hz * singlemul; 474 while (freq < (profiling ? profhz : stathz)) 475 freq += hz; 476 freq = round_freq(timer, freq); 477 FREQ2BT(freq, &timerperiod); 478 } 479 480 /* 481 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler. 482 */ 483 static int 484 doconfigtimer(void) 485 { 486 struct bintime now; 487 struct pcpu_state *state; 488 489 state = DPCPU_PTR(timerstate); 490 switch (atomic_load_acq_int(&state->action)) { 491 case 1: 492 binuptime(&now); 493 ET_HW_LOCK(state); 494 loadtimer(&now, 1); 495 ET_HW_UNLOCK(state); 496 state->handle = 0; 497 atomic_store_rel_int(&state->action, 0); 498 return (1); 499 case 2: 500 ET_HW_LOCK(state); 501 et_stop(timer); 502 ET_HW_UNLOCK(state); 503 state->handle = 0; 504 atomic_store_rel_int(&state->action, 0); 505 return (1); 506 } 507 if (atomic_readandclear_int(&state->handle) && !busy) { 508 binuptime(&now); 509 handleevents(&now, 0); 510 return (1); 511 } 512 return (0); 513 } 514 515 /* 516 * Reconfigure specified timer. 517 * For per-CPU timers use IPI to make other CPUs to reconfigure. 518 */ 519 static void 520 configtimer(int start) 521 { 522 struct bintime now, next; 523 struct pcpu_state *state; 524 int cpu; 525 526 if (start) { 527 setuptimer(); 528 binuptime(&now); 529 } 530 critical_enter(); 531 ET_HW_LOCK(DPCPU_PTR(timerstate)); 532 if (start) { 533 /* Initialize time machine parameters. */ 534 next = now; 535 bintime_addx(&next, timerperiod.frac); 536 if (periodic) 537 nexttick = next; 538 else 539 nexttick.sec = -1; 540 CPU_FOREACH(cpu) { 541 state = DPCPU_ID_PTR(cpu, timerstate); 542 state->now = now; 543 state->nextevent = next; 544 if (periodic) 545 state->nexttick = next; 546 else 547 state->nexttick.sec = -1; 548 state->nexthard = next; 549 state->nextstat = next; 550 state->nextprof = next; 551 hardclock_sync(cpu); 552 } 553 busy = 0; 554 /* Start global timer or per-CPU timer of this CPU. */ 555 loadtimer(&now, 1); 556 } else { 557 busy = 1; 558 /* Stop global timer or per-CPU timer of this CPU. */ 559 et_stop(timer); 560 } 561 ET_HW_UNLOCK(DPCPU_PTR(timerstate)); 562 #ifdef SMP 563 /* If timer is global or there is no other CPUs yet - we are done. */ 564 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) { 565 critical_exit(); 566 return; 567 } 568 /* Set reconfigure flags for other CPUs. */ 569 CPU_FOREACH(cpu) { 570 state = DPCPU_ID_PTR(cpu, timerstate); 571 atomic_store_rel_int(&state->action, 572 (cpu == curcpu) ? 0 : ( start ? 1 : 2)); 573 } 574 /* Broadcast reconfigure IPI. */ 575 ipi_all_but_self(IPI_HARDCLOCK); 576 /* Wait for reconfiguration completed. */ 577 restart: 578 cpu_spinwait(); 579 CPU_FOREACH(cpu) { 580 if (cpu == curcpu) 581 continue; 582 state = DPCPU_ID_PTR(cpu, timerstate); 583 if (atomic_load_acq_int(&state->action)) 584 goto restart; 585 } 586 #endif 587 critical_exit(); 588 } 589 590 /* 591 * Calculate nearest frequency supported by hardware timer. 592 */ 593 static int 594 round_freq(struct eventtimer *et, int freq) 595 { 596 uint64_t div; 597 598 if (et->et_frequency != 0) { 599 div = lmax((et->et_frequency + freq / 2) / freq, 1); 600 if (et->et_flags & ET_FLAGS_POW2DIV) 601 div = 1 << (flsl(div + div / 2) - 1); 602 freq = (et->et_frequency + div / 2) / div; 603 } 604 if (et->et_min_period.sec > 0) 605 panic("Event timer \"%s\" doesn't support sub-second periods!", 606 et->et_name); 607 else if (et->et_min_period.frac != 0) 608 freq = min(freq, BT2FREQ(&et->et_min_period)); 609 if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0) 610 freq = max(freq, BT2FREQ(&et->et_max_period)); 611 return (freq); 612 } 613 614 /* 615 * Configure and start event timers (BSP part). 616 */ 617 void 618 cpu_initclocks_bsp(void) 619 { 620 struct pcpu_state *state; 621 int base, div, cpu; 622 623 mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN); 624 CPU_FOREACH(cpu) { 625 state = DPCPU_ID_PTR(cpu, timerstate); 626 mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN); 627 #ifdef KDTRACE_HOOKS 628 state->nextcyc.sec = -1; 629 #endif 630 } 631 #ifdef SMP 632 callout_new_inserted = cpu_new_callout; 633 #endif 634 periodic = want_periodic; 635 /* Grab requested timer or the best of present. */ 636 if (timername[0]) 637 timer = et_find(timername, 0, 0); 638 if (timer == NULL && periodic) { 639 timer = et_find(NULL, 640 ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC); 641 } 642 if (timer == NULL) { 643 timer = et_find(NULL, 644 ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT); 645 } 646 if (timer == NULL && !periodic) { 647 timer = et_find(NULL, 648 ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC); 649 } 650 if (timer == NULL) 651 panic("No usable event timer found!"); 652 et_init(timer, timercb, NULL, NULL); 653 654 /* Adapt to timer capabilities. */ 655 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0) 656 periodic = 0; 657 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0) 658 periodic = 1; 659 if (timer->et_flags & ET_FLAGS_C3STOP) 660 cpu_disable_deep_sleep++; 661 662 /* 663 * We honor the requested 'hz' value. 664 * We want to run stathz in the neighborhood of 128hz. 665 * We would like profhz to run as often as possible. 666 */ 667 if (singlemul <= 0 || singlemul > 20) { 668 if (hz >= 1500 || (hz % 128) == 0) 669 singlemul = 1; 670 else if (hz >= 750) 671 singlemul = 2; 672 else 673 singlemul = 4; 674 } 675 if (periodic) { 676 base = round_freq(timer, hz * singlemul); 677 singlemul = max((base + hz / 2) / hz, 1); 678 hz = (base + singlemul / 2) / singlemul; 679 if (base <= 128) 680 stathz = base; 681 else { 682 div = base / 128; 683 if (div >= singlemul && (div % singlemul) == 0) 684 div++; 685 stathz = base / div; 686 } 687 profhz = stathz; 688 while ((profhz + stathz) <= 128 * 64) 689 profhz += stathz; 690 profhz = round_freq(timer, profhz); 691 } else { 692 hz = round_freq(timer, hz); 693 stathz = round_freq(timer, 127); 694 profhz = round_freq(timer, stathz * 64); 695 } 696 tick = 1000000 / hz; 697 FREQ2BT(hz, &hardperiod); 698 FREQ2BT(stathz, &statperiod); 699 FREQ2BT(profhz, &profperiod); 700 ET_LOCK(); 701 configtimer(1); 702 ET_UNLOCK(); 703 } 704 705 /* 706 * Start per-CPU event timers on APs. 707 */ 708 void 709 cpu_initclocks_ap(void) 710 { 711 struct bintime now; 712 struct pcpu_state *state; 713 714 state = DPCPU_PTR(timerstate); 715 binuptime(&now); 716 ET_HW_LOCK(state); 717 state->now = now; 718 hardclock_sync(curcpu); 719 handleevents(&state->now, 2); 720 if (timer->et_flags & ET_FLAGS_PERCPU) 721 loadtimer(&now, 1); 722 ET_HW_UNLOCK(state); 723 } 724 725 /* 726 * Switch to profiling clock rates. 727 */ 728 void 729 cpu_startprofclock(void) 730 { 731 732 ET_LOCK(); 733 if (periodic) { 734 configtimer(0); 735 profiling = 1; 736 configtimer(1); 737 } else 738 profiling = 1; 739 ET_UNLOCK(); 740 } 741 742 /* 743 * Switch to regular clock rates. 744 */ 745 void 746 cpu_stopprofclock(void) 747 { 748 749 ET_LOCK(); 750 if (periodic) { 751 configtimer(0); 752 profiling = 0; 753 configtimer(1); 754 } else 755 profiling = 0; 756 ET_UNLOCK(); 757 } 758 759 /* 760 * Switch to idle mode (all ticks handled). 761 */ 762 void 763 cpu_idleclock(void) 764 { 765 struct bintime now, t; 766 struct pcpu_state *state; 767 768 if (idletick || busy || 769 (periodic && (timer->et_flags & ET_FLAGS_PERCPU)) 770 #ifdef DEVICE_POLLING 771 || curcpu == CPU_FIRST() 772 #endif 773 ) 774 return; 775 state = DPCPU_PTR(timerstate); 776 if (periodic) 777 now = state->now; 778 else 779 binuptime(&now); 780 CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x", 781 curcpu, now.sec, (u_int)(now.frac >> 32), 782 (u_int)(now.frac & 0xffffffff)); 783 getnextcpuevent(&t, 1); 784 ET_HW_LOCK(state); 785 state->idle = 1; 786 state->nextevent = t; 787 if (!periodic) 788 loadtimer(&now, 0); 789 ET_HW_UNLOCK(state); 790 } 791 792 /* 793 * Switch to active mode (skip empty ticks). 794 */ 795 void 796 cpu_activeclock(void) 797 { 798 struct bintime now; 799 struct pcpu_state *state; 800 struct thread *td; 801 802 state = DPCPU_PTR(timerstate); 803 if (state->idle == 0 || busy) 804 return; 805 if (periodic) 806 now = state->now; 807 else 808 binuptime(&now); 809 CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x", 810 curcpu, now.sec, (u_int)(now.frac >> 32), 811 (u_int)(now.frac & 0xffffffff)); 812 spinlock_enter(); 813 td = curthread; 814 td->td_intr_nesting_level++; 815 handleevents(&now, 1); 816 td->td_intr_nesting_level--; 817 spinlock_exit(); 818 } 819 820 #ifdef KDTRACE_HOOKS 821 void 822 clocksource_cyc_set(const struct bintime *t) 823 { 824 struct bintime now; 825 struct pcpu_state *state; 826 827 state = DPCPU_PTR(timerstate); 828 if (periodic) 829 now = state->now; 830 else 831 binuptime(&now); 832 833 CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x", 834 curcpu, now.sec, (u_int)(now.frac >> 32), 835 (u_int)(now.frac & 0xffffffff)); 836 CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x", 837 curcpu, t->sec, (u_int)(t->frac >> 32), 838 (u_int)(t->frac & 0xffffffff)); 839 840 ET_HW_LOCK(state); 841 if (bintime_cmp(t, &state->nextcyc, ==)) { 842 ET_HW_UNLOCK(state); 843 return; 844 } 845 state->nextcyc = *t; 846 if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) { 847 ET_HW_UNLOCK(state); 848 return; 849 } 850 state->nextevent = state->nextcyc; 851 if (!periodic) 852 loadtimer(&now, 0); 853 ET_HW_UNLOCK(state); 854 } 855 #endif 856 857 #ifdef SMP 858 static void 859 cpu_new_callout(int cpu, int ticks) 860 { 861 struct bintime tmp; 862 struct pcpu_state *state; 863 864 CTR3(KTR_SPARE2, "new co at %d: on %d in %d", 865 curcpu, cpu, ticks); 866 state = DPCPU_ID_PTR(cpu, timerstate); 867 ET_HW_LOCK(state); 868 if (state->idle == 0 || busy) { 869 ET_HW_UNLOCK(state); 870 return; 871 } 872 /* 873 * If timer is periodic - just update next event time for target CPU. 874 * If timer is global - there is chance it is already programmed. 875 */ 876 if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) { 877 tmp = hardperiod; 878 bintime_mul(&tmp, ticks - 1); 879 bintime_add(&tmp, &state->nexthard); 880 if (bintime_cmp(&tmp, &state->nextevent, <)) 881 state->nextevent = tmp; 882 if (periodic || 883 bintime_cmp(&state->nextevent, &nexttick, >=)) { 884 ET_HW_UNLOCK(state); 885 return; 886 } 887 } 888 /* 889 * Otherwise we have to wake that CPU up, as we can't get present 890 * bintime to reprogram global timer from here. If timer is per-CPU, 891 * we by definition can't do it from here. 892 */ 893 ET_HW_UNLOCK(state); 894 if (timer->et_flags & ET_FLAGS_PERCPU) { 895 state->handle = 1; 896 ipi_cpu(cpu, IPI_HARDCLOCK); 897 } else { 898 if (!cpu_idle_wakeup(cpu)) 899 ipi_cpu(cpu, IPI_AST); 900 } 901 } 902 #endif 903 904 /* 905 * Report or change the active event timers hardware. 906 */ 907 static int 908 sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS) 909 { 910 char buf[32]; 911 struct eventtimer *et; 912 int error; 913 914 ET_LOCK(); 915 et = timer; 916 snprintf(buf, sizeof(buf), "%s", et->et_name); 917 ET_UNLOCK(); 918 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 919 ET_LOCK(); 920 et = timer; 921 if (error != 0 || req->newptr == NULL || 922 strcasecmp(buf, et->et_name) == 0) { 923 ET_UNLOCK(); 924 return (error); 925 } 926 et = et_find(buf, 0, 0); 927 if (et == NULL) { 928 ET_UNLOCK(); 929 return (ENOENT); 930 } 931 configtimer(0); 932 et_free(timer); 933 if (et->et_flags & ET_FLAGS_C3STOP) 934 cpu_disable_deep_sleep++; 935 if (timer->et_flags & ET_FLAGS_C3STOP) 936 cpu_disable_deep_sleep--; 937 periodic = want_periodic; 938 timer = et; 939 et_init(timer, timercb, NULL, NULL); 940 configtimer(1); 941 ET_UNLOCK(); 942 return (error); 943 } 944 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer, 945 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 946 0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer"); 947 948 /* 949 * Report or change the active event timer periodicity. 950 */ 951 static int 952 sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS) 953 { 954 int error, val; 955 956 val = periodic; 957 error = sysctl_handle_int(oidp, &val, 0, req); 958 if (error != 0 || req->newptr == NULL) 959 return (error); 960 ET_LOCK(); 961 configtimer(0); 962 periodic = want_periodic = val; 963 configtimer(1); 964 ET_UNLOCK(); 965 return (error); 966 } 967 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic, 968 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 969 0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode"); 970