1 /*- 2 * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 /* 31 * Common routines to manage event timers hardware. 32 */ 33 34 #include "opt_device_polling.h" 35 #include "opt_kdtrace.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/bus.h> 40 #include <sys/lock.h> 41 #include <sys/kdb.h> 42 #include <sys/ktr.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/kernel.h> 46 #include <sys/sched.h> 47 #include <sys/smp.h> 48 #include <sys/sysctl.h> 49 #include <sys/timeet.h> 50 #include <sys/timetc.h> 51 52 #include <machine/atomic.h> 53 #include <machine/clock.h> 54 #include <machine/cpu.h> 55 #include <machine/smp.h> 56 57 #ifdef KDTRACE_HOOKS 58 #include <sys/dtrace_bsd.h> 59 cyclic_clock_func_t cyclic_clock_func = NULL; 60 #endif 61 62 int cpu_can_deep_sleep = 0; /* C3 state is available. */ 63 int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */ 64 65 static void setuptimer(void); 66 static void loadtimer(struct bintime *now, int first); 67 static int doconfigtimer(void); 68 static void configtimer(int start); 69 static int round_freq(struct eventtimer *et, int freq); 70 71 static void getnextcpuevent(struct bintime *event, int idle); 72 static void getnextevent(struct bintime *event); 73 static int handleevents(struct bintime *now, int fake); 74 #ifdef SMP 75 static void cpu_new_callout(int cpu, int ticks); 76 #endif 77 78 static struct mtx et_hw_mtx; 79 80 #define ET_HW_LOCK(state) \ 81 { \ 82 if (timer->et_flags & ET_FLAGS_PERCPU) \ 83 mtx_lock_spin(&(state)->et_hw_mtx); \ 84 else \ 85 mtx_lock_spin(&et_hw_mtx); \ 86 } 87 88 #define ET_HW_UNLOCK(state) \ 89 { \ 90 if (timer->et_flags & ET_FLAGS_PERCPU) \ 91 mtx_unlock_spin(&(state)->et_hw_mtx); \ 92 else \ 93 mtx_unlock_spin(&et_hw_mtx); \ 94 } 95 96 static struct eventtimer *timer = NULL; 97 static struct bintime timerperiod; /* Timer period for periodic mode. */ 98 static struct bintime hardperiod; /* hardclock() events period. */ 99 static struct bintime statperiod; /* statclock() events period. */ 100 static struct bintime profperiod; /* profclock() events period. */ 101 static struct bintime nexttick; /* Next global timer tick time. */ 102 static struct bintime nexthard; /* Next global hardlock() event. */ 103 static u_int busy = 0; /* Reconfiguration is in progress. */ 104 static int profiling = 0; /* Profiling events enabled. */ 105 106 static char timername[32]; /* Wanted timer. */ 107 TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername)); 108 109 static int singlemul = 0; /* Multiplier for periodic mode. */ 110 TUNABLE_INT("kern.eventtimer.singlemul", &singlemul); 111 SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul, 112 0, "Multiplier for periodic mode"); 113 114 static u_int idletick = 0; /* Run periodic events when idle. */ 115 TUNABLE_INT("kern.eventtimer.idletick", &idletick); 116 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick, 117 0, "Run periodic events when idle"); 118 119 static u_int activetick = 1; /* Run all periodic events when active. */ 120 TUNABLE_INT("kern.eventtimer.activetick", &activetick); 121 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick, 122 0, "Run all periodic events when active"); 123 124 static int periodic = 0; /* Periodic or one-shot mode. */ 125 static int want_periodic = 0; /* What mode to prefer. */ 126 TUNABLE_INT("kern.eventtimer.periodic", &want_periodic); 127 128 struct pcpu_state { 129 struct mtx et_hw_mtx; /* Per-CPU timer mutex. */ 130 u_int action; /* Reconfiguration requests. */ 131 u_int handle; /* Immediate handle resuests. */ 132 struct bintime now; /* Last tick time. */ 133 struct bintime nextevent; /* Next scheduled event on this CPU. */ 134 struct bintime nexttick; /* Next timer tick time. */ 135 struct bintime nexthard; /* Next hardlock() event. */ 136 struct bintime nextstat; /* Next statclock() event. */ 137 struct bintime nextprof; /* Next profclock() event. */ 138 #ifdef KDTRACE_HOOKS 139 struct bintime nextcyc; /* Next OpenSolaris cyclics event. */ 140 #endif 141 int ipi; /* This CPU needs IPI. */ 142 int idle; /* This CPU is in idle mode. */ 143 }; 144 145 static DPCPU_DEFINE(struct pcpu_state, timerstate); 146 147 #define FREQ2BT(freq, bt) \ 148 { \ 149 (bt)->sec = 0; \ 150 (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \ 151 } 152 #define BT2FREQ(bt) \ 153 (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \ 154 ((bt)->frac >> 1)) 155 156 /* 157 * Timer broadcast IPI handler. 158 */ 159 int 160 hardclockintr(void) 161 { 162 struct bintime now; 163 struct pcpu_state *state; 164 int done; 165 166 if (doconfigtimer() || busy) 167 return (FILTER_HANDLED); 168 state = DPCPU_PTR(timerstate); 169 now = state->now; 170 CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x", 171 curcpu, now.sec, (u_int)(now.frac >> 32), 172 (u_int)(now.frac & 0xffffffff)); 173 done = handleevents(&now, 0); 174 return (done ? FILTER_HANDLED : FILTER_STRAY); 175 } 176 177 /* 178 * Handle all events for specified time on this CPU 179 */ 180 static int 181 handleevents(struct bintime *now, int fake) 182 { 183 struct bintime t; 184 struct trapframe *frame; 185 struct pcpu_state *state; 186 uintfptr_t pc; 187 int usermode; 188 int done, runs; 189 190 CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x", 191 curcpu, now->sec, (u_int)(now->frac >> 32), 192 (u_int)(now->frac & 0xffffffff)); 193 done = 0; 194 if (fake) { 195 frame = NULL; 196 usermode = 0; 197 pc = 0; 198 } else { 199 frame = curthread->td_intr_frame; 200 usermode = TRAPF_USERMODE(frame); 201 pc = TRAPF_PC(frame); 202 } 203 204 state = DPCPU_PTR(timerstate); 205 206 runs = 0; 207 while (bintime_cmp(now, &state->nexthard, >=)) { 208 bintime_addx(&state->nexthard, hardperiod.frac); 209 runs++; 210 } 211 if (runs) { 212 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && 213 bintime_cmp(&state->nexthard, &nexthard, >)) 214 nexthard = state->nexthard; 215 if (fake < 2) { 216 hardclock_cnt(runs, usermode); 217 done = 1; 218 } 219 } 220 runs = 0; 221 while (bintime_cmp(now, &state->nextstat, >=)) { 222 bintime_addx(&state->nextstat, statperiod.frac); 223 runs++; 224 } 225 if (runs && fake < 2) { 226 statclock_cnt(runs, usermode); 227 done = 1; 228 } 229 if (profiling) { 230 runs = 0; 231 while (bintime_cmp(now, &state->nextprof, >=)) { 232 bintime_addx(&state->nextprof, profperiod.frac); 233 runs++; 234 } 235 if (runs && !fake) { 236 profclock_cnt(runs, usermode, pc); 237 done = 1; 238 } 239 } else 240 state->nextprof = state->nextstat; 241 242 #ifdef KDTRACE_HOOKS 243 if (fake == 0 && cyclic_clock_func != NULL && 244 state->nextcyc.sec != -1 && 245 bintime_cmp(now, &state->nextcyc, >=)) { 246 state->nextcyc.sec = -1; 247 (*cyclic_clock_func)(frame); 248 } 249 #endif 250 251 getnextcpuevent(&t, 0); 252 if (fake == 2) { 253 state->nextevent = t; 254 return (done); 255 } 256 ET_HW_LOCK(state); 257 if (!busy) { 258 state->idle = 0; 259 state->nextevent = t; 260 loadtimer(now, 0); 261 } 262 ET_HW_UNLOCK(state); 263 return (done); 264 } 265 266 /* 267 * Schedule binuptime of the next event on current CPU. 268 */ 269 static void 270 getnextcpuevent(struct bintime *event, int idle) 271 { 272 struct bintime tmp; 273 struct pcpu_state *state; 274 int skip; 275 276 state = DPCPU_PTR(timerstate); 277 /* Handle hardclock() events. */ 278 *event = state->nexthard; 279 if (idle || (!activetick && !profiling && 280 (timer->et_flags & ET_FLAGS_PERCPU) == 0)) { 281 skip = idle ? 4 : (stathz / 2); 282 if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip) 283 skip = tc_min_ticktock_freq; 284 skip = callout_tickstofirst(hz / skip) - 1; 285 CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip); 286 tmp = hardperiod; 287 bintime_mul(&tmp, skip); 288 bintime_add(event, &tmp); 289 } 290 if (!idle) { /* If CPU is active - handle other types of events. */ 291 if (bintime_cmp(event, &state->nextstat, >)) 292 *event = state->nextstat; 293 if (profiling && bintime_cmp(event, &state->nextprof, >)) 294 *event = state->nextprof; 295 } 296 #ifdef KDTRACE_HOOKS 297 if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >)) 298 *event = state->nextcyc; 299 #endif 300 } 301 302 /* 303 * Schedule binuptime of the next event on all CPUs. 304 */ 305 static void 306 getnextevent(struct bintime *event) 307 { 308 struct pcpu_state *state; 309 #ifdef SMP 310 int cpu; 311 #endif 312 int c, nonidle; 313 314 state = DPCPU_PTR(timerstate); 315 *event = state->nextevent; 316 c = curcpu; 317 nonidle = !state->idle; 318 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) { 319 #ifdef SMP 320 if (smp_started) { 321 CPU_FOREACH(cpu) { 322 if (curcpu == cpu) 323 continue; 324 state = DPCPU_ID_PTR(cpu, timerstate); 325 nonidle += !state->idle; 326 if (bintime_cmp(event, &state->nextevent, >)) { 327 *event = state->nextevent; 328 c = cpu; 329 } 330 } 331 } 332 #endif 333 if (nonidle != 0 && bintime_cmp(event, &nexthard, >)) 334 *event = nexthard; 335 } 336 CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d", 337 curcpu, event->sec, (u_int)(event->frac >> 32), 338 (u_int)(event->frac & 0xffffffff), c); 339 } 340 341 /* Hardware timer callback function. */ 342 static void 343 timercb(struct eventtimer *et, void *arg) 344 { 345 struct bintime now; 346 struct bintime *next; 347 struct pcpu_state *state; 348 #ifdef SMP 349 int cpu, bcast; 350 #endif 351 352 /* Do not touch anything if somebody reconfiguring timers. */ 353 if (busy) 354 return; 355 /* Update present and next tick times. */ 356 state = DPCPU_PTR(timerstate); 357 if (et->et_flags & ET_FLAGS_PERCPU) { 358 next = &state->nexttick; 359 } else 360 next = &nexttick; 361 binuptime(&now); 362 if (periodic) { 363 *next = now; 364 bintime_addx(next, timerperiod.frac); /* Next tick in 1 period. */ 365 } else 366 next->sec = -1; /* Next tick is not scheduled yet. */ 367 state->now = now; 368 CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x", 369 curcpu, (int)(now.sec), (u_int)(now.frac >> 32), 370 (u_int)(now.frac & 0xffffffff)); 371 372 #ifdef SMP 373 /* Prepare broadcasting to other CPUs for non-per-CPU timers. */ 374 bcast = 0; 375 if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) { 376 CPU_FOREACH(cpu) { 377 state = DPCPU_ID_PTR(cpu, timerstate); 378 ET_HW_LOCK(state); 379 state->now = now; 380 if (bintime_cmp(&now, &state->nextevent, >=)) { 381 state->nextevent.sec++; 382 if (curcpu != cpu) { 383 state->ipi = 1; 384 bcast = 1; 385 } 386 } 387 ET_HW_UNLOCK(state); 388 } 389 } 390 #endif 391 392 /* Handle events for this time on this CPU. */ 393 handleevents(&now, 0); 394 395 #ifdef SMP 396 /* Broadcast interrupt to other CPUs for non-per-CPU timers. */ 397 if (bcast) { 398 CPU_FOREACH(cpu) { 399 if (curcpu == cpu) 400 continue; 401 state = DPCPU_ID_PTR(cpu, timerstate); 402 if (state->ipi) { 403 state->ipi = 0; 404 ipi_cpu(cpu, IPI_HARDCLOCK); 405 } 406 } 407 } 408 #endif 409 } 410 411 /* 412 * Load new value into hardware timer. 413 */ 414 static void 415 loadtimer(struct bintime *now, int start) 416 { 417 struct pcpu_state *state; 418 struct bintime new; 419 struct bintime *next; 420 uint64_t tmp; 421 int eq; 422 423 if (timer->et_flags & ET_FLAGS_PERCPU) { 424 state = DPCPU_PTR(timerstate); 425 next = &state->nexttick; 426 } else 427 next = &nexttick; 428 if (periodic) { 429 if (start) { 430 /* 431 * Try to start all periodic timers aligned 432 * to period to make events synchronous. 433 */ 434 tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28); 435 tmp = (tmp % (timerperiod.frac >> 28)) << 28; 436 new.sec = 0; 437 new.frac = timerperiod.frac - tmp; 438 if (new.frac < tmp) /* Left less then passed. */ 439 bintime_addx(&new, timerperiod.frac); 440 CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x", 441 curcpu, now->sec, (u_int)(now->frac >> 32), 442 new.sec, (u_int)(new.frac >> 32)); 443 *next = new; 444 bintime_add(next, now); 445 et_start(timer, &new, &timerperiod); 446 } 447 } else { 448 getnextevent(&new); 449 eq = bintime_cmp(&new, next, ==); 450 CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d", 451 curcpu, new.sec, (u_int)(new.frac >> 32), 452 (u_int)(new.frac & 0xffffffff), 453 eq); 454 if (!eq) { 455 *next = new; 456 bintime_sub(&new, now); 457 et_start(timer, &new, NULL); 458 } 459 } 460 } 461 462 /* 463 * Prepare event timer parameters after configuration changes. 464 */ 465 static void 466 setuptimer(void) 467 { 468 int freq; 469 470 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0) 471 periodic = 0; 472 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0) 473 periodic = 1; 474 singlemul = MIN(MAX(singlemul, 1), 20); 475 freq = hz * singlemul; 476 while (freq < (profiling ? profhz : stathz)) 477 freq += hz; 478 freq = round_freq(timer, freq); 479 FREQ2BT(freq, &timerperiod); 480 } 481 482 /* 483 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler. 484 */ 485 static int 486 doconfigtimer(void) 487 { 488 struct bintime now; 489 struct pcpu_state *state; 490 491 state = DPCPU_PTR(timerstate); 492 switch (atomic_load_acq_int(&state->action)) { 493 case 1: 494 binuptime(&now); 495 ET_HW_LOCK(state); 496 loadtimer(&now, 1); 497 ET_HW_UNLOCK(state); 498 state->handle = 0; 499 atomic_store_rel_int(&state->action, 0); 500 return (1); 501 case 2: 502 ET_HW_LOCK(state); 503 et_stop(timer); 504 ET_HW_UNLOCK(state); 505 state->handle = 0; 506 atomic_store_rel_int(&state->action, 0); 507 return (1); 508 } 509 if (atomic_readandclear_int(&state->handle) && !busy) { 510 binuptime(&now); 511 handleevents(&now, 0); 512 return (1); 513 } 514 return (0); 515 } 516 517 /* 518 * Reconfigure specified timer. 519 * For per-CPU timers use IPI to make other CPUs to reconfigure. 520 */ 521 static void 522 configtimer(int start) 523 { 524 struct bintime now, next; 525 struct pcpu_state *state; 526 int cpu; 527 528 if (start) { 529 setuptimer(); 530 binuptime(&now); 531 } 532 critical_enter(); 533 ET_HW_LOCK(DPCPU_PTR(timerstate)); 534 if (start) { 535 /* Initialize time machine parameters. */ 536 next = now; 537 bintime_addx(&next, timerperiod.frac); 538 if (periodic) 539 nexttick = next; 540 else 541 nexttick.sec = -1; 542 CPU_FOREACH(cpu) { 543 state = DPCPU_ID_PTR(cpu, timerstate); 544 state->now = now; 545 state->nextevent = next; 546 if (periodic) 547 state->nexttick = next; 548 else 549 state->nexttick.sec = -1; 550 state->nexthard = next; 551 state->nextstat = next; 552 state->nextprof = next; 553 hardclock_sync(cpu); 554 } 555 busy = 0; 556 /* Start global timer or per-CPU timer of this CPU. */ 557 loadtimer(&now, 1); 558 } else { 559 busy = 1; 560 /* Stop global timer or per-CPU timer of this CPU. */ 561 et_stop(timer); 562 } 563 ET_HW_UNLOCK(DPCPU_PTR(timerstate)); 564 #ifdef SMP 565 /* If timer is global or there is no other CPUs yet - we are done. */ 566 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) { 567 critical_exit(); 568 return; 569 } 570 /* Set reconfigure flags for other CPUs. */ 571 CPU_FOREACH(cpu) { 572 state = DPCPU_ID_PTR(cpu, timerstate); 573 atomic_store_rel_int(&state->action, 574 (cpu == curcpu) ? 0 : ( start ? 1 : 2)); 575 } 576 /* Broadcast reconfigure IPI. */ 577 ipi_all_but_self(IPI_HARDCLOCK); 578 /* Wait for reconfiguration completed. */ 579 restart: 580 cpu_spinwait(); 581 CPU_FOREACH(cpu) { 582 if (cpu == curcpu) 583 continue; 584 state = DPCPU_ID_PTR(cpu, timerstate); 585 if (atomic_load_acq_int(&state->action)) 586 goto restart; 587 } 588 #endif 589 critical_exit(); 590 } 591 592 /* 593 * Calculate nearest frequency supported by hardware timer. 594 */ 595 static int 596 round_freq(struct eventtimer *et, int freq) 597 { 598 uint64_t div; 599 600 if (et->et_frequency != 0) { 601 div = lmax((et->et_frequency + freq / 2) / freq, 1); 602 if (et->et_flags & ET_FLAGS_POW2DIV) 603 div = 1 << (flsl(div + div / 2) - 1); 604 freq = (et->et_frequency + div / 2) / div; 605 } 606 if (et->et_min_period.sec > 0) 607 panic("Event timer \"%s\" doesn't support sub-second periods!", 608 et->et_name); 609 else if (et->et_min_period.frac != 0) 610 freq = min(freq, BT2FREQ(&et->et_min_period)); 611 if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0) 612 freq = max(freq, BT2FREQ(&et->et_max_period)); 613 return (freq); 614 } 615 616 /* 617 * Configure and start event timers (BSP part). 618 */ 619 void 620 cpu_initclocks_bsp(void) 621 { 622 struct pcpu_state *state; 623 int base, div, cpu; 624 625 mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN); 626 CPU_FOREACH(cpu) { 627 state = DPCPU_ID_PTR(cpu, timerstate); 628 mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN); 629 #ifdef KDTRACE_HOOKS 630 state->nextcyc.sec = -1; 631 #endif 632 } 633 #ifdef SMP 634 callout_new_inserted = cpu_new_callout; 635 #endif 636 periodic = want_periodic; 637 /* Grab requested timer or the best of present. */ 638 if (timername[0]) 639 timer = et_find(timername, 0, 0); 640 if (timer == NULL && periodic) { 641 timer = et_find(NULL, 642 ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC); 643 } 644 if (timer == NULL) { 645 timer = et_find(NULL, 646 ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT); 647 } 648 if (timer == NULL && !periodic) { 649 timer = et_find(NULL, 650 ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC); 651 } 652 if (timer == NULL) 653 panic("No usable event timer found!"); 654 et_init(timer, timercb, NULL, NULL); 655 656 /* Adapt to timer capabilities. */ 657 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0) 658 periodic = 0; 659 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0) 660 periodic = 1; 661 if (timer->et_flags & ET_FLAGS_C3STOP) 662 cpu_disable_deep_sleep++; 663 664 /* 665 * We honor the requested 'hz' value. 666 * We want to run stathz in the neighborhood of 128hz. 667 * We would like profhz to run as often as possible. 668 */ 669 if (singlemul <= 0 || singlemul > 20) { 670 if (hz >= 1500 || (hz % 128) == 0) 671 singlemul = 1; 672 else if (hz >= 750) 673 singlemul = 2; 674 else 675 singlemul = 4; 676 } 677 if (periodic) { 678 base = round_freq(timer, hz * singlemul); 679 singlemul = max((base + hz / 2) / hz, 1); 680 hz = (base + singlemul / 2) / singlemul; 681 if (base <= 128) 682 stathz = base; 683 else { 684 div = base / 128; 685 if (div >= singlemul && (div % singlemul) == 0) 686 div++; 687 stathz = base / div; 688 } 689 profhz = stathz; 690 while ((profhz + stathz) <= 128 * 64) 691 profhz += stathz; 692 profhz = round_freq(timer, profhz); 693 } else { 694 hz = round_freq(timer, hz); 695 stathz = round_freq(timer, 127); 696 profhz = round_freq(timer, stathz * 64); 697 } 698 tick = 1000000 / hz; 699 FREQ2BT(hz, &hardperiod); 700 FREQ2BT(stathz, &statperiod); 701 FREQ2BT(profhz, &profperiod); 702 ET_LOCK(); 703 configtimer(1); 704 ET_UNLOCK(); 705 } 706 707 /* 708 * Start per-CPU event timers on APs. 709 */ 710 void 711 cpu_initclocks_ap(void) 712 { 713 struct bintime now; 714 struct pcpu_state *state; 715 716 state = DPCPU_PTR(timerstate); 717 binuptime(&now); 718 ET_HW_LOCK(state); 719 state->now = now; 720 hardclock_sync(curcpu); 721 handleevents(&state->now, 2); 722 if (timer->et_flags & ET_FLAGS_PERCPU) 723 loadtimer(&now, 1); 724 ET_HW_UNLOCK(state); 725 } 726 727 /* 728 * Switch to profiling clock rates. 729 */ 730 void 731 cpu_startprofclock(void) 732 { 733 734 ET_LOCK(); 735 if (periodic) { 736 configtimer(0); 737 profiling = 1; 738 configtimer(1); 739 } else 740 profiling = 1; 741 ET_UNLOCK(); 742 } 743 744 /* 745 * Switch to regular clock rates. 746 */ 747 void 748 cpu_stopprofclock(void) 749 { 750 751 ET_LOCK(); 752 if (periodic) { 753 configtimer(0); 754 profiling = 0; 755 configtimer(1); 756 } else 757 profiling = 0; 758 ET_UNLOCK(); 759 } 760 761 /* 762 * Switch to idle mode (all ticks handled). 763 */ 764 void 765 cpu_idleclock(void) 766 { 767 struct bintime now, t; 768 struct pcpu_state *state; 769 770 if (idletick || busy || 771 (periodic && (timer->et_flags & ET_FLAGS_PERCPU)) 772 #ifdef DEVICE_POLLING 773 || curcpu == CPU_FIRST() 774 #endif 775 ) 776 return; 777 state = DPCPU_PTR(timerstate); 778 if (periodic) 779 now = state->now; 780 else 781 binuptime(&now); 782 CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x", 783 curcpu, now.sec, (u_int)(now.frac >> 32), 784 (u_int)(now.frac & 0xffffffff)); 785 getnextcpuevent(&t, 1); 786 ET_HW_LOCK(state); 787 state->idle = 1; 788 state->nextevent = t; 789 if (!periodic) 790 loadtimer(&now, 0); 791 ET_HW_UNLOCK(state); 792 } 793 794 /* 795 * Switch to active mode (skip empty ticks). 796 */ 797 void 798 cpu_activeclock(void) 799 { 800 struct bintime now; 801 struct pcpu_state *state; 802 struct thread *td; 803 804 state = DPCPU_PTR(timerstate); 805 if (state->idle == 0 || busy) 806 return; 807 if (periodic) 808 now = state->now; 809 else 810 binuptime(&now); 811 CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x", 812 curcpu, now.sec, (u_int)(now.frac >> 32), 813 (u_int)(now.frac & 0xffffffff)); 814 spinlock_enter(); 815 td = curthread; 816 td->td_intr_nesting_level++; 817 handleevents(&now, 1); 818 td->td_intr_nesting_level--; 819 spinlock_exit(); 820 } 821 822 #ifdef KDTRACE_HOOKS 823 void 824 clocksource_cyc_set(const struct bintime *t) 825 { 826 struct bintime now; 827 struct pcpu_state *state; 828 829 state = DPCPU_PTR(timerstate); 830 if (periodic) 831 now = state->now; 832 else 833 binuptime(&now); 834 835 CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x", 836 curcpu, now.sec, (u_int)(now.frac >> 32), 837 (u_int)(now.frac & 0xffffffff)); 838 CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x", 839 curcpu, t->sec, (u_int)(t->frac >> 32), 840 (u_int)(t->frac & 0xffffffff)); 841 842 ET_HW_LOCK(state); 843 if (bintime_cmp(t, &state->nextcyc, ==)) { 844 ET_HW_UNLOCK(state); 845 return; 846 } 847 state->nextcyc = *t; 848 if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) { 849 ET_HW_UNLOCK(state); 850 return; 851 } 852 state->nextevent = state->nextcyc; 853 if (!periodic) 854 loadtimer(&now, 0); 855 ET_HW_UNLOCK(state); 856 } 857 #endif 858 859 #ifdef SMP 860 static void 861 cpu_new_callout(int cpu, int ticks) 862 { 863 struct bintime tmp; 864 struct pcpu_state *state; 865 866 CTR3(KTR_SPARE2, "new co at %d: on %d in %d", 867 curcpu, cpu, ticks); 868 state = DPCPU_ID_PTR(cpu, timerstate); 869 ET_HW_LOCK(state); 870 if (state->idle == 0 || busy) { 871 ET_HW_UNLOCK(state); 872 return; 873 } 874 /* 875 * If timer is periodic - just update next event time for target CPU. 876 * If timer is global - there is chance it is already programmed. 877 */ 878 if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) { 879 tmp = hardperiod; 880 bintime_mul(&tmp, ticks - 1); 881 bintime_add(&tmp, &state->nexthard); 882 if (bintime_cmp(&tmp, &state->nextevent, <)) 883 state->nextevent = tmp; 884 if (periodic || 885 bintime_cmp(&state->nextevent, &nexttick, >=)) { 886 ET_HW_UNLOCK(state); 887 return; 888 } 889 } 890 /* 891 * Otherwise we have to wake that CPU up, as we can't get present 892 * bintime to reprogram global timer from here. If timer is per-CPU, 893 * we by definition can't do it from here. 894 */ 895 ET_HW_UNLOCK(state); 896 if (timer->et_flags & ET_FLAGS_PERCPU) { 897 state->handle = 1; 898 ipi_cpu(cpu, IPI_HARDCLOCK); 899 } else { 900 if (!cpu_idle_wakeup(cpu)) 901 ipi_cpu(cpu, IPI_AST); 902 } 903 } 904 #endif 905 906 /* 907 * Report or change the active event timers hardware. 908 */ 909 static int 910 sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS) 911 { 912 char buf[32]; 913 struct eventtimer *et; 914 int error; 915 916 ET_LOCK(); 917 et = timer; 918 snprintf(buf, sizeof(buf), "%s", et->et_name); 919 ET_UNLOCK(); 920 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 921 ET_LOCK(); 922 et = timer; 923 if (error != 0 || req->newptr == NULL || 924 strcasecmp(buf, et->et_name) == 0) { 925 ET_UNLOCK(); 926 return (error); 927 } 928 et = et_find(buf, 0, 0); 929 if (et == NULL) { 930 ET_UNLOCK(); 931 return (ENOENT); 932 } 933 configtimer(0); 934 et_free(timer); 935 if (et->et_flags & ET_FLAGS_C3STOP) 936 cpu_disable_deep_sleep++; 937 if (timer->et_flags & ET_FLAGS_C3STOP) 938 cpu_disable_deep_sleep--; 939 periodic = want_periodic; 940 timer = et; 941 et_init(timer, timercb, NULL, NULL); 942 configtimer(1); 943 ET_UNLOCK(); 944 return (error); 945 } 946 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer, 947 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 948 0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer"); 949 950 /* 951 * Report or change the active event timer periodicity. 952 */ 953 static int 954 sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS) 955 { 956 int error, val; 957 958 val = periodic; 959 error = sysctl_handle_int(oidp, &val, 0, req); 960 if (error != 0 || req->newptr == NULL) 961 return (error); 962 ET_LOCK(); 963 configtimer(0); 964 periodic = want_periodic = val; 965 configtimer(1); 966 ET_UNLOCK(); 967 return (error); 968 } 969 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic, 970 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 971 0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode"); 972