/*-
 * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func = NULL;
#endif

int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(struct bintime *now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static void		getnextcpuevent(struct bintime *event, int idle);
static void		getnextevent(struct bintime *event);
static int		handleevents(struct bintime *now, int fake);
#ifdef SMP
static void		cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}
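/*
 * Note on locking: with a per-CPU event timer each CPU serializes access
 * to its own hardware through the spin mutex embedded in its struct
 * pcpu_state below, while a single global timer makes all CPUs contend
 * on et_hw_mtx instead.  The `timer' pointer tested by these macros is
 * the active event timer chosen in cpu_initclocks_bsp().
 */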
static struct eventtimer *timer = NULL;
static struct bintime	timerperiod;	/* Timer period for periodic mode. */
static struct bintime	hardperiod;	/* hardclock() events period. */
static struct bintime	statperiod;	/* statclock() events period. */
static struct bintime	profperiod;	/* profclock() events period. */
static struct bintime	nexttick;	/* Next global timer tick time. */
static struct bintime	nexthard;	/* Next global hardclock() event. */
static u_int		busy = 0;	/* Reconfiguration is in progress. */
static int		profiling = 0;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick = 0;	/* Run periodic events when idle. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static u_int		activetick = 1;	/* Run all periodic events when active. */
TUNABLE_INT("kern.eventtimer.activetick", &activetick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick,
    0, "Run all periodic events when active");

static int		periodic = 0;	/* Periodic or one-shot mode. */
static int		want_periodic = 0; /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	struct bintime	now;		/* Last tick time. */
	struct bintime	nextevent;	/* Next scheduled event on this CPU. */
	struct bintime	nexttick;	/* Next timer tick time. */
	struct bintime	nexthard;	/* Next hardclock() event. */
	struct bintime	nextstat;	/* Next statclock() event. */
	struct bintime	nextprof;	/* Next profclock() event. */
#ifdef KDTRACE_HOOKS
	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
#endif
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

#define	FREQ2BT(freq, bt)						\
{									\
	(bt)->sec = 0;							\
	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;	\
}
#define	BT2FREQ(bt)							\
	(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /		\
	    ((bt)->frac >> 1))
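/*
 * A bintime is a whole number of seconds plus a 64-bit binary fraction
 * of a second.  FREQ2BT() therefore wants frac = 2^64 / freq, which it
 * computes without overflowing 64-bit arithmetic as (2^63 / freq) * 2;
 * BT2FREQ() inverts that, pre-adding half of the divisor to round to
 * the nearest frequency.  For example:
 *
 *	struct bintime bt;
 *	FREQ2BT(1000, &bt);	// bt.sec = 0, bt.frac ~ 2^64 / 1000 (1 ms)
 *	BT2FREQ(&bt);		// yields 1000 again
 */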
/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	struct bintime now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	done = handleevents(&now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}
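/*
 * The `fake' argument of handleevents() below encodes the calling
 * context:
 *	0 - a real timer interrupt: all due clocks are run and the
 *	    hardware is reprogrammed;
 *	1 - catching up after idle (cpu_activeclock()): clocks are run,
 *	    but with no trap frame profclock() is skipped;
 *	2 - initial scheduling (cpu_initclocks_ap()): only the next
 *	    event time is recalculated, no clocks are actually run.
 */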
/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(struct bintime *now, int fake)
{
	struct bintime t;
	struct trapframe *frame;
	struct pcpu_state *state;
	uintfptr_t pc;
	int usermode;
	int done, runs;

	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
	    curcpu, now->sec, (u_int)(now->frac >> 32),
	    (u_int)(now->frac & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
		pc = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
		pc = TRAPF_PC(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (bintime_cmp(now, &state->nexthard, >=)) {
		bintime_addx(&state->nexthard, hardperiod.frac);
		runs++;
	}
	if (runs) {
		if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 &&
		    bintime_cmp(&state->nexthard, &nexthard, >))
			nexthard = state->nexthard;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (bintime_cmp(now, &state->nextstat, >=)) {
		bintime_addx(&state->nextstat, statperiod.frac);
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (bintime_cmp(now, &state->nextprof, >=)) {
			bintime_addx(&state->nextprof, profperiod.frac);
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, pc);
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;

#ifdef KDTRACE_HOOKS
	if (fake == 0 && cyclic_clock_func != NULL &&
	    state->nextcyc.sec != -1 &&
	    bintime_cmp(now, &state->nextcyc, >=)) {
		state->nextcyc.sec = -1;
		(*cyclic_clock_func)(frame);
	}
#endif

	getnextcpuevent(&t, 0);
	if (fake == 2) {
		state->nextevent = t;
		return (done);
	}
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, 0);
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Schedule binuptime of the next event on the current CPU.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
	struct bintime tmp;
	struct pcpu_state *state;
	int skip;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events. */
	*event = state->nexthard;
	if (idle || (!activetick && !profiling &&
	    (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
		skip = idle ? 4 : (stathz / 2);
		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
			skip = tc_min_ticktock_freq;
		skip = callout_tickstofirst(hz / skip) - 1;
		CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
		tmp = hardperiod;
		bintime_mul(&tmp, skip);
		bintime_add(event, &tmp);
	}
	if (!idle) { /* If the CPU is active, handle other types of events. */
		if (bintime_cmp(event, &state->nextstat, >))
			*event = state->nextstat;
		if (profiling && bintime_cmp(event, &state->nextprof, >))
			*event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
		*event = state->nextcyc;
#endif
}

/*
 * Schedule binuptime of the next event on all CPUs.
 */
static void
getnextevent(struct bintime *event)
{
	struct pcpu_state *state;
#ifdef SMP
	int	cpu;
#endif
	int	c, nonidle;

	state = DPCPU_PTR(timerstate);
	*event = state->nextevent;
	c = curcpu;
	nonidle = !state->idle;
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#ifdef SMP
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			nonidle += !state->idle;
			if (bintime_cmp(event, &state->nextevent, >)) {
				*event = state->nextevent;
				c = cpu;
			}
		}
#endif
		if (nonidle != 0 && bintime_cmp(event, &nexthard, >))
			*event = nexthard;
	}
	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
	    curcpu, event->sec, (u_int)(event->frac >> 32),
	    (u_int)(event->frac & 0xffffffff), c);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	struct bintime now;
	struct bintime *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	binuptime(&now);
	if (periodic) {
		*next = now;
		bintime_addx(next, timerperiod.frac); /* Next tick in 1 period. */
	} else
		next->sec = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
	    curcpu, (int)(now.sec), (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (bintime_cmp(&now, &state->nextevent, >=)) {
				state->nextevent.sec++;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(&now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}
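/*
 * In periodic mode loadtimer() phase-aligns the timer: the current time
 * is converted to 36.28 fixed point (the seconds shifted left by 36
 * bits plus the top 36 bits of the fraction), reduced modulo the period
 * in the same units, and the remainder of the period is programmed as
 * the first interval, so independently started periodic timers all fire
 * on common period boundaries.
 */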
/*
 * Load new value into hardware timer.
 */
static void
loadtimer(struct bintime *now, int start)
{
	struct pcpu_state *state;
	struct bintime new;
	struct bintime *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
			tmp = (tmp % (timerperiod.frac >> 28)) << 28;
			new.sec = 0;
			new.frac = timerperiod.frac - tmp;
			if (new.frac < tmp)	/* Less left than passed. */
				bintime_addx(&new, timerperiod.frac);
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, now->sec, (u_int)(now->frac >> 32),
			    new.sec, (u_int)(new.frac >> 32));
			*next = new;
			bintime_add(next, now);
			et_start(timer, &new, &timerperiod);
		}
	} else {
		getnextevent(&new);
		eq = bintime_cmp(&new, next, ==);
		CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
		    curcpu, new.sec, (u_int)(new.frac >> 32),
		    (u_int)(new.frac & 0xffffffff),
		    eq);
		if (!eq) {
			*next = new;
			bintime_sub(&new, now);
			et_start(timer, &new, NULL);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
 */
static int
doconfigtimer(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		binuptime(&now);
		ET_HW_LOCK(state);
		loadtimer(&now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		binuptime(&now);
		handleevents(&now, 0);
		return (1);
	}
	return (0);
}
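/*
 * The IPI handshake between configtimer() and doconfigtimer() uses
 * state->action as a tiny command word: 1 asks the target CPU to
 * (re)load its per-CPU timer, 2 asks it to stop it.  The target clears
 * the word with a releasing store once done, which the initiating CPU
 * spin-waits for below.
 */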
/*
 * Reconfigure specified timer.
 * For per-CPU timers use IPI to make other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	struct bintime now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		binuptime(&now);
	}
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now;
		bintime_addx(&next, timerperiod.frac);
		if (periodic)
			nexttick = next;
		else
			nexttick.sec = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick.sec = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(&now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate nearest frequency supported by hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period.sec > 0)
		freq = 0;
	else if (et->et_min_period.frac != 0)
		freq = min(freq, BT2FREQ(&et->et_min_period));
	if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
		freq = max(freq, BT2FREQ(&et->et_max_period));
	return (freq);
}
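/*
 * For example, a 14318182 Hz timer asked for 1000 Hz gets the divisor
 * round(14318182 / 1000) = 14318 and an actual frequency of
 * round(14318182 / 14318) = 1000 Hz; with ET_FLAGS_POW2DIV the divisor
 * is instead rounded to the nearest power of two, 16384, giving 874 Hz.
 */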
/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc.sec = -1;
#endif
	}
#ifdef SMP
	callout_new_inserted = cpu_new_callout;
#endif
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128 Hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	FREQ2BT(hz, &hardperiod);
	FREQ2BT(stathz, &statperiod);
	FREQ2BT(profhz, &profperiod);
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	binuptime(&now);
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	handleevents(&state->now, 2);
	if (timer->et_flags & ET_FLAGS_PERCPU)
		loadtimer(&now, 1);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 1;
		configtimer(1);
	} else
		profiling = 1;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 0;
		configtimer(1);
	} else
		profiling = 0;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
void
cpu_idleclock(void)
{
	struct bintime now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return;
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	getnextcpuevent(&t, 1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	struct bintime now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(&now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *t)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);

	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
	    curcpu, t->sec, (u_int)(t->frac >> 32),
	    (u_int)(t->frac & 0xffffffff));

	ET_HW_LOCK(state);
	if (bintime_cmp(t, &state->nextcyc, ==)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextcyc = *t;
	if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextevent = state->nextcyc;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}
#endif
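/*
 * cpu_new_callout() is installed as the callout_new_inserted hook in
 * cpu_initclocks_bsp() above, so the callout code can notify this layer
 * when a newly inserted callout may require an earlier event on an idle
 * CPU than is currently scheduled.
 */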
#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)
{
	struct bintime tmp;
	struct pcpu_state *state;

	CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
	    curcpu, cpu, ticks);
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);
	if (state->idle == 0 || busy) {
		ET_HW_UNLOCK(state);
		return;
	}
	/*
	 * If the timer is periodic, just update the next event time for
	 * the target CPU.  If the timer is global, there is a chance it
	 * is already programmed.
	 */
	if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		tmp = hardperiod;
		bintime_mul(&tmp, ticks - 1);
		bintime_add(&tmp, &state->nexthard);
		if (bintime_cmp(&tmp, &state->nextevent, <))
			state->nextevent = tmp;
		if (periodic ||
		    bintime_cmp(&state->nextevent, &nexttick, >=)) {
			ET_HW_UNLOCK(state);
			return;
		}
	}
	/*
	 * Otherwise we have to wake that CPU up, as we can't get the
	 * present bintime to reprogram the global timer from here.  If
	 * the timer is per-CPU, we by definition can't do it from here.
	 */
	ET_HW_UNLOCK(state);
	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state->handle = 1;
		ipi_cpu(cpu, IPI_HARDCLOCK);
	} else {
		if (!cpu_idle_wakeup(cpu))
			ipi_cpu(cpu, IPI_AST);
	}
}
#endif

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I",
    "Enable event timer periodic mode");