/*-
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func = NULL;
#endif

int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static sbintime_t	getnextcpuevent(int idle);
static sbintime_t	getnextevent(void);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}

static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling = 0;	/* Profiling events enabled. */
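
/*
 * The tunables and sysctls below select the timer to use, the hz
 * multiplier applied in periodic mode, whether periodic events keep
 * running while idle, and the preferred (periodic vs. one-shot)
 * operation mode.
 */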

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick = 0;	/* Run periodic events when idle. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static int		periodic = 0;	/* Periodic or one-shot mode. */
static int		want_periodic = 0; /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
#ifdef KDTRACE_HOOKS
	sbintime_t	nextcyc;	/* Next OpenSolaris cyclics event. */
#endif
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR3(KTR_SPARE2, "ipi at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}
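
/*
 * handleevents() below is called with 'fake' describing the context:
 * 0 for a real timer interrupt, 1 when catching up after idle (no
 * profclock or cyclic events are run), and 2 during CPU startup, where
 * the timer is programmed but hardclock/statclock/profclock are not
 * called at all.
 */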

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt) {
		state->nextcall = state->nextcallopt = INT64_MAX;
		callout_process(now);
	}

#ifdef KDTRACE_HOOKS
	if (fake == 0 && now >= state->nextcyc && cyclic_clock_func != NULL) {
		state->nextcyc = INT64_MAX;
		(*cyclic_clock_func)(frame);
	}
#endif

	t = getnextcpuevent(0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, (fake == 2) &&
		    (timer->et_flags & ET_FLAGS_PERCPU));
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the time of the next event on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
	sbintime_t event;
	struct pcpu_state *state;
	u_int hardfreq;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		hardfreq = (u_int)hz / 2;
		if (tc_min_ticktock_freq > 2
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) { /* If CPU is active, handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (event > state->nextcyc)
		event = state->nextcyc;
#endif
	return (event);
}
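
/*
 * With a single global timer the hardware must be programmed for the
 * earliest event pending on any CPU, so getnextevent() below scans the
 * per-CPU states; with per-CPU timers only the local state matters.
 */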

/*
 * Compute the time of the next event on all CPUs.
 */
static sbintime_t
getnextevent(void)
{
	struct pcpu_state *state;
	sbintime_t event;
#ifdef SMP
	int cpu;
#endif
	int c;

	state = DPCPU_PTR(timerstate);
	event = state->nextevent;
	c = -1;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
				c = cpu;
			}
		}
	}
#endif
	CTR4(KTR_SPARE2, "next at %d: next %d.%08x by %d",
	    curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR3(KTR_SPARE2, "intr at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load a new value into the hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)		/* Left less than passed. */
				new += timerperiod;
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent();
		eq = (new == *next);
		CTR4(KTR_SPARE2, "load at %d: next %d.%08x eq %d",
		    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}
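
/*
 * setuptimer() below chooses the interrupt frequency for periodic mode:
 * hz times the 'singlemul' multiplier, raised in steps of hz until it
 * can also carry statclock (or profclock while profiling), and then
 * rounded to what the hardware can actually generate.
 */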

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * an IPI handler.
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = INT64_MAX;
			else
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}
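
/*
 * round_freq() below maps the requested frequency onto an integer
 * divisor of the hardware counter frequency (a power-of-two divisor if
 * ET_FLAGS_POW2DIV is set) and clamps the result to the range allowed
 * by the timer's minimum and maximum period.
 */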

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc = INT64_MAX;
#endif
		state->nextcall = INT64_MAX;
		state->nextcallopt = INT64_MAX;
	}
	periodic = want_periodic;
	/* Grab the requested timer or the best one available. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}
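
/*
 * On an AP, cpu_initclocks_ap() below syncs the per-CPU state to the
 * current time and runs handleevents() with fake == 2, which programs
 * the (possibly per-CPU) timer without invoking any clock handlers.
 */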

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	now = sbinuptime();
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "idle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "active at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *bt)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	t = bttosbt(*bt);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();

	CTR5(KTR_SPARE2, "set_cyc at %d: now %d.%08x t %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
	    (int)(t >> 32), (u_int)(t & 0xffffffff));

	ET_HW_LOCK(state);
	if (t == state->nextcyc)
		goto done;
	state->nextcyc = t;
	if (t >= state->nextevent)
		goto done;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
done:
	ET_HW_UNLOCK(state);
}
#endif
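
/*
 * cpu_new_callout() below is invoked by the callout subsystem when a
 * callout earlier than any currently scheduled event is added on a CPU.
 * It may have to reprogram the one-shot timer, or IPI the target CPU so
 * that it reprograms its own per-CPU timer.
 */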
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If a callout time is already set earlier, do nothing.  This
	 * check may appear redundant because we check already in
	 * callout_process(), but this double check guarantees we're safe
	 * with respect to race conditions between interrupt execution
	 * and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier, do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If the timer is periodic, there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If the timer is global or owned by the current CPU, reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I",
    "Enable event timer periodic mode");