/*-
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func = NULL;
#endif

int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static sbintime_t	getnextcpuevent(int idle);
static sbintime_t	getnextevent(void);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}
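/*
 * Locking note: when the active event timer is per-CPU
 * (ET_FLAGS_PERCPU), its state is protected by that CPU's
 * pcpu_state spin mutex; a single global timer is protected by
 * et_hw_mtx instead.  The macros above select the proper spin
 * mutex for the currently chosen timer.
 */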
static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling = 0;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick = 0;	/* Run periodic events when idle. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static int		periodic = 0;	/* Periodic or one-shot mode. */
static int		want_periodic = 0; /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
#ifdef KDTRACE_HOOKS
	sbintime_t	nextcyc;	/* Next OpenSolaris cyclics event. */
#endif
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR3(KTR_SPARE2, "ipi at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}
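/*
 * handleevents() below is called with fake == 0 from real timer
 * interrupts, and with a non-zero fake value to account for time
 * that passed while a CPU was not taking clock interrupts:
 * cpu_activeclock() passes 1 (run hardclock/statclock catch-up,
 * but no profclock), and cpu_initclocks_ap() passes 2 (only
 * advance the per-CPU event bookkeeping).
 */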
/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt) {
		state->nextcall = state->nextcallopt = INT64_MAX;
		callout_process(now);
	}

#ifdef KDTRACE_HOOKS
	if (fake == 0 && now >= state->nextcyc && cyclic_clock_func != NULL) {
		state->nextcyc = INT64_MAX;
		(*cyclic_clock_func)(frame);
	}
#endif

	t = getnextcpuevent(0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, 0);
	}
	ET_HW_UNLOCK(state);
	return (done);
}
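/*
 * Note on idle coalescing in getnextcpuevent() below: an idle CPU
 * does not need every hardclock() tick, so its next hardclock()
 * event may be deferred by roughly hz/2 ticks, i.e. it keeps
 * ticking only about twice per second.  The first CPU is kept
 * ticking at tc_min_ticktock_freq so that the timecounter
 * hardware is still read often enough to not wrap.
 */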
/*
 * Compute the time of the next event scheduled on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
	sbintime_t event;
	struct pcpu_state *state;
	u_int hardfreq;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		hardfreq = (u_int)hz / 2;
		if (tc_min_ticktock_freq > 2
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) {	/* If CPU is active, handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (event > state->nextcyc)
		event = state->nextcyc;
#endif
	return (event);
}

/*
 * Compute the time of the next event scheduled on any CPU.
 */
static sbintime_t
getnextevent(void)
{
	struct pcpu_state *state;
	sbintime_t event;
#ifdef SMP
	int cpu;
#endif
	int c;

	state = DPCPU_PTR(timerstate);
	event = state->nextevent;
	c = -1;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
				c = cpu;
			}
		}
	}
#endif
	CTR4(KTR_SPARE2, "next at %d: next %d.%08x by %d",
	    curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR3(KTR_SPARE2, "intr at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load a new value into the hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)		/* Less left than passed. */
				new += timerperiod;
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent();
		eq = (new == *next);
		CTR4(KTR_SPARE2, "load at %d: next %d.%08x eq %d",
		    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}
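/*
 * Example of the periodic alignment above: with timerperiod equal
 * to 1/1000 s, a timer started at now = 2.0003 s is first
 * programmed to fire in 0.0007 s, at the 2.001 s period boundary,
 * so that all CPUs running per-CPU timers tick in phase.
 */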
/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure specified per-CPU timer on other CPU.  Called from IPI handler.
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure specified timer.
 * For per-CPU timers use IPI to make other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = INT64_MAX;
			else
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}
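/*
 * Example of the rounding done by round_freq() below: asking for
 * 1000 Hz from a hypothetical 32768 Hz timer that only supports
 * power-of-two dividers (ET_FLAGS_POW2DIV) picks divider 32 and
 * yields 1024 Hz, while a timer with an arbitrary divider simply
 * rounds to the nearest rate its base frequency can reach.
 */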
/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc = INT64_MAX;
#endif
		state->nextcall = INT64_MAX;
		state->nextcallopt = INT64_MAX;
	}
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128 Hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}
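/*
 * Worked example of the periodic-mode math above, assuming
 * hz = 1000 and a timer flexible enough that round_freq() is an
 * identity: singlemul defaults to 2, so base = 2000, div = 15,
 * stathz = 2000 / 15 = 133, and profhz is raised in stathz steps
 * while it stays within 128 * 64 = 8192, reaching 8113 before its
 * final rounding.
 */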
/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	now = sbinuptime();
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "idle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "active at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *bt)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	t = bttosbt(*bt);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();

	CTR5(KTR_SPARE2, "set_cyc at %d: now %d.%08x t %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
	    (int)(t >> 32), (u_int)(t & 0xffffffff));

	ET_HW_LOCK(state);
	if (t == state->nextcyc)
		goto done;
	state->nextcyc = t;
	if (t >= state->nextevent)
		goto done;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
done:
	ET_HW_UNLOCK(state);
}
#endif
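/*
 * cpu_new_callout() below is how the callout subsystem requests a
 * timer event: bt is the time the event must be programmed for,
 * while bt_opt is an earlier time at which the pending callouts
 * may already be usefully processed; handleevents() above runs
 * callout_process() once the current time passes nextcallopt.
 */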
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If there is a callout time already set earlier, do nothing.
	 * This check may appear redundant because we already check in
	 * callout_process(), but this double check guarantees we're
	 * safe with respect to race conditions between interrupt
	 * execution and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier, do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If the timer is periodic, there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If the timer is global or belongs to the current CPU, reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I",
    "Enable event timer periodic mode");
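/*
 * Example of driving the knobs defined above from userland (the
 * timer name is platform dependent; HPET is just an illustration):
 *
 *	sysctl kern.eventtimer.timer=HPET	# switch timer hardware
 *	sysctl kern.eventtimer.periodic=1	# force periodic mode
 */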