/*-
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

int			cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int			cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static sbintime_t	getnextcpuevent(int idle);
static sbintime_t	getnextevent(void);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}
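/*
 * Locking note: a per-CPU event timer is protected by its CPU's et_hw_mtx
 * in the pcpu_state below, while a single global timer is protected by the
 * global et_hw_mtx above; the ET_HW_LOCK()/ET_HW_UNLOCK() macros pick the
 * right one.  Both are spin mutexes, since these paths run from interrupt
 * context.
 */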
static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul;	/* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick;	/* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int		periodic;	/* Periodic or one-shot mode. */
static int		want_periodic;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR3(KTR_SPARE2, "ipi at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}
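/*
 * The "fake" argument of handleevents() below selects how much work to do:
 * 0 is a real timer interrupt, 1 accounts for elapsed periods without a
 * trap frame (used when leaving idle in cpu_activeclock()), and 2 only
 * advances the event counters and reprograms the timer (used during AP
 * startup in cpu_initclocks_ap()).
 */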
/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt || now >= state->nextcall) {
		state->nextcall = state->nextcallopt = SBT_MAX;
		callout_process(now);
	}

	t = getnextcpuevent(0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, (fake == 2) &&
		    (timer->et_flags & ET_FLAGS_PERCPU));
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the time of the next event scheduled on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
	sbintime_t event;
	struct pcpu_state *state;
	u_int hardfreq;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		hardfreq = (u_int)hz / 2;
		if (tc_min_ticktock_freq > 2
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) {	/* If CPU is active - handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
	return (event);
}

/*
 * Compute the time of the next event scheduled on any CPU.
 */
static sbintime_t
getnextevent(void)
{
	struct pcpu_state *state;
	sbintime_t event;
#ifdef SMP
	int cpu;
#endif
	int c;

	state = DPCPU_PTR(timerstate);
	event = state->nextevent;
	c = -1;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
				c = cpu;
			}
		}
	}
#endif
	CTR4(KTR_SPARE2, "next at %d: next %d.%08x by %d",
	    curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}
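/*
 * timercb() below runs on every interrupt of the active event timer.  With
 * a global (non-per-CPU) timer, one CPU takes the interrupt, marks every
 * CPU whose next event is due, and kicks those CPUs with IPI_HARDCLOCK so
 * that hardclockintr() above handles their events.
 */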
/*
 * Hardware timer callback function.
 */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR3(KTR_SPARE2, "intr at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#endif
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
#ifdef EARLY_AP_STARTUP
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load a new value into the hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)	/* Less time left than passed. */
				new += timerperiod;
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent();
		eq = (new == *next);
		CTR4(KTR_SPARE2, "load at %d: next %d.%08x eq %d",
		    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}
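/*
 * The two functions below implement the cross-CPU reconfiguration protocol
 * for per-CPU timers: configtimer() stores an action code in each remote
 * CPU's state (1 = start/load, 2 = stop) and broadcasts IPI_HARDCLOCK;
 * doconfigtimer() runs from the IPI handler on the target CPU, performs
 * the action and clears the code with release semantics so the initiator
 * can spin until every CPU has finished.  The "handle" flag instead
 * requests immediate event handling (see cpu_new_callout()).
 */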
/*
 * Reconfigure specified per-CPU timer on another CPU.  Called from IPI handler.
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure theirs.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
#ifdef EARLY_AP_STARTUP
		MPASS(mp_ncpus == 1 || smp_started);
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
#ifndef EARLY_AP_STARTUP
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = SBT_MAX;
			else
#endif
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
#ifdef EARLY_AP_STARTUP
	/* If timer is global we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	/* If timer is global or there are no other CPUs yet - we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
#endif
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for the reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}
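/*
 * Worked example for round_freq() below: with an i8254 timer
 * (et_frequency = 1193182 Hz) and a requested freq of 1000 Hz, the divisor
 * is div = (1193182 + 500) / 1000 = 1193 and the returned frequency is
 * (1193182 + 596) / 1193 = 1000 Hz.  For hardware limited to power-of-two
 * divisors (ET_FLAGS_POW2DIV), "1 << (flsl(div + div / 2) - 1)" rounds div
 * to the nearest power of two instead: 1193 becomes 1024, giving about
 * 1165 Hz.
 */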
/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
		state->nextcall = SBT_MAX;
		state->nextcallopt = SBT_MAX;
	}
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}
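/*
 * In periodic mode the single interrupt rate computed above serves all
 * clocks: the timer runs at roughly hz * singlemul, hardclock() is spread
 * across those ticks, and stathz is a near-128 Hz divisor of the base
 * rate, with "div++" nudging it so statclock() events do not permanently
 * coincide with hardclock().  In one-shot mode the rates are independent:
 * stathz is rounded from 127 Hz and profhz from 64 * stathz.
 */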
/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	now = sbinuptime();
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "idle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "active at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency and
 * if et is the active timer it reconfigures the timer on all CPUs.  This is
 * intended to be a private interface for the use of et_change_frequency() only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

	ET_LOCK();
	if (et == timer) {
		configtimer(0);
		et->et_frequency = newfreq;
		configtimer(1);
	} else
		et->et_frequency = newfreq;
	ET_UNLOCK();
}
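/*
 * The callout subsystem notifies this file through cpu_new_callout()
 * below: "bt" is the hard deadline of the newly scheduled callout and
 * "bt_opt" an optional earlier time at which processing the callout queue
 * already becomes acceptable; handleevents() triggers callout_process() as
 * soon as either of them is reached.
 */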
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If there is a callout time already set earlier -- do nothing.
	 * This check may appear redundant because we check again in
	 * callout_process(), but this double check guarantees we're safe
	 * with respect to race conditions between interrupt execution
	 * and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier -- do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If timer is periodic -- there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If timer is global or of the current CPU -- reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
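/*
 * The active timer may be changed at run time, e.g.
 * "sysctl kern.eventtimer.timer=HPET"; valid names are those registered by
 * event timer drivers (on x86 typically LAPIC, HPET, i8254 and RTC).  The
 * handler above stops the old timer on all CPUs, swaps it out and restarts
 * the new one.
 */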
/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");

#include "opt_ddb.h"

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(clocksource, db_show_clocksource)
{
	struct pcpu_state *st;
	int c;

	CPU_FOREACH(c) {
		st = DPCPU_ID_PTR(c, timerstate);
		db_printf(
		    "CPU %2d: action %d handle %d ipi %d idle %d\n"
		    "        now %#jx nevent %#jx (%jd)\n"
		    "        ntick %#jx (%jd) nhard %#jx (%jd)\n"
		    "        nstat %#jx (%jd) nprof %#jx (%jd)\n"
		    "        ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
		    c, st->action, st->handle, st->ipi, st->idle,
		    (uintmax_t)st->now,
		    (uintmax_t)st->nextevent,
		    (uintmax_t)(st->nextevent - st->now) / tick_sbt,
		    (uintmax_t)st->nexttick,
		    (uintmax_t)(st->nexttick - st->now) / tick_sbt,
		    (uintmax_t)st->nexthard,
		    (uintmax_t)(st->nexthard - st->now) / tick_sbt,
		    (uintmax_t)st->nextstat,
		    (uintmax_t)(st->nextstat - st->now) / tick_sbt,
		    (uintmax_t)st->nextprof,
		    (uintmax_t)(st->nextprof - st->now) / tick_sbt,
		    (uintmax_t)st->nextcall,
		    (uintmax_t)(st->nextcall - st->now) / tick_sbt,
		    (uintmax_t)st->nextcallopt,
		    (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
	}
}

#endif